//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"
static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
    cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
    cl::desc("disable setting the node scheduling preference to ILP on PPC"),
    cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
    cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
    cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

static cl::opt<bool> EnableQuadPrecision("enable-ppc-quad-precision",
    cl::desc("enable quad precision float support on ppc"), cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");

static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!useSoftFloat()) {
    if (hasSPE()) {
      addRegisterClass(MVT::f32, &PPC::SPE4RCRegClass);
      addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
    } else {
      addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
      addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
    }
  }

  // Match BITREVERSE to customized fast code sequence in the td file.
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);

  // Sub-word ATOMIC_CMP_SWAP needs to ensure that the input is zero-extended.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  if (!Subtarget.hasSPE()) {
    setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
  }

  // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
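  // (addc/adde and subfc/subfe propagate the carry through the CA bit in the
  // XER register.)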
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9. On P9 we may
  // use a hardware instruction to compute the remainder. The instructions are
  // not legalized directly because in the cases where the result of both the
  // remainder and the division is required, it is more efficient to compute
  // the remainder from the result of the division rather than use the
  // remainder instruction.
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
    setOperationAction(ISD::SREM, MVT::i64, Custom);
    setOperationAction(ISD::UREM, MVT::i64, Custom);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }

  if (Subtarget.hasP9Vector()) {
    setOperationAction(ISD::ABS, MVT::v4i32, Legal);
    setOperationAction(ISD::ABS, MVT::v8i16, Legal);
    setOperationAction(ISD::ABS, MVT::v16i8, Legal);
  }

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
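  // Marking these Expand steers SREM/UREM expansion towards the
  // divide+multiply+subtract sequence instead of forming the combined nodes.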
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  if (Subtarget.hasSPE()) {
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f32, Expand);
  } else {
    setOperationAction(ISD::FMA, MVT::f64, Legal);
    setOperationAction(ISD::FMA, MVT::f32, Legal);
  }

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, but we can use the vector BSWAP instruction
  // xxbrd to speed up scalar BSWAP64.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::BSWAP, MVT::i64, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Legal);
    setOperationAction(ISD::CTTZ, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);
    setOperationAction(ISD::CTTZ, MVT::i32, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have Select.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  if (Subtarget.hasSPE()) {
    // SPE has built-in conversions.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
  } else {
    // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

    // PowerPC does not have [U|S]INT_TO_FP.
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  }

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: The EH_SJLJ_SETJMP/_LONGJMP support here is NOT intended for SjLj
  // exception handling; it is a light-weight setjmp/longjmp replacement used
  // to support continuations, user-level threading, and so on. As a result,
  // no other SjLj exception interfaces are implemented, and please don't
  // build your own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  if (Subtarget.isSVR4ABI() && !isPPC64)
    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
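  // (This handles the i1 result of the CTR-decrement intrinsic emitted by the
  // CTR loops pass for counter-based loops.)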
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  if (Subtarget.hasSPE()) {
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
  }
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    if (Subtarget.hasSPE())
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    else
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
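    // (The *_PARTS nodes operate on the value split into its lo/hi halves.)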
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // Vector instructions introduced in P8
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType(ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
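      // (Everything else defaults to Expand here and is re-enabled for
      // specific vector types below where profitable.)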
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
      setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128 bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However, due to direct move costs, it's not
        // worth doing.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Vector operation legalization checks the result type of
      // SIGN_EXTEND_INREG, overall legalization checks the inner type.
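      // Actions are therefore set for both the result type (v2i64) and the
      // inner types (v2i32, v2i16, v2i8) below.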
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128 bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);

      if (EnableQuadPrecision) {
        addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
        setOperationAction(ISD::FADD, MVT::f128, Legal);
        setOperationAction(ISD::FSUB, MVT::f128, Legal);
        setOperationAction(ISD::FDIV, MVT::f128, Legal);
        setOperationAction(ISD::FMUL, MVT::f128, Legal);
        setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
        // No extending loads to f128 on PPC.
        for (MVT FPT : MVT::fp_valuetypes())
          setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
        setOperationAction(ISD::FMA, MVT::f128, Legal);
        setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
        setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
        setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
        setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
        setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
        setCondCodeAction(ISD::SETONE, MVT::f128, Expand);

        setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
        setOperationAction(ISD::FRINT, MVT::f128, Legal);
        setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
        setOperationAction(ISD::FCEIL, MVT::f128, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
        setOperationAction(ISD::FROUND, MVT::f128, Legal);

        setOperationAction(ISD::SELECT, MVT::f128, Expand);
        setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
        setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
        setTruncStoreAction(MVT::f128, MVT::f64, Expand);
        setTruncStoreAction(MVT::f128, MVT::f32, Expand);
        setOperationAction(ISD::BITCAST, MVT::i128, Custom);
        // No implementation for these ops for PowerPC.
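        // They expand to the f128 libcalls registered later in this
        // constructor (sinf128, cosf128, powf128, __powikf2, fmodf128).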
        setOperationAction(ISD::FSIN, MVT::f128, Expand);
        setOperationAction(ISD::FCOS, MVT::f128, Expand);
        setOperationAction(ISD::FPOW, MVT::f128, Expand);
        setOperationAction(ISD::FPOWI, MVT::f128, Expand);
        setOperationAction(ISD::FREM, MVT::f128, Expand);
      }

    }

    if (Subtarget.hasP9Altivec()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f64, Custom);
    setOperationAction(ISD::STORE, MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_ROUND_INREG, MVT::v4f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG, MVT::v4f64, Legal);
    setOperationAction(ISD::FABS, MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f32, Custom);
    setOperationAction(ISD::STORE, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND, MVT::v4i1, Legal);
    setOperationAction(ISD::OR, MVT::v4i1, Legal);
    setOperationAction(ISD::XOR, MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
    setOperationAction(ISD::STORE, MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
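    // (Unlike nearbyint, rint is expected to signal the inexact exception
    // when its result differs from the operand.)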
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  if (EnableQuadPrecision) {
    setLibcallName(RTLIB::LOG_F128, "logf128");
    setLibcallName(RTLIB::LOG2_F128, "log2f128");
    setLibcallName(RTLIB::LOG10_F128, "log10f128");
    setLibcallName(RTLIB::EXP_F128, "expf128");
    setLibcallName(RTLIB::EXP2_F128, "exp2f128");
    setLibcallName(RTLIB::SIN_F128, "sinf128");
    setLibcallName(RTLIB::COS_F128, "cosf128");
    setLibcallName(RTLIB::POW_F128, "powf128");
    setLibcallName(RTLIB::FMIN_F128, "fminf128");
    setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
    setLibcallName(RTLIB::POWI_F128, "__powikf2");
    setLibcallName(RTLIB::REM_F128, "fmodf128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(2);
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(4);

  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
    setPrefFunctionAlignment(4);
    setPrefLoopAlignment(4);
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
    MaxLoadsPerMemcmp = 128;
  } else {
    MaxLoadsPerMemcmp = 8;
    MaxLoadsPerMemcmpOptSize = 4;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
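/// The alignment is raised to 16 for vector members of at least 128 bits (or
/// to 32 for 256-bit vectors when MaxMaxAlign allows), recursing through
/// array and struct elements until the MaxMaxAlign cap is reached.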
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary. Everything
  // else is passed on an 8-byte boundary on PPC64 and a 4-byte boundary on
  // PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
  return Align;
}

unsigned PPCTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                          CallingConv::ID CC,
                                                          EVT VT) const {
  if (Subtarget.hasSPE() && VT == MVT::f64)
    return 2;
  return PPCTargetLowering::getNumRegisters(Context, VT);
}

MVT PPCTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                     CallingConv::ID CC,
                                                     EVT VT) const {
  if (Subtarget.hasSPE() && VT == MVT::f64)
    return MVT::i32;
  return PPCTargetLowering::getRegisterType(Context, VT);
}

bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

bool PPCTargetLowering::hasSPE() const {
  return Subtarget.hasSPE();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER: break;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
  case PPCISD::FP_TO_UINT_IN_VSR:
    return "PPCISD::FP_TO_UINT_IN_VSR";
  case PPCISD::FP_TO_SINT_IN_VSR:
    return "PPCISD::FP_TO_SINT_IN_VSR";
  case PPCISD::FRE: return "PPCISD::FRE";
  case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::XXSPLT: return "PPCISD::XXSPLT";
  case PPCISD::VECINSERT: return "PPCISD::VECINSERT";
  case PPCISD::XXREVERSE: return "PPCISD::XXREVERSE";
  case PPCISD::XXPERMDI: return "PPCISD::XXPERMDI";
  case PPCISD::VECSHL: return "PPCISD::VECSHL";
  case PPCISD::CMPB: return "PPCISD::CMPB";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
  case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
  case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
  case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
  case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL: return "PPCISD::SRL";
  case PPCISD::SRA: return "PPCISD::SRA";
  case PPCISD::SHL: return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL: return "PPCISD::CALL";
  case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR: return "PPCISD::MTCTR";
  case PPCISD::BCTRL: return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
  case PPCISD::MFVSR: return "PPCISD::MFVSR";
  case PPCISD::MTVSRA: return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ";
  case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP";
  case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP";
  case PPCISD::ANDIo_1_EQ_BIT: return "PPCISD::ANDIo_1_EQ_BIT";
  case PPCISD::ANDIo_1_GT_BIT: return "PPCISD::ANDIo_1_GT_BIT";
  case PPCISD::VCMP: return "PPCISD::VCMP";
  case PPCISD::VCMPo: return "PPCISD::VCMPo";
  case PPCISD::LBRX: return "PPCISD::LBRX";
  case PPCISD::STBRX: return "PPCISD::STBRX";
  case PPCISD::LFIWAX: return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX: return "PPCISD::LFIWZX";
  case PPCISD::LXSIZX: return "PPCISD::LXSIZX";
  case PPCISD::STXSIX: return "PPCISD::STXSIX";
  case PPCISD::VEXTS: return "PPCISD::VEXTS";
  case PPCISD::SExtVElems: return "PPCISD::SExtVElems";
  case PPCISD::LXVD2X: return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X: return "PPCISD::STXVD2X";
  case PPCISD::ST_VSR_SCAL_INT:
    return "PPCISD::ST_VSR_SCAL_INT";
  case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ: return "PPCISD::BDNZ";
  case PPCISD::BDZ: return "PPCISD::BDZ";
  case PPCISD::MFFS: return "PPCISD::MFFS";
  case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET: return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
  case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA";
PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L"; 1340 case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR"; 1341 case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR"; 1342 case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA"; 1343 case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L"; 1344 case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT"; 1345 case PPCISD::SC: return "PPCISD::SC"; 1346 case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB"; 1347 case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE"; 1348 case PPCISD::RFEBB: return "PPCISD::RFEBB"; 1349 case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD"; 1350 case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN"; 1351 case PPCISD::QVFPERM: return "PPCISD::QVFPERM"; 1352 case PPCISD::QVGPCI: return "PPCISD::QVGPCI"; 1353 case PPCISD::QVALIGNI: return "PPCISD::QVALIGNI"; 1354 case PPCISD::QVESPLATI: return "PPCISD::QVESPLATI"; 1355 case PPCISD::QBFLT: return "PPCISD::QBFLT"; 1356 case PPCISD::QVLFSb: return "PPCISD::QVLFSb"; 1357 case PPCISD::BUILD_FP128: return "PPCISD::BUILD_FP128"; 1358 } 1359 return nullptr; 1360 } 1361 1362 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C, 1363 EVT VT) const { 1364 if (!VT.isVector()) 1365 return Subtarget.useCRBits() ? MVT::i1 : MVT::i32; 1366 1367 if (Subtarget.hasQPX()) 1368 return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements()); 1369 1370 return VT.changeVectorElementTypeToInteger(); 1371 } 1372 1373 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const { 1374 assert(VT.isFloatingPoint() && "Non-floating-point FMA?"); 1375 return true; 1376 } 1377 1378 //===----------------------------------------------------------------------===// 1379 // Node matching predicates, for use by the tblgen matching code. 1380 //===----------------------------------------------------------------------===// 1381 1382 /// isFloatingPointZero - Return true if this is 0.0 or -0.0. 1383 static bool isFloatingPointZero(SDValue Op) { 1384 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 1385 return CFP->getValueAPF().isZero(); 1386 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 1387 // Maybe this has already been legalized into the constant pool? 1388 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1))) 1389 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 1390 return CFP->getValueAPF().isZero(); 1391 } 1392 return false; 1393 } 1394 1395 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return 1396 /// true if Op is undef or if it matches the specified value. 1397 static bool isConstantOrUndef(int Op, int Val) { 1398 return Op < 0 || Op == Val; 1399 } 1400 1401 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a 1402 /// VPKUHUM instruction. 1403 /// The ShuffleKind distinguishes between big-endian operations with 1404 /// two different inputs (0), either-endian operations with two identical 1405 /// inputs (1), and little-endian operations with two different inputs (2). 1406 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 
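/// For example, with ShuffleKind 0 on a big-endian target the expected mask is
/// <1, 3, 5, ..., 31>, i.e. the low-order byte of every halfword taken from
/// both input vectors.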
1407 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, 1408 SelectionDAG &DAG) { 1409 bool IsLE = DAG.getDataLayout().isLittleEndian(); 1410 if (ShuffleKind == 0) { 1411 if (IsLE) 1412 return false; 1413 for (unsigned i = 0; i != 16; ++i) 1414 if (!isConstantOrUndef(N->getMaskElt(i), i*2+1)) 1415 return false; 1416 } else if (ShuffleKind == 2) { 1417 if (!IsLE) 1418 return false; 1419 for (unsigned i = 0; i != 16; ++i) 1420 if (!isConstantOrUndef(N->getMaskElt(i), i*2)) 1421 return false; 1422 } else if (ShuffleKind == 1) { 1423 unsigned j = IsLE ? 0 : 1; 1424 for (unsigned i = 0; i != 8; ++i) 1425 if (!isConstantOrUndef(N->getMaskElt(i), i*2+j) || 1426 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)) 1427 return false; 1428 } 1429 return true; 1430 } 1431 1432 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a 1433 /// VPKUWUM instruction. 1434 /// The ShuffleKind distinguishes between big-endian operations with 1435 /// two different inputs (0), either-endian operations with two identical 1436 /// inputs (1), and little-endian operations with two different inputs (2). 1437 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 1438 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, 1439 SelectionDAG &DAG) { 1440 bool IsLE = DAG.getDataLayout().isLittleEndian(); 1441 if (ShuffleKind == 0) { 1442 if (IsLE) 1443 return false; 1444 for (unsigned i = 0; i != 16; i += 2) 1445 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) || 1446 !isConstantOrUndef(N->getMaskElt(i+1), i*2+3)) 1447 return false; 1448 } else if (ShuffleKind == 2) { 1449 if (!IsLE) 1450 return false; 1451 for (unsigned i = 0; i != 16; i += 2) 1452 if (!isConstantOrUndef(N->getMaskElt(i ), i*2) || 1453 !isConstantOrUndef(N->getMaskElt(i+1), i*2+1)) 1454 return false; 1455 } else if (ShuffleKind == 1) { 1456 unsigned j = IsLE ? 0 : 2; 1457 for (unsigned i = 0; i != 8; i += 2) 1458 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) || 1459 !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) || 1460 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) || 1461 !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1)) 1462 return false; 1463 } 1464 return true; 1465 } 1466 1467 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a 1468 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the 1469 /// current subtarget. 1470 /// 1471 /// The ShuffleKind distinguishes between big-endian operations with 1472 /// two different inputs (0), either-endian operations with two identical 1473 /// inputs (1), and little-endian operations with two different inputs (2). 1474 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 
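/// For example, with ShuffleKind 0 on a big-endian target the expected mask is
/// <4,5,6,7, 12,13,14,15, 20,21,22,23, 28,29,30,31>, i.e. the low-order word
/// of every doubleword taken from both input vectors.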
1475 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, 1476 SelectionDAG &DAG) { 1477 const PPCSubtarget& Subtarget = 1478 static_cast<const PPCSubtarget&>(DAG.getSubtarget()); 1479 if (!Subtarget.hasP8Vector()) 1480 return false; 1481 1482 bool IsLE = DAG.getDataLayout().isLittleEndian(); 1483 if (ShuffleKind == 0) { 1484 if (IsLE) 1485 return false; 1486 for (unsigned i = 0; i != 16; i += 4) 1487 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+4) || 1488 !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) || 1489 !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) || 1490 !isConstantOrUndef(N->getMaskElt(i+3), i*2+7)) 1491 return false; 1492 } else if (ShuffleKind == 2) { 1493 if (!IsLE) 1494 return false; 1495 for (unsigned i = 0; i != 16; i += 4) 1496 if (!isConstantOrUndef(N->getMaskElt(i ), i*2) || 1497 !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) || 1498 !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) || 1499 !isConstantOrUndef(N->getMaskElt(i+3), i*2+3)) 1500 return false; 1501 } else if (ShuffleKind == 1) { 1502 unsigned j = IsLE ? 0 : 4; 1503 for (unsigned i = 0; i != 8; i += 4) 1504 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) || 1505 !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) || 1506 !isConstantOrUndef(N->getMaskElt(i+2), i*2+j+2) || 1507 !isConstantOrUndef(N->getMaskElt(i+3), i*2+j+3) || 1508 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) || 1509 !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1) || 1510 !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) || 1511 !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3)) 1512 return false; 1513 } 1514 return true; 1515 } 1516 1517 /// isVMerge - Common function, used to match vmrg* shuffles. 1518 /// 1519 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize, 1520 unsigned LHSStart, unsigned RHSStart) { 1521 if (N->getValueType(0) != MVT::v16i8) 1522 return false; 1523 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) && 1524 "Unsupported merge size!"); 1525 1526 for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units 1527 for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit 1528 if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j), 1529 LHSStart+j+i*UnitSize) || 1530 !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j), 1531 RHSStart+j+i*UnitSize)) 1532 return false; 1533 } 1534 return true; 1535 } 1536 1537 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for 1538 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes). 1539 /// The ShuffleKind distinguishes between big-endian merges with two 1540 /// different inputs (0), either-endian merges with two identical inputs (1), 1541 /// and little-endian merges with two different inputs (2). For the latter, 1542 /// the input operands are swapped (see PPCInstrAltivec.td). 
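/// For example, a big-endian VMRGLW mask (UnitSize == 4, ShuffleKind 0) is
/// <8,9,10,11, 24,25,26,27, 12,13,14,15, 28,29,30,31>, interleaving the low
/// halves of the two inputs one word at a time.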
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}

/**
 * Common function used to match vmrgew and vmrgow shuffles.
 *
 * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target machine.
 *   - Little Endian:
 *     - Use offset of 0 to check for odd elements
 *     - Use offset of 4 to check for even elements
 *   - Big Endian:
 *     - Use offset of 0 to check for even elements
 *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 * Targeting your applications - what little endian and big endian IBM XL C/C++
 * compiler differences mean to you
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements are
 * numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, thus each vector will contain 16 byte-sized
 * elements. More info on the shuffle vector can be found in the
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
 * Language Reference.
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 *   - If the instruction uses the same vector for both inputs, the range of
 *     the indices will be 0 to 15. In this case, the RHSStart value passed
 *     should be 0.
 *   - If the instruction has two different vectors then the range of the
 *     indices will be 0 to 31. In this case, the RHSStart value passed should
 *     be 16 (indices 0-15 specify elements in the first vector while indices
 *     16 to 31 specify elements in the second vector).
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] IndexOffset Specifies whether to look for even or odd elements
 * \param[in] RHSStartValue Specifies the starting index for the righthand input
 *            vector to the shuffle_vector instruction
 * \return true iff this shuffle vector represents an even or odd word merge
 */
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
      if (!isConstantOrUndef(N->getMaskElt(i*4+j),
                             i*RHSStartValue+j+IndexOffset) ||
          !isConstantOrUndef(N->getMaskElt(i*4+j+8),
                             i*RHSStartValue+j+IndexOffset+8))
        return false;
  return true;
}

/**
 * Determine if the specified shuffle mask is suitable for the vmrgew or
 * vmrgow instructions.
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
 * \param[in] ShuffleKind Identify the type of merge:
 *   - 0 = big-endian merge with two different inputs;
 *   - 1 = either-endian merge with two identical inputs;
 *   - 2 = little-endian merge with two different inputs (inputs are swapped
 *     for little-endian merges).
 * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask represents an even or odd word merge of
 *         the requested kind
 */
bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                              unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    unsigned indexOffset = CheckEven ? 4 : 0;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  } else {
    unsigned indexOffset = CheckEven ? 0 : 4;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 0) // Normal
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  }
  return false;
}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
/// The ShuffleKind distinguishes between big-endian operations with two
/// different inputs (0), either-endian operations with two identical inputs
/// (1), and little-endian operations with two different inputs (2). For the
/// latter, the input operands are swapped (see PPCInstrAltivec.td).
int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                             SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::v16i8)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1; // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
1703 unsigned ShiftAmt = SVOp->getMaskElt(i); 1704 if (ShiftAmt < i) return -1; 1705 1706 ShiftAmt -= i; 1707 bool isLE = DAG.getDataLayout().isLittleEndian(); 1708 1709 if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) { 1710 // Check the rest of the elements to see if they are consecutive. 1711 for (++i; i != 16; ++i) 1712 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 1713 return -1; 1714 } else if (ShuffleKind == 1) { 1715 // Check the rest of the elements to see if they are consecutive. 1716 for (++i; i != 16; ++i) 1717 if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15)) 1718 return -1; 1719 } else 1720 return -1; 1721 1722 if (isLE) 1723 ShiftAmt = 16 - ShiftAmt; 1724 1725 return ShiftAmt; 1726 } 1727 1728 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand 1729 /// specifies a splat of a single element that is suitable for input to 1730 /// VSPLTB/VSPLTH/VSPLTW. 1731 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) { 1732 assert(N->getValueType(0) == MVT::v16i8 && 1733 (EltSize == 1 || EltSize == 2 || EltSize == 4)); 1734 1735 // The consecutive indices need to specify an element, not part of two 1736 // different elements. So abandon ship early if this isn't the case. 1737 if (N->getMaskElt(0) % EltSize != 0) 1738 return false; 1739 1740 // This is a splat operation if each element of the permute is the same, and 1741 // if the value doesn't reference the second vector. 1742 unsigned ElementBase = N->getMaskElt(0); 1743 1744 // FIXME: Handle UNDEF elements too! 1745 if (ElementBase >= 16) 1746 return false; 1747 1748 // Check that the indices are consecutive, in the case of a multi-byte element 1749 // splatted with a v16i8 mask. 1750 for (unsigned i = 1; i != EltSize; ++i) 1751 if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase)) 1752 return false; 1753 1754 for (unsigned i = EltSize, e = 16; i != e; i += EltSize) { 1755 if (N->getMaskElt(i) < 0) continue; 1756 for (unsigned j = 0; j != EltSize; ++j) 1757 if (N->getMaskElt(i+j) != N->getMaskElt(j)) 1758 return false; 1759 } 1760 return true; 1761 } 1762 1763 /// Check that the mask is shuffling N byte elements. Within each N byte 1764 /// element of the mask, the indices could be either in increasing or 1765 /// decreasing order as long as they are consecutive. 1766 /// \param[in] N the shuffle vector SD Node to analyze 1767 /// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/ 1768 /// Word/DoubleWord/QuadWord). 1769 /// \param[in] StepLen the delta indices number among the N byte element, if 1770 /// the mask is in increasing/decreasing order then it is 1/-1. 1771 /// \return true iff the mask is shuffling N byte elements. 
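/// For example, <4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11> satisfies
/// Width == 4 with StepLen == 1, while <3,2,1,0, 7,6,5,4, 11,10,9,8,
/// 15,14,13,12> satisfies Width == 4 with StepLen == -1.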
1772 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width, 1773 int StepLen) { 1774 assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) && 1775 "Unexpected element width."); 1776 assert((StepLen == 1 || StepLen == -1) && "Unexpected element width."); 1777 1778 unsigned NumOfElem = 16 / Width; 1779 unsigned MaskVal[16]; // Width is never greater than 16 1780 for (unsigned i = 0; i < NumOfElem; ++i) { 1781 MaskVal[0] = N->getMaskElt(i * Width); 1782 if ((StepLen == 1) && (MaskVal[0] % Width)) { 1783 return false; 1784 } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) { 1785 return false; 1786 } 1787 1788 for (unsigned int j = 1; j < Width; ++j) { 1789 MaskVal[j] = N->getMaskElt(i * Width + j); 1790 if (MaskVal[j] != MaskVal[j-1] + StepLen) { 1791 return false; 1792 } 1793 } 1794 } 1795 1796 return true; 1797 } 1798 1799 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, 1800 unsigned &InsertAtByte, bool &Swap, bool IsLE) { 1801 if (!isNByteElemShuffleMask(N, 4, 1)) 1802 return false; 1803 1804 // Now we look at mask elements 0,4,8,12 1805 unsigned M0 = N->getMaskElt(0) / 4; 1806 unsigned M1 = N->getMaskElt(4) / 4; 1807 unsigned M2 = N->getMaskElt(8) / 4; 1808 unsigned M3 = N->getMaskElt(12) / 4; 1809 unsigned LittleEndianShifts[] = { 2, 1, 0, 3 }; 1810 unsigned BigEndianShifts[] = { 3, 0, 1, 2 }; 1811 1812 // Below, let H and L be arbitrary elements of the shuffle mask 1813 // where H is in the range [4,7] and L is in the range [0,3]. 1814 // H, 1, 2, 3 or L, 5, 6, 7 1815 if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) || 1816 (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) { 1817 ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3]; 1818 InsertAtByte = IsLE ? 12 : 0; 1819 Swap = M0 < 4; 1820 return true; 1821 } 1822 // 0, H, 2, 3 or 4, L, 6, 7 1823 if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) || 1824 (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) { 1825 ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3]; 1826 InsertAtByte = IsLE ? 8 : 4; 1827 Swap = M1 < 4; 1828 return true; 1829 } 1830 // 0, 1, H, 3 or 4, 5, L, 7 1831 if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) || 1832 (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) { 1833 ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3]; 1834 InsertAtByte = IsLE ? 4 : 8; 1835 Swap = M2 < 4; 1836 return true; 1837 } 1838 // 0, 1, 2, H or 4, 5, 6, L 1839 if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) || 1840 (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) { 1841 ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3]; 1842 InsertAtByte = IsLE ? 0 : 12; 1843 Swap = M3 < 4; 1844 return true; 1845 } 1846 1847 // If both vector operands for the shuffle are the same vector, the mask will 1848 // contain only elements from the first one and the second one will be undef. 1849 if (N->getOperand(1).isUndef()) { 1850 ShiftElts = 0; 1851 Swap = true; 1852 unsigned XXINSERTWSrcElem = IsLE ? 2 : 1; 1853 if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) { 1854 InsertAtByte = IsLE ? 12 : 0; 1855 return true; 1856 } 1857 if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) { 1858 InsertAtByte = IsLE ? 8 : 4; 1859 return true; 1860 } 1861 if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) { 1862 InsertAtByte = IsLE ? 4 : 8; 1863 return true; 1864 } 1865 if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) { 1866 InsertAtByte = IsLE ? 
0 : 12; 1867 return true; 1868 } 1869 } 1870 1871 return false; 1872 } 1873 1874 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, 1875 bool &Swap, bool IsLE) { 1876 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 1877 // Ensure each byte index of the word is consecutive. 1878 if (!isNByteElemShuffleMask(N, 4, 1)) 1879 return false; 1880 1881 // Now we look at mask elements 0,4,8,12, which are the beginning of words. 1882 unsigned M0 = N->getMaskElt(0) / 4; 1883 unsigned M1 = N->getMaskElt(4) / 4; 1884 unsigned M2 = N->getMaskElt(8) / 4; 1885 unsigned M3 = N->getMaskElt(12) / 4; 1886 1887 // If both vector operands for the shuffle are the same vector, the mask will 1888 // contain only elements from the first one and the second one will be undef. 1889 if (N->getOperand(1).isUndef()) { 1890 assert(M0 < 4 && "Indexing into an undef vector?"); 1891 if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4) 1892 return false; 1893 1894 ShiftElts = IsLE ? (4 - M0) % 4 : M0; 1895 Swap = false; 1896 return true; 1897 } 1898 1899 // Ensure each word index of the ShuffleVector Mask is consecutive. 1900 if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8) 1901 return false; 1902 1903 if (IsLE) { 1904 if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) { 1905 // Input vectors don't need to be swapped if the leading element 1906 // of the result is one of the 3 left elements of the second vector 1907 // (or if there is no shift to be done at all). 1908 Swap = false; 1909 ShiftElts = (8 - M0) % 8; 1910 } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) { 1911 // Input vectors need to be swapped if the leading element 1912 // of the result is one of the 3 left elements of the first vector 1913 // (or if we're shifting by 4 - thereby simply swapping the vectors). 1914 Swap = true; 1915 ShiftElts = (4 - M0) % 4; 1916 } 1917 1918 return true; 1919 } else { // BE 1920 if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) { 1921 // Input vectors don't need to be swapped if the leading element 1922 // of the result is one of the 4 elements of the first vector. 1923 Swap = false; 1924 ShiftElts = M0; 1925 } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) { 1926 // Input vectors need to be swapped if the leading element 1927 // of the result is one of the 4 elements of the right vector. 1928 Swap = true; 1929 ShiftElts = M0 - 4; 1930 } 1931 1932 return true; 1933 } 1934 } 1935 1936 bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) { 1937 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 1938 1939 if (!isNByteElemShuffleMask(N, Width, -1)) 1940 return false; 1941 1942 for (int i = 0; i < 16; i += Width) 1943 if (N->getMaskElt(i) != i + Width - 1) 1944 return false; 1945 1946 return true; 1947 } 1948 1949 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) { 1950 return isXXBRShuffleMaskHelper(N, 2); 1951 } 1952 1953 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) { 1954 return isXXBRShuffleMaskHelper(N, 4); 1955 } 1956 1957 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) { 1958 return isXXBRShuffleMaskHelper(N, 8); 1959 } 1960 1961 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) { 1962 return isXXBRShuffleMaskHelper(N, 16); 1963 } 1964 1965 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap 1966 /// if the inputs to the instruction should be swapped and set \p DM to the 1967 /// value for the immediate. 
1968 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI 1969 /// AND element 0 of the result comes from the first input (LE) or second input 1970 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered. 1971 /// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle 1972 /// mask. 1973 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM, 1974 bool &Swap, bool IsLE) { 1975 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 1976 1977 // Ensure each byte index of the double word is consecutive. 1978 if (!isNByteElemShuffleMask(N, 8, 1)) 1979 return false; 1980 1981 unsigned M0 = N->getMaskElt(0) / 8; 1982 unsigned M1 = N->getMaskElt(8) / 8; 1983 assert(((M0 | M1) < 4) && "A mask element out of bounds?"); 1984 1985 // If both vector operands for the shuffle are the same vector, the mask will 1986 // contain only elements from the first one and the second one will be undef. 1987 if (N->getOperand(1).isUndef()) { 1988 if ((M0 | M1) < 2) { 1989 DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1); 1990 Swap = false; 1991 return true; 1992 } else 1993 return false; 1994 } 1995 1996 if (IsLE) { 1997 if (M0 > 1 && M1 < 2) { 1998 Swap = false; 1999 } else if (M0 < 2 && M1 > 1) { 2000 M0 = (M0 + 2) % 4; 2001 M1 = (M1 + 2) % 4; 2002 Swap = true; 2003 } else 2004 return false; 2005 2006 // Note: if control flow comes here that means Swap is already set above 2007 DM = (((~M1) & 1) << 1) + ((~M0) & 1); 2008 return true; 2009 } else { // BE 2010 if (M0 < 2 && M1 > 1) { 2011 Swap = false; 2012 } else if (M0 > 1 && M1 < 2) { 2013 M0 = (M0 + 2) % 4; 2014 M1 = (M1 + 2) % 4; 2015 Swap = true; 2016 } else 2017 return false; 2018 2019 // Note: if control flow comes here that means Swap is already set above 2020 DM = (M0 << 1) + (M1 & 1); 2021 return true; 2022 } 2023 } 2024 2025 2026 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the 2027 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask. 2028 unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize, 2029 SelectionDAG &DAG) { 2030 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 2031 assert(isSplatShuffleMask(SVOp, EltSize)); 2032 if (DAG.getDataLayout().isLittleEndian()) 2033 return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize); 2034 else 2035 return SVOp->getMaskElt(0) / EltSize; 2036 } 2037 2038 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed 2039 /// by using a vspltis[bhw] instruction of the specified element size, return 2040 /// the constant being splatted. The ByteSize field indicates the number of 2041 /// bytes of each element [124] -> [bhw]. 2042 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { 2043 SDValue OpVal(nullptr, 0); 2044 2045 // If ByteSize of the splat is bigger than the element size of the 2046 // build_vector, then we have a case where we are checking for a splat where 2047 // multiple elements of the buildvector are folded together into a single 2048 // logical element of the splat (e.g. "vsplish 1" to splat {0,1}*8). 2049 unsigned EltSize = 16/N->getNumOperands(); 2050 if (EltSize < ByteSize) { 2051 unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval. 2052 SDValue UniquedVals[4]; 2053 assert(Multiple > 1 && Multiple <= 4 && "How can this happen?"); 2054 2055 // See if all of the elements in the buildvector agree across. 
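    // For example, with ByteSize == 4 and a v8i16 BUILD_VECTOR, Multiple is 2,
    // so all even-numbered operands must match each other and all odd-numbered
    // operands must match each other.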
2056 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 2057 if (N->getOperand(i).isUndef()) continue; 2058 // If the element isn't a constant, bail fully out. 2059 if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue(); 2060 2061 if (!UniquedVals[i&(Multiple-1)].getNode()) 2062 UniquedVals[i&(Multiple-1)] = N->getOperand(i); 2063 else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i)) 2064 return SDValue(); // no match. 2065 } 2066 2067 // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains 2068 // either constant or undef values that are identical for each chunk. See 2069 // if these chunks can form into a larger vspltis*. 2070 2071 // Check to see if all of the leading entries are either 0 or -1. If 2072 // neither, then this won't fit into the immediate field. 2073 bool LeadingZero = true; 2074 bool LeadingOnes = true; 2075 for (unsigned i = 0; i != Multiple-1; ++i) { 2076 if (!UniquedVals[i].getNode()) continue; // Must have been undefs. 2077 2078 LeadingZero &= isNullConstant(UniquedVals[i]); 2079 LeadingOnes &= isAllOnesConstant(UniquedVals[i]); 2080 } 2081 // Finally, check the least significant entry. 2082 if (LeadingZero) { 2083 if (!UniquedVals[Multiple-1].getNode()) 2084 return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef 2085 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue(); 2086 if (Val < 16) // 0,0,0,4 -> vspltisw(4) 2087 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32); 2088 } 2089 if (LeadingOnes) { 2090 if (!UniquedVals[Multiple-1].getNode()) 2091 return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef 2092 int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue(); 2093 if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2) 2094 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32); 2095 } 2096 2097 return SDValue(); 2098 } 2099 2100 // Check to see if this buildvec has a single non-undef value in its elements. 2101 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 2102 if (N->getOperand(i).isUndef()) continue; 2103 if (!OpVal.getNode()) 2104 OpVal = N->getOperand(i); 2105 else if (OpVal != N->getOperand(i)) 2106 return SDValue(); 2107 } 2108 2109 if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def. 2110 2111 unsigned ValSizeInBytes = EltSize; 2112 uint64_t Value = 0; 2113 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { 2114 Value = CN->getZExtValue(); 2115 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { 2116 assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!"); 2117 Value = FloatToBits(CN->getValueAPF().convertToFloat()); 2118 } 2119 2120 // If the splat value is larger than the element value, then we can never do 2121 // this splat. The only case that we could fit the replicated bits into our 2122 // immediate field for would be zero, and we prefer to use vxor for it. 2123 if (ValSizeInBytes < ByteSize) return SDValue(); 2124 2125 // If the element value is larger than the splat value, check if it consists 2126 // of a repeated bit pattern of size ByteSize. 2127 if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8)) 2128 return SDValue(); 2129 2130 // Properly sign extend the value. 2131 int MaskVal = SignExtend32(Value, ByteSize * 8); 2132 2133 // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros. 
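  // (An all-zeros splat is better materialized with vxor, as noted above.)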
2134 if (MaskVal == 0) return SDValue(); 2135 2136 // Finally, if this value fits in a 5 bit sext field, return it 2137 if (SignExtend32<5>(MaskVal) == MaskVal) 2138 return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32); 2139 return SDValue(); 2140 } 2141 2142 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift 2143 /// amount, otherwise return -1. 2144 int PPC::isQVALIGNIShuffleMask(SDNode *N) { 2145 EVT VT = N->getValueType(0); 2146 if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1) 2147 return -1; 2148 2149 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 2150 2151 // Find the first non-undef value in the shuffle mask. 2152 unsigned i; 2153 for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i) 2154 /*search*/; 2155 2156 if (i == 4) return -1; // all undef. 2157 2158 // Otherwise, check to see if the rest of the elements are consecutively 2159 // numbered from this value. 2160 unsigned ShiftAmt = SVOp->getMaskElt(i); 2161 if (ShiftAmt < i) return -1; 2162 ShiftAmt -= i; 2163 2164 // Check the rest of the elements to see if they are consecutive. 2165 for (++i; i != 4; ++i) 2166 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 2167 return -1; 2168 2169 return ShiftAmt; 2170 } 2171 2172 //===----------------------------------------------------------------------===// 2173 // Addressing Mode Selection 2174 //===----------------------------------------------------------------------===// 2175 2176 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit 2177 /// or 64-bit immediate, and if the value can be accurately represented as a 2178 /// sign extension from a 16-bit value. If so, this returns true and the 2179 /// immediate. 2180 bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) { 2181 if (!isa<ConstantSDNode>(N)) 2182 return false; 2183 2184 Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue(); 2185 if (N->getValueType(0) == MVT::i32) 2186 return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue(); 2187 else 2188 return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue(); 2189 } 2190 bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) { 2191 return isIntS16Immediate(Op.getNode(), Imm); 2192 } 2193 2194 /// SelectAddressRegReg - Given the specified addressed, check to see if it 2195 /// can be represented as an indexed [r+r] operation. Returns false if it 2196 /// can be more efficiently represented with [r+imm]. 2197 bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base, 2198 SDValue &Index, 2199 SelectionDAG &DAG) const { 2200 int16_t imm = 0; 2201 if (N.getOpcode() == ISD::ADD) { 2202 if (isIntS16Immediate(N.getOperand(1), imm)) 2203 return false; // r+i 2204 if (N.getOperand(1).getOpcode() == PPCISD::Lo) 2205 return false; // r+i 2206 2207 Base = N.getOperand(0); 2208 Index = N.getOperand(1); 2209 return true; 2210 } else if (N.getOpcode() == ISD::OR) { 2211 if (isIntS16Immediate(N.getOperand(1), imm)) 2212 return false; // r+i can fold it if we can. 2213 2214 // If this is an or of disjoint bitfields, we can codegen this as an add 2215 // (for better address arithmetic) if the LHS and RHS of the OR are provably 2216 // disjoint. 2217 KnownBits LHSKnown, RHSKnown; 2218 DAG.computeKnownBits(N.getOperand(0), LHSKnown); 2219 2220 if (LHSKnown.Zero.getBoolValue()) { 2221 DAG.computeKnownBits(N.getOperand(1), RHSKnown); 2222 // If all of the bits are known zero on the LHS or RHS, the add won't 2223 // carry. 
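      // Every bit position is known to be zero in at least one of the two
      // operands, so this OR behaves exactly like an ADD and the [r+r] form
      // is safe to use.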
2224 if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) { 2225 Base = N.getOperand(0); 2226 Index = N.getOperand(1); 2227 return true; 2228 } 2229 } 2230 } 2231 2232 return false; 2233 } 2234 2235 // If we happen to be doing an i64 load or store into a stack slot that has 2236 // less than a 4-byte alignment, then the frame-index elimination may need to 2237 // use an indexed load or store instruction (because the offset may not be a 2238 // multiple of 4). The extra register needed to hold the offset comes from the 2239 // register scavenger, and it is possible that the scavenger will need to use 2240 // an emergency spill slot. As a result, we need to make sure that a spill slot 2241 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned 2242 // stack slot. 2243 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) { 2244 // FIXME: This does not handle the LWA case. 2245 if (VT != MVT::i64) 2246 return; 2247 2248 // NOTE: We'll exclude negative FIs here, which come from argument 2249 // lowering, because there are no known test cases triggering this problem 2250 // using packed structures (or similar). We can remove this exclusion if 2251 // we find such a test case. The reason why this is so test-case driven is 2252 // because this entire 'fixup' is only to prevent crashes (from the 2253 // register scavenger) on not-really-valid inputs. For example, if we have: 2254 // %a = alloca i1 2255 // %b = bitcast i1* %a to i64* 2256 // store i64* a, i64 b 2257 // then the store should really be marked as 'align 1', but is not. If it 2258 // were marked as 'align 1' then the indexed form would have been 2259 // instruction-selected initially, and the problem this 'fixup' is preventing 2260 // won't happen regardless. 2261 if (FrameIdx < 0) 2262 return; 2263 2264 MachineFunction &MF = DAG.getMachineFunction(); 2265 MachineFrameInfo &MFI = MF.getFrameInfo(); 2266 2267 unsigned Align = MFI.getObjectAlignment(FrameIdx); 2268 if (Align >= 4) 2269 return; 2270 2271 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2272 FuncInfo->setHasNonRISpills(); 2273 } 2274 2275 /// Returns true if the address N can be represented by a base register plus 2276 /// a signed 16-bit displacement [r+imm], and if it is not better 2277 /// represented as reg+reg. If \p Alignment is non-zero, only accept 2278 /// displacements that are multiples of that value. 2279 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp, 2280 SDValue &Base, 2281 SelectionDAG &DAG, 2282 unsigned Alignment) const { 2283 // FIXME dl should come from parent load or store, not from address 2284 SDLoc dl(N); 2285 // If this can be more profitably realized as r+r, fail. 2286 if (SelectAddressRegReg(N, Disp, Base, DAG)) 2287 return false; 2288 2289 if (N.getOpcode() == ISD::ADD) { 2290 int16_t imm = 0; 2291 if (isIntS16Immediate(N.getOperand(1), imm) && 2292 (!Alignment || (imm % Alignment) == 0)) { 2293 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 2294 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 2295 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2296 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2297 } else { 2298 Base = N.getOperand(0); 2299 } 2300 return true; // [r+i] 2301 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 2302 // Match LOAD (ADD (X, Lo(G))). 
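      // Lo(G) supplies the low 16 bits of the symbol, which become the
      // displacement; X becomes the base register.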
2303 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() 2304 && "Cannot handle constant offsets yet!"); 2305 Disp = N.getOperand(1).getOperand(0); // The global address. 2306 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 2307 Disp.getOpcode() == ISD::TargetGlobalTLSAddress || 2308 Disp.getOpcode() == ISD::TargetConstantPool || 2309 Disp.getOpcode() == ISD::TargetJumpTable); 2310 Base = N.getOperand(0); 2311 return true; // [&g+r] 2312 } 2313 } else if (N.getOpcode() == ISD::OR) { 2314 int16_t imm = 0; 2315 if (isIntS16Immediate(N.getOperand(1), imm) && 2316 (!Alignment || (imm % Alignment) == 0)) { 2317 // If this is an or of disjoint bitfields, we can codegen this as an add 2318 // (for better address arithmetic) if the LHS and RHS of the OR are 2319 // provably disjoint. 2320 KnownBits LHSKnown; 2321 DAG.computeKnownBits(N.getOperand(0), LHSKnown); 2322 2323 if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 2324 // If all of the bits are known zero on the LHS or RHS, the add won't 2325 // carry. 2326 if (FrameIndexSDNode *FI = 2327 dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 2328 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2329 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2330 } else { 2331 Base = N.getOperand(0); 2332 } 2333 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 2334 return true; 2335 } 2336 } 2337 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 2338 // Loading from a constant address. 2339 2340 // If this address fits entirely in a 16-bit sext immediate field, codegen 2341 // this as "d, 0" 2342 int16_t Imm; 2343 if (isIntS16Immediate(CN, Imm) && (!Alignment || (Imm % Alignment) == 0)) { 2344 Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0)); 2345 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, 2346 CN->getValueType(0)); 2347 return true; 2348 } 2349 2350 // Handle 32-bit sext immediates with LIS + addr mode. 2351 if ((CN->getValueType(0) == MVT::i32 || 2352 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) && 2353 (!Alignment || (CN->getZExtValue() % Alignment) == 0)) { 2354 int Addr = (int)CN->getZExtValue(); 2355 2356 // Otherwise, break this down into an LIS + disp. 2357 Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32); 2358 2359 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl, 2360 MVT::i32); 2361 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8; 2362 Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0); 2363 return true; 2364 } 2365 } 2366 2367 Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout())); 2368 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) { 2369 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2370 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2371 } else 2372 Base = N; 2373 return true; // [r+0] 2374 } 2375 2376 /// SelectAddressRegRegOnly - Given the specified addressed, force it to be 2377 /// represented as an indexed [r+r] operation. 2378 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base, 2379 SDValue &Index, 2380 SelectionDAG &DAG) const { 2381 // Check to see if we can easily represent this as an [r+r] address. This 2382 // will fail if it thinks that the address is more profitably represented as 2383 // reg+imm, e.g. where imm = 0. 
2384 if (SelectAddressRegReg(N, Base, Index, DAG)) 2385 return true; 2386 2387 // If the address is the result of an add, we will utilize the fact that the 2388 // address calculation includes an implicit add. However, we can reduce 2389 // register pressure if we do not materialize a constant just for use as the 2390 // index register. We only get rid of the add if it is not an add of a 2391 // value and a 16-bit signed constant and both have a single use. 2392 int16_t imm = 0; 2393 if (N.getOpcode() == ISD::ADD && 2394 (!isIntS16Immediate(N.getOperand(1), imm) || 2395 !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) { 2396 Base = N.getOperand(0); 2397 Index = N.getOperand(1); 2398 return true; 2399 } 2400 2401 // Otherwise, do it the hard way, using R0 as the base register. 2402 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, 2403 N.getValueType()); 2404 Index = N; 2405 return true; 2406 } 2407 2408 /// getPreIndexedAddressParts - returns true by value, base pointer and 2409 /// offset pointer and addressing mode by reference if the node's address 2410 /// can be legally represented as pre-indexed load / store address. 2411 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 2412 SDValue &Offset, 2413 ISD::MemIndexedMode &AM, 2414 SelectionDAG &DAG) const { 2415 if (DisablePPCPreinc) return false; 2416 2417 bool isLoad = true; 2418 SDValue Ptr; 2419 EVT VT; 2420 unsigned Alignment; 2421 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 2422 Ptr = LD->getBasePtr(); 2423 VT = LD->getMemoryVT(); 2424 Alignment = LD->getAlignment(); 2425 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 2426 Ptr = ST->getBasePtr(); 2427 VT = ST->getMemoryVT(); 2428 Alignment = ST->getAlignment(); 2429 isLoad = false; 2430 } else 2431 return false; 2432 2433 // PowerPC doesn't have preinc load/store instructions for vectors (except 2434 // for QPX, which does have preinc r+r forms). 2435 if (VT.isVector()) { 2436 if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) { 2437 return false; 2438 } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) { 2439 AM = ISD::PRE_INC; 2440 return true; 2441 } 2442 } 2443 2444 if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) { 2445 // Common code will reject creating a pre-inc form if the base pointer 2446 // is a frame index, or if N is a store and the base pointer is either 2447 // the same as or a predecessor of the value being stored. Check for 2448 // those situations here, and try with swapped Base/Offset instead. 2449 bool Swap = false; 2450 2451 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base)) 2452 Swap = true; 2453 else if (!isLoad) { 2454 SDValue Val = cast<StoreSDNode>(N)->getValue(); 2455 if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode())) 2456 Swap = true; 2457 } 2458 2459 if (Swap) 2460 std::swap(Base, Offset); 2461 2462 AM = ISD::PRE_INC; 2463 return true; 2464 } 2465 2466 // LDU/STU can only handle immediates that are a multiple of 4. 2467 if (VT != MVT::i64) { 2468 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 0)) 2469 return false; 2470 } else { 2471 // LDU/STU need an address with at least 4-byte alignment. 2472 if (Alignment < 4) 2473 return false; 2474 2475 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 4)) 2476 return false; 2477 } 2478 2479 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 2480 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 2481 // sext i32 to i64 when addr mode is r+i. 
2482 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 && 2483 LD->getExtensionType() == ISD::SEXTLOAD && 2484 isa<ConstantSDNode>(Offset)) 2485 return false; 2486 } 2487 2488 AM = ISD::PRE_INC; 2489 return true; 2490 } 2491 2492 //===----------------------------------------------------------------------===// 2493 // LowerOperation implementation 2494 //===----------------------------------------------------------------------===// 2495 2496 /// Return true if we should reference labels using a PICBase, set the HiOpFlags 2497 /// and LoOpFlags to the target MO flags. 2498 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget, 2499 unsigned &HiOpFlags, unsigned &LoOpFlags, 2500 const GlobalValue *GV = nullptr) { 2501 HiOpFlags = PPCII::MO_HA; 2502 LoOpFlags = PPCII::MO_LO; 2503 2504 // Don't use the pic base if not in PIC relocation model. 2505 if (IsPIC) { 2506 HiOpFlags |= PPCII::MO_PIC_FLAG; 2507 LoOpFlags |= PPCII::MO_PIC_FLAG; 2508 } 2509 2510 // If this is a reference to a global value that requires a non-lazy-ptr, make 2511 // sure that instruction lowering adds it. 2512 if (GV && Subtarget.hasLazyResolverStub(GV)) { 2513 HiOpFlags |= PPCII::MO_NLP_FLAG; 2514 LoOpFlags |= PPCII::MO_NLP_FLAG; 2515 2516 if (GV->hasHiddenVisibility()) { 2517 HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 2518 LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 2519 } 2520 } 2521 } 2522 2523 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, 2524 SelectionDAG &DAG) { 2525 SDLoc DL(HiPart); 2526 EVT PtrVT = HiPart.getValueType(); 2527 SDValue Zero = DAG.getConstant(0, DL, PtrVT); 2528 2529 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); 2530 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); 2531 2532 // With PIC, the first instruction is actually "GR+hi(&G)". 2533 if (isPIC) 2534 Hi = DAG.getNode(ISD::ADD, DL, PtrVT, 2535 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); 2536 2537 // Generate non-pic code that has direct accesses to the constant pool. 2538 // The address of the global is just (hi(&g)+lo(&g)). 2539 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); 2540 } 2541 2542 static void setUsesTOCBasePtr(MachineFunction &MF) { 2543 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2544 FuncInfo->setUsesTOCBasePtr(); 2545 } 2546 2547 static void setUsesTOCBasePtr(SelectionDAG &DAG) { 2548 setUsesTOCBasePtr(DAG.getMachineFunction()); 2549 } 2550 2551 static SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, bool Is64Bit, 2552 SDValue GA) { 2553 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 2554 SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) : 2555 DAG.getNode(PPCISD::GlobalBaseReg, dl, VT); 2556 2557 SDValue Ops[] = { GA, Reg }; 2558 return DAG.getMemIntrinsicNode( 2559 PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT, 2560 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0, 2561 MachineMemOperand::MOLoad); 2562 } 2563 2564 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 2565 SelectionDAG &DAG) const { 2566 EVT PtrVT = Op.getValueType(); 2567 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 2568 const Constant *C = CP->getConstVal(); 2569 2570 // 64-bit SVR4 ABI code is always position-independent. 2571 // The actual address of the GlobalValue is stored in the TOC. 
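  // A single TOC_ENTRY load (built by getTOCEntry above) then produces the
  // constant pool entry's address.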
2572 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2573 setUsesTOCBasePtr(DAG); 2574 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0); 2575 return getTOCEntry(DAG, SDLoc(CP), true, GA); 2576 } 2577 2578 unsigned MOHiFlag, MOLoFlag; 2579 bool IsPIC = isPositionIndependent(); 2580 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2581 2582 if (IsPIC && Subtarget.isSVR4ABI()) { 2583 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 2584 PPCII::MO_PIC_FLAG); 2585 return getTOCEntry(DAG, SDLoc(CP), false, GA); 2586 } 2587 2588 SDValue CPIHi = 2589 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag); 2590 SDValue CPILo = 2591 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag); 2592 return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG); 2593 } 2594 2595 // For 64-bit PowerPC, prefer the more compact relative encodings. 2596 // This trades 32 bits per jump table entry for one or two instructions 2597 // on the jump site. 2598 unsigned PPCTargetLowering::getJumpTableEncoding() const { 2599 if (isJumpTableRelative()) 2600 return MachineJumpTableInfo::EK_LabelDifference32; 2601 2602 return TargetLowering::getJumpTableEncoding(); 2603 } 2604 2605 bool PPCTargetLowering::isJumpTableRelative() const { 2606 if (Subtarget.isPPC64()) 2607 return true; 2608 return TargetLowering::isJumpTableRelative(); 2609 } 2610 2611 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table, 2612 SelectionDAG &DAG) const { 2613 if (!Subtarget.isPPC64()) 2614 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 2615 2616 switch (getTargetMachine().getCodeModel()) { 2617 case CodeModel::Small: 2618 case CodeModel::Medium: 2619 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 2620 default: 2621 return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(), 2622 getPointerTy(DAG.getDataLayout())); 2623 } 2624 } 2625 2626 const MCExpr * 2627 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF, 2628 unsigned JTI, 2629 MCContext &Ctx) const { 2630 if (!Subtarget.isPPC64()) 2631 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 2632 2633 switch (getTargetMachine().getCodeModel()) { 2634 case CodeModel::Small: 2635 case CodeModel::Medium: 2636 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 2637 default: 2638 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx); 2639 } 2640 } 2641 2642 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 2643 EVT PtrVT = Op.getValueType(); 2644 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 2645 2646 // 64-bit SVR4 ABI code is always position-independent. 2647 // The actual address of the GlobalValue is stored in the TOC. 
2648 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2649 setUsesTOCBasePtr(DAG); 2650 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 2651 return getTOCEntry(DAG, SDLoc(JT), true, GA); 2652 } 2653 2654 unsigned MOHiFlag, MOLoFlag; 2655 bool IsPIC = isPositionIndependent(); 2656 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2657 2658 if (IsPIC && Subtarget.isSVR4ABI()) { 2659 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 2660 PPCII::MO_PIC_FLAG); 2661 return getTOCEntry(DAG, SDLoc(GA), false, GA); 2662 } 2663 2664 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 2665 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 2666 return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG); 2667 } 2668 2669 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 2670 SelectionDAG &DAG) const { 2671 EVT PtrVT = Op.getValueType(); 2672 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); 2673 const BlockAddress *BA = BASDN->getBlockAddress(); 2674 2675 // 64-bit SVR4 ABI code is always position-independent. 2676 // The actual BlockAddress is stored in the TOC. 2677 if (Subtarget.isSVR4ABI() && isPositionIndependent()) { 2678 if (Subtarget.isPPC64()) 2679 setUsesTOCBasePtr(DAG); 2680 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); 2681 return getTOCEntry(DAG, SDLoc(BASDN), Subtarget.isPPC64(), GA); 2682 } 2683 2684 unsigned MOHiFlag, MOLoFlag; 2685 bool IsPIC = isPositionIndependent(); 2686 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2687 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 2688 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 2689 return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG); 2690 } 2691 2692 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 2693 SelectionDAG &DAG) const { 2694 // FIXME: TLS addresses currently use medium model code sequences, 2695 // which is the most useful form. Eventually support for small and 2696 // large models could be added if users need it, at the cost of 2697 // additional complexity. 2698 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2699 if (DAG.getTarget().useEmulatedTLS()) 2700 return LowerToTLSEmulatedModel(GA, DAG); 2701 2702 SDLoc dl(GA); 2703 const GlobalValue *GV = GA->getGlobal(); 2704 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2705 bool is64bit = Subtarget.isPPC64(); 2706 const Module *M = DAG.getMachineFunction().getFunction().getParent(); 2707 PICLevel::Level picLevel = M->getPICLevel(); 2708 2709 TLSModel::Model Model = getTargetMachine().getTLSModel(GV); 2710 2711 if (Model == TLSModel::LocalExec) { 2712 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2713 PPCII::MO_TPREL_HA); 2714 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2715 PPCII::MO_TPREL_LO); 2716 SDValue TLSReg = is64bit ? 
DAG.getRegister(PPC::X13, MVT::i64) 2717 : DAG.getRegister(PPC::R2, MVT::i32); 2718 2719 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 2720 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 2721 } 2722 2723 if (Model == TLSModel::InitialExec) { 2724 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2725 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2726 PPCII::MO_TLS); 2727 SDValue GOTPtr; 2728 if (is64bit) { 2729 setUsesTOCBasePtr(DAG); 2730 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2731 GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, 2732 PtrVT, GOTReg, TGA); 2733 } else 2734 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); 2735 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, 2736 PtrVT, TGA, GOTPtr); 2737 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 2738 } 2739 2740 if (Model == TLSModel::GeneralDynamic) { 2741 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2742 SDValue GOTPtr; 2743 if (is64bit) { 2744 setUsesTOCBasePtr(DAG); 2745 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2746 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 2747 GOTReg, TGA); 2748 } else { 2749 if (picLevel == PICLevel::SmallPIC) 2750 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2751 else 2752 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2753 } 2754 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT, 2755 GOTPtr, TGA, TGA); 2756 } 2757 2758 if (Model == TLSModel::LocalDynamic) { 2759 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2760 SDValue GOTPtr; 2761 if (is64bit) { 2762 setUsesTOCBasePtr(DAG); 2763 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2764 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 2765 GOTReg, TGA); 2766 } else { 2767 if (picLevel == PICLevel::SmallPIC) 2768 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2769 else 2770 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2771 } 2772 SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl, 2773 PtrVT, GOTPtr, TGA, TGA); 2774 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, 2775 PtrVT, TLSAddr, TGA); 2776 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA); 2777 } 2778 2779 llvm_unreachable("Unknown TLS model!"); 2780 } 2781 2782 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 2783 SelectionDAG &DAG) const { 2784 EVT PtrVT = Op.getValueType(); 2785 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 2786 SDLoc DL(GSDN); 2787 const GlobalValue *GV = GSDN->getGlobal(); 2788 2789 // 64-bit SVR4 ABI code is always position-independent. 2790 // The actual address of the GlobalValue is stored in the TOC. 
2791 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2792 setUsesTOCBasePtr(DAG); 2793 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); 2794 return getTOCEntry(DAG, DL, true, GA); 2795 } 2796 2797 unsigned MOHiFlag, MOLoFlag; 2798 bool IsPIC = isPositionIndependent(); 2799 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV); 2800 2801 if (IsPIC && Subtarget.isSVR4ABI()) { 2802 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 2803 GSDN->getOffset(), 2804 PPCII::MO_PIC_FLAG); 2805 return getTOCEntry(DAG, DL, false, GA); 2806 } 2807 2808 SDValue GAHi = 2809 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); 2810 SDValue GALo = 2811 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); 2812 2813 SDValue Ptr = LowerLabelRef(GAHi, GALo, IsPIC, DAG); 2814 2815 // If the global reference is actually to a non-lazy-pointer, we have to do an 2816 // extra load to get the address of the global. 2817 if (MOHiFlag & PPCII::MO_NLP_FLAG) 2818 Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo()); 2819 return Ptr; 2820 } 2821 2822 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 2823 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2824 SDLoc dl(Op); 2825 2826 if (Op.getValueType() == MVT::v2i64) { 2827 // When the operands themselves are v2i64 values, we need to do something 2828 // special because VSX has no underlying comparison operations for these. 2829 if (Op.getOperand(0).getValueType() == MVT::v2i64) { 2830 // Equality can be handled by casting to the legal type for Altivec 2831 // comparisons, everything else needs to be expanded. 2832 if (CC == ISD::SETEQ || CC == ISD::SETNE) { 2833 return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 2834 DAG.getSetCC(dl, MVT::v4i32, 2835 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)), 2836 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)), 2837 CC)); 2838 } 2839 2840 return SDValue(); 2841 } 2842 2843 // We handle most of these in the usual way. 2844 return Op; 2845 } 2846 2847 // If we're comparing for equality to zero, expose the fact that this is 2848 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can 2849 // fold the new nodes. 2850 if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG)) 2851 return V; 2852 2853 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2854 // Leave comparisons against 0 and -1 alone for now, since they're usually 2855 // optimized. FIXME: revisit this when we can custom lower all setcc 2856 // optimizations. 2857 if (C->isAllOnesValue() || C->isNullValue()) 2858 return SDValue(); 2859 } 2860 2861 // If we have an integer seteq/setne, turn it into a compare against zero 2862 // by xor'ing the rhs with the lhs, which is faster than setting a 2863 // condition register, reading it back out, and masking the correct bit. The 2864 // normal approach here uses sub to do this instead of xor. Using xor exposes 2865 // the result to other bit-twiddling opportunities. 
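// For example (illustrative): (a == b) becomes ((a ^ b) == 0) and
// (a != b) becomes ((a ^ b) != 0) in the code below.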
2866 EVT LHSVT = Op.getOperand(0).getValueType(); 2867 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 2868 EVT VT = Op.getValueType(); 2869 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0), 2870 Op.getOperand(1)); 2871 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC); 2872 } 2873 return SDValue(); 2874 } 2875 2876 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { 2877 SDNode *Node = Op.getNode(); 2878 EVT VT = Node->getValueType(0); 2879 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2880 SDValue InChain = Node->getOperand(0); 2881 SDValue VAListPtr = Node->getOperand(1); 2882 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 2883 SDLoc dl(Node); 2884 2885 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only"); 2886 2887 // gpr_index 2888 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 2889 VAListPtr, MachinePointerInfo(SV), MVT::i8); 2890 InChain = GprIndex.getValue(1); 2891 2892 if (VT == MVT::i64) { 2893 // Check if GprIndex is even 2894 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex, 2895 DAG.getConstant(1, dl, MVT::i32)); 2896 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd, 2897 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE); 2898 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex, 2899 DAG.getConstant(1, dl, MVT::i32)); 2900 // Align GprIndex to be even if it isn't 2901 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne, 2902 GprIndex); 2903 } 2904 2905 // fpr index is 1 byte after gpr 2906 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2907 DAG.getConstant(1, dl, MVT::i32)); 2908 2909 // fpr 2910 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 2911 FprPtr, MachinePointerInfo(SV), MVT::i8); 2912 InChain = FprIndex.getValue(1); 2913 2914 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2915 DAG.getConstant(8, dl, MVT::i32)); 2916 2917 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2918 DAG.getConstant(4, dl, MVT::i32)); 2919 2920 // areas 2921 SDValue OverflowArea = 2922 DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo()); 2923 InChain = OverflowArea.getValue(1); 2924 2925 SDValue RegSaveArea = 2926 DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo()); 2927 InChain = RegSaveArea.getValue(1); 2928 2929 // select overflow_area if index > 8 2930 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex, 2931 DAG.getConstant(8, dl, MVT::i32), ISD::SETLT); 2932 2933 // adjustment constant gpr_index * 4/8 2934 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32, 2935 VT.isInteger() ? GprIndex : FprIndex, 2936 DAG.getConstant(VT.isInteger() ? 4 : 8, dl, 2937 MVT::i32)); 2938 2939 // OurReg = RegSaveArea + RegConstant 2940 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea, 2941 RegConstant); 2942 2943 // Floating types are 32 bytes into RegSaveArea 2944 if (VT.isFloatingPoint()) 2945 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg, 2946 DAG.getConstant(32, dl, MVT::i32)); 2947 2948 // increase {f,g}pr_index by 1 (or 2 if VT is i64) 2949 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32, 2950 VT.isInteger() ? GprIndex : FprIndex, 2951 DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl, 2952 MVT::i32)); 2953 2954 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1, 2955 VT.isInteger() ? 
VAListPtr : FprPtr, 2956 MachinePointerInfo(SV), MVT::i8); 2957 2958 // determine if we should load from reg_save_area or overflow_area 2959 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea); 2960 2961 // increase overflow_area by 4/8 if gpr/fpr > 8 2962 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea, 2963 DAG.getConstant(VT.isInteger() ? 4 : 8, 2964 dl, MVT::i32)); 2965 2966 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea, 2967 OverflowAreaPlusN); 2968 2969 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr, 2970 MachinePointerInfo(), MVT::i32); 2971 2972 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo()); 2973 } 2974 2975 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { 2976 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only"); 2977 2978 // We have to copy the entire va_list struct: 2979 // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte 2980 return DAG.getMemcpy(Op.getOperand(0), Op, 2981 Op.getOperand(1), Op.getOperand(2), 2982 DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true, 2983 false, MachinePointerInfo(), MachinePointerInfo()); 2984 } 2985 2986 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 2987 SelectionDAG &DAG) const { 2988 return Op.getOperand(0); 2989 } 2990 2991 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 2992 SelectionDAG &DAG) const { 2993 SDValue Chain = Op.getOperand(0); 2994 SDValue Trmp = Op.getOperand(1); // trampoline 2995 SDValue FPtr = Op.getOperand(2); // nested function 2996 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 2997 SDLoc dl(Op); 2998 2999 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3000 bool isPPC64 = (PtrVT == MVT::i64); 3001 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext()); 3002 3003 TargetLowering::ArgListTy Args; 3004 TargetLowering::ArgListEntry Entry; 3005 3006 Entry.Ty = IntPtrTy; 3007 Entry.Node = Trmp; Args.push_back(Entry); 3008 3009 // TrampSize == (isPPC64 ? 48 : 40); 3010 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl, 3011 isPPC64 ? MVT::i64 : MVT::i32); 3012 Args.push_back(Entry); 3013 3014 Entry.Node = FPtr; Args.push_back(Entry); 3015 Entry.Node = Nest; Args.push_back(Entry); 3016 3017 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 3018 TargetLowering::CallLoweringInfo CLI(DAG); 3019 CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( 3020 CallingConv::C, Type::getVoidTy(*DAG.getContext()), 3021 DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args)); 3022 3023 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 3024 return CallResult.second; 3025 } 3026 3027 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 3028 MachineFunction &MF = DAG.getMachineFunction(); 3029 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3030 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3031 3032 SDLoc dl(Op); 3033 3034 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) { 3035 // vastart just stores the address of the VarArgsFrameIndex slot into the 3036 // memory location argument. 3037 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3038 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 3039 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 3040 MachinePointerInfo(SV)); 3041 } 3042 3043 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 
3044 // We suppose the given va_list is already allocated. 3045 // 3046 // typedef struct { 3047 // char gpr; /* index into the array of 8 GPRs 3048 // * stored in the register save area 3049 // * gpr=0 corresponds to r3, 3050 // * gpr=1 to r4, etc. 3051 // */ 3052 // char fpr; /* index into the array of 8 FPRs 3053 // * stored in the register save area 3054 // * fpr=0 corresponds to f1, 3055 // * fpr=1 to f2, etc. 3056 // */ 3057 // char *overflow_arg_area; 3058 // /* location on stack that holds 3059 // * the next overflow argument 3060 // */ 3061 // char *reg_save_area; 3062 // /* where r3:r10 and f1:f8 (if saved) 3063 // * are stored 3064 // */ 3065 // } va_list[1]; 3066 3067 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32); 3068 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32); 3069 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 3070 PtrVT); 3071 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 3072 PtrVT); 3073 3074 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 3075 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT); 3076 3077 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 3078 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT); 3079 3080 uint64_t FPROffset = 1; 3081 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT); 3082 3083 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 3084 3085 // Store first byte : number of int regs 3086 SDValue firstStore = 3087 DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1), 3088 MachinePointerInfo(SV), MVT::i8); 3089 uint64_t nextOffset = FPROffset; 3090 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 3091 ConstFPROffset); 3092 3093 // Store second byte : number of float regs 3094 SDValue secondStore = 3095 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 3096 MachinePointerInfo(SV, nextOffset), MVT::i8); 3097 nextOffset += StackOffset; 3098 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 3099 3100 // Store second word : arguments given on stack 3101 SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 3102 MachinePointerInfo(SV, nextOffset)); 3103 nextOffset += FrameOffset; 3104 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 3105 3106 // Store third word : arguments given in registers 3107 return DAG.getStore(thirdStore, dl, FR, nextPtr, 3108 MachinePointerInfo(SV, nextOffset)); 3109 } 3110 3111 #include "PPCGenCallingConv.inc" 3112 3113 // Function whose sole purpose is to kill compiler warnings 3114 // stemming from unused functions included from PPCGenCallingConv.inc. 3115 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const { 3116 return Flag ? 
CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS; 3117 } 3118 3119 bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 3120 CCValAssign::LocInfo &LocInfo, 3121 ISD::ArgFlagsTy &ArgFlags, 3122 CCState &State) { 3123 return true; 3124 } 3125 3126 bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, 3127 MVT &LocVT, 3128 CCValAssign::LocInfo &LocInfo, 3129 ISD::ArgFlagsTy &ArgFlags, 3130 CCState &State) { 3131 static const MCPhysReg ArgRegs[] = { 3132 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3133 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3134 }; 3135 const unsigned NumArgRegs = array_lengthof(ArgRegs); 3136 3137 unsigned RegNum = State.getFirstUnallocated(ArgRegs); 3138 3139 // Skip one register if the first unallocated register has an even register 3140 // number and there are still argument registers available which have not been 3141 // allocated yet. RegNum is actually an index into ArgRegs, which means we 3142 // need to skip a register if RegNum is odd. 3143 if (RegNum != NumArgRegs && RegNum % 2 == 1) { 3144 State.AllocateReg(ArgRegs[RegNum]); 3145 } 3146 3147 // Always return false here, as this function only makes sure that the first 3148 // unallocated register has an odd register number and does not actually 3149 // allocate a register for the current argument. 3150 return false; 3151 } 3152 3153 bool 3154 llvm::CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128(unsigned &ValNo, MVT &ValVT, 3155 MVT &LocVT, 3156 CCValAssign::LocInfo &LocInfo, 3157 ISD::ArgFlagsTy &ArgFlags, 3158 CCState &State) { 3159 static const MCPhysReg ArgRegs[] = { 3160 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3161 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3162 }; 3163 const unsigned NumArgRegs = array_lengthof(ArgRegs); 3164 3165 unsigned RegNum = State.getFirstUnallocated(ArgRegs); 3166 int RegsLeft = NumArgRegs - RegNum; 3167 3168 // Skip if there is not enough registers left for long double type (4 gpr regs 3169 // in soft float mode) and put long double argument on the stack. 3170 if (RegNum != NumArgRegs && RegsLeft < 4) { 3171 for (int i = 0; i < RegsLeft; i++) { 3172 State.AllocateReg(ArgRegs[RegNum + i]); 3173 } 3174 } 3175 3176 return false; 3177 } 3178 3179 bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT, 3180 MVT &LocVT, 3181 CCValAssign::LocInfo &LocInfo, 3182 ISD::ArgFlagsTy &ArgFlags, 3183 CCState &State) { 3184 static const MCPhysReg ArgRegs[] = { 3185 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 3186 PPC::F8 3187 }; 3188 3189 const unsigned NumArgRegs = array_lengthof(ArgRegs); 3190 3191 unsigned RegNum = State.getFirstUnallocated(ArgRegs); 3192 3193 // If there is only one Floating-point register left we need to put both f64 3194 // values of a split ppc_fp128 value on the stack. 3195 if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) { 3196 State.AllocateReg(ArgRegs[RegNum]); 3197 } 3198 3199 // Always return false here, as this function only makes sure that the two f64 3200 // values a ppc_fp128 value is split into are both passed in registers or both 3201 // passed on the stack and does not actually allocate a register for the 3202 // current argument. 3203 return false; 3204 } 3205 3206 /// FPR - The set of FP registers that should be allocated for arguments, 3207 /// on Darwin. 
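/// (Note, added for clarity: despite the Darwin reference, the 64-bit SVR4
/// argument lowering below draws FPR arguments from this same F1-F13 set.)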
3208 static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, 3209 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10, 3210 PPC::F11, PPC::F12, PPC::F13}; 3211 3212 /// QFPR - The set of QPX registers that should be allocated for arguments. 3213 static const MCPhysReg QFPR[] = { 3214 PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7, 3215 PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13}; 3216 3217 /// CalculateStackSlotSize - Calculates the size reserved for this argument on 3218 /// the stack. 3219 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 3220 unsigned PtrByteSize) { 3221 unsigned ArgSize = ArgVT.getStoreSize(); 3222 if (Flags.isByVal()) 3223 ArgSize = Flags.getByValSize(); 3224 3225 // Round up to multiples of the pointer size, except for array members, 3226 // which are always packed. 3227 if (!Flags.isInConsecutiveRegs()) 3228 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3229 3230 return ArgSize; 3231 } 3232 3233 /// CalculateStackSlotAlignment - Calculates the alignment of this argument 3234 /// on the stack. 3235 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, 3236 ISD::ArgFlagsTy Flags, 3237 unsigned PtrByteSize) { 3238 unsigned Align = PtrByteSize; 3239 3240 // Altivec parameters are padded to a 16 byte boundary. 3241 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 3242 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 3243 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 3244 ArgVT == MVT::v1i128 || ArgVT == MVT::f128) 3245 Align = 16; 3246 // QPX vector types stored in double-precision are padded to a 32 byte 3247 // boundary. 3248 else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1) 3249 Align = 32; 3250 3251 // ByVal parameters are aligned as requested. 3252 if (Flags.isByVal()) { 3253 unsigned BVAlign = Flags.getByValAlign(); 3254 if (BVAlign > PtrByteSize) { 3255 if (BVAlign % PtrByteSize != 0) 3256 llvm_unreachable( 3257 "ByVal alignment is not a multiple of the pointer size"); 3258 3259 Align = BVAlign; 3260 } 3261 } 3262 3263 // Array members are always packed to their original alignment. 3264 if (Flags.isInConsecutiveRegs()) { 3265 // If the array member was split into multiple registers, the first 3266 // needs to be aligned to the size of the full type. (Except for 3267 // ppcf128, which is only aligned as its f64 components.) 3268 if (Flags.isSplit() && OrigVT != MVT::ppcf128) 3269 Align = OrigVT.getStoreSize(); 3270 else 3271 Align = ArgVT.getStoreSize(); 3272 } 3273 3274 return Align; 3275 } 3276 3277 /// CalculateStackSlotUsed - Return whether this argument will use its 3278 /// stack slot (instead of being passed in registers). ArgOffset, 3279 /// AvailableFPRs, and AvailableVRs must hold the current argument 3280 /// position, and will be updated to account for this argument. 3281 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, 3282 ISD::ArgFlagsTy Flags, 3283 unsigned PtrByteSize, 3284 unsigned LinkageSize, 3285 unsigned ParamAreaSize, 3286 unsigned &ArgOffset, 3287 unsigned &AvailableFPRs, 3288 unsigned &AvailableVRs, bool HasQPX) { 3289 bool UseMemory = false; 3290 3291 // Respect alignment of argument on the stack. 3292 unsigned Align = 3293 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 3294 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 3295 // If there's no space left in the argument save area, we must 3296 // use memory (this check also catches zero-sized arguments). 
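// For example (illustrative): with 8 GPRs and 8-byte pointers the callers
// below pass ParamAreaSize == 64, so an argument whose aligned offset has
// already reached LinkageSize + 64 can only be passed in memory.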
3297 if (ArgOffset >= LinkageSize + ParamAreaSize) 3298 UseMemory = true; 3299 3300 // Allocate argument on the stack. 3301 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 3302 if (Flags.isInConsecutiveRegsLast()) 3303 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3304 // If we overran the argument save area, we must use memory 3305 // (this check catches arguments passed partially in memory) 3306 if (ArgOffset > LinkageSize + ParamAreaSize) 3307 UseMemory = true; 3308 3309 // However, if the argument is actually passed in an FPR or a VR, 3310 // we don't use memory after all. 3311 if (!Flags.isByVal()) { 3312 if (ArgVT == MVT::f32 || ArgVT == MVT::f64 || 3313 // QPX registers overlap with the scalar FP registers. 3314 (HasQPX && (ArgVT == MVT::v4f32 || 3315 ArgVT == MVT::v4f64 || 3316 ArgVT == MVT::v4i1))) 3317 if (AvailableFPRs > 0) { 3318 --AvailableFPRs; 3319 return false; 3320 } 3321 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 3322 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 3323 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 3324 ArgVT == MVT::v1i128 || ArgVT == MVT::f128) 3325 if (AvailableVRs > 0) { 3326 --AvailableVRs; 3327 return false; 3328 } 3329 } 3330 3331 return UseMemory; 3332 } 3333 3334 /// EnsureStackAlignment - Round stack frame size up from NumBytes to 3335 /// ensure minimum alignment required for target. 3336 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, 3337 unsigned NumBytes) { 3338 unsigned TargetAlign = Lowering->getStackAlignment(); 3339 unsigned AlignMask = TargetAlign - 1; 3340 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 3341 return NumBytes; 3342 } 3343 3344 SDValue PPCTargetLowering::LowerFormalArguments( 3345 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3346 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3347 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3348 if (Subtarget.isSVR4ABI()) { 3349 if (Subtarget.isPPC64()) 3350 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, 3351 dl, DAG, InVals); 3352 else 3353 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, 3354 dl, DAG, InVals); 3355 } else { 3356 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, 3357 dl, DAG, InVals); 3358 } 3359 } 3360 3361 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4( 3362 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3363 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3364 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3365 3366 // 32-bit SVR4 ABI Stack Frame Layout: 3367 // +-----------------------------------+ 3368 // +--> | Back chain | 3369 // | +-----------------------------------+ 3370 // | | Floating-point register save area | 3371 // | +-----------------------------------+ 3372 // | | General register save area | 3373 // | +-----------------------------------+ 3374 // | | CR save word | 3375 // | +-----------------------------------+ 3376 // | | VRSAVE save word | 3377 // | +-----------------------------------+ 3378 // | | Alignment padding | 3379 // | +-----------------------------------+ 3380 // | | Vector register save area | 3381 // | +-----------------------------------+ 3382 // | | Local variable space | 3383 // | +-----------------------------------+ 3384 // | | Parameter list area | 3385 // | +-----------------------------------+ 3386 // | | LR save word | 3387 // | +-----------------------------------+ 3388 // SP--> +--- | Back chain | 3389 // 
+-----------------------------------+ 3390 // 3391 // Specifications: 3392 // System V Application Binary Interface PowerPC Processor Supplement 3393 // AltiVec Technology Programming Interface Manual 3394 3395 MachineFunction &MF = DAG.getMachineFunction(); 3396 MachineFrameInfo &MFI = MF.getFrameInfo(); 3397 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3398 3399 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3400 // Potential tail calls could cause overwriting of argument stack slots. 3401 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3402 (CallConv == CallingConv::Fast)); 3403 unsigned PtrByteSize = 4; 3404 3405 // Assign locations to all of the incoming arguments. 3406 SmallVector<CCValAssign, 16> ArgLocs; 3407 PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 3408 *DAG.getContext()); 3409 3410 // Reserve space for the linkage area on the stack. 3411 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3412 CCInfo.AllocateStack(LinkageSize, PtrByteSize); 3413 if (useSoftFloat() || hasSPE()) 3414 CCInfo.PreAnalyzeFormalArguments(Ins); 3415 3416 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 3417 CCInfo.clearWasPPCF128(); 3418 3419 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 3420 CCValAssign &VA = ArgLocs[i]; 3421 3422 // Arguments stored in registers. 3423 if (VA.isRegLoc()) { 3424 const TargetRegisterClass *RC; 3425 EVT ValVT = VA.getValVT(); 3426 3427 switch (ValVT.getSimpleVT().SimpleTy) { 3428 default: 3429 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 3430 case MVT::i1: 3431 case MVT::i32: 3432 RC = &PPC::GPRCRegClass; 3433 break; 3434 case MVT::f32: 3435 if (Subtarget.hasP8Vector()) 3436 RC = &PPC::VSSRCRegClass; 3437 else if (Subtarget.hasSPE()) 3438 RC = &PPC::SPE4RCRegClass; 3439 else 3440 RC = &PPC::F4RCRegClass; 3441 break; 3442 case MVT::f64: 3443 if (Subtarget.hasVSX()) 3444 RC = &PPC::VSFRCRegClass; 3445 else if (Subtarget.hasSPE()) 3446 RC = &PPC::SPERCRegClass; 3447 else 3448 RC = &PPC::F8RCRegClass; 3449 break; 3450 case MVT::v16i8: 3451 case MVT::v8i16: 3452 case MVT::v4i32: 3453 RC = &PPC::VRRCRegClass; 3454 break; 3455 case MVT::v4f32: 3456 RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass; 3457 break; 3458 case MVT::v2f64: 3459 case MVT::v2i64: 3460 RC = &PPC::VRRCRegClass; 3461 break; 3462 case MVT::v4f64: 3463 RC = &PPC::QFRCRegClass; 3464 break; 3465 case MVT::v4i1: 3466 RC = &PPC::QBRCRegClass; 3467 break; 3468 } 3469 3470 // Transform the arguments stored in physical registers into virtual ones. 3471 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 3472 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, 3473 ValVT == MVT::i1 ? MVT::i32 : ValVT); 3474 3475 if (ValVT == MVT::i1) 3476 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); 3477 3478 InVals.push_back(ArgValue); 3479 } else { 3480 // Argument stored in memory. 3481 assert(VA.isMemLoc()); 3482 3483 unsigned ArgSize = VA.getLocVT().getStoreSize(); 3484 int FI = MFI.CreateFixedObject(ArgSize, VA.getLocMemOffset(), 3485 isImmutable); 3486 3487 // Create load nodes to retrieve arguments from the stack. 3488 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3489 InVals.push_back( 3490 DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo())); 3491 } 3492 } 3493 3494 // Assign locations to all of the incoming aggregate by value arguments. 
3495 // Aggregates passed by value are stored in the local variable space of the 3496 // caller's stack frame, right above the parameter list area. 3497 SmallVector<CCValAssign, 16> ByValArgLocs; 3498 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 3499 ByValArgLocs, *DAG.getContext()); 3500 3501 // Reserve stack space for the allocations in CCInfo. 3502 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 3503 3504 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 3505 3506 // Area that is at least reserved in the caller of this function. 3507 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 3508 MinReservedArea = std::max(MinReservedArea, LinkageSize); 3509 3510 // Set the size that is at least reserved in caller of this function. Tail 3511 // call optimized function's reserved stack space needs to be aligned so that 3512 // taking the difference between two stack areas will result in an aligned 3513 // stack. 3514 MinReservedArea = 3515 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 3516 FuncInfo->setMinReservedArea(MinReservedArea); 3517 3518 SmallVector<SDValue, 8> MemOps; 3519 3520 // If the function takes variable number of arguments, make a frame index for 3521 // the start of the first vararg value... for expansion of llvm.va_start. 3522 if (isVarArg) { 3523 static const MCPhysReg GPArgRegs[] = { 3524 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3525 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3526 }; 3527 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); 3528 3529 static const MCPhysReg FPArgRegs[] = { 3530 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 3531 PPC::F8 3532 }; 3533 unsigned NumFPArgRegs = array_lengthof(FPArgRegs); 3534 3535 if (useSoftFloat() || hasSPE()) 3536 NumFPArgRegs = 0; 3537 3538 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs)); 3539 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs)); 3540 3541 // Make room for NumGPArgRegs and NumFPArgRegs. 3542 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + 3543 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8; 3544 3545 FuncInfo->setVarArgsStackOffset( 3546 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8, 3547 CCInfo.getNextStackOffset(), true)); 3548 3549 FuncInfo->setVarArgsFrameIndex(MFI.CreateStackObject(Depth, 8, false)); 3550 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3551 3552 // The fixed integer arguments of a variadic function are stored to the 3553 // VarArgsFrameIndex on the stack so that they may be loaded by 3554 // dereferencing the result of va_next. 3555 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) { 3556 // Get an existing live-in vreg, or add a new one. 3557 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]); 3558 if (!VReg) 3559 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); 3560 3561 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3562 SDValue Store = 3563 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3564 MemOps.push_back(Store); 3565 // Increment the address by four for the next argument to store 3566 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); 3567 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3568 } 3569 3570 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 3571 // is set. 3572 // The double arguments are stored to the VarArgsFrameIndex 3573 // on the stack. 
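// (Illustrative note: within that frame object the doubles follow the
// 8 * 4 = 32 bytes of spilled GPRs, which is why LowerVAARG above adds a
// 32-byte offset for floating-point accesses into the register save area.)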
3574 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) { 3575 // Get an existing live-in vreg, or add a new one. 3576 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); 3577 if (!VReg) 3578 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 3579 3580 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 3581 SDValue Store = 3582 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3583 MemOps.push_back(Store); 3584 // Increment the address by eight for the next argument to store 3585 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl, 3586 PtrVT); 3587 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3588 } 3589 } 3590 3591 if (!MemOps.empty()) 3592 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3593 3594 return Chain; 3595 } 3596 3597 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3598 // value to MVT::i64 and then truncate to the correct register size. 3599 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, 3600 EVT ObjectVT, SelectionDAG &DAG, 3601 SDValue ArgVal, 3602 const SDLoc &dl) const { 3603 if (Flags.isSExt()) 3604 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 3605 DAG.getValueType(ObjectVT)); 3606 else if (Flags.isZExt()) 3607 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 3608 DAG.getValueType(ObjectVT)); 3609 3610 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal); 3611 } 3612 3613 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4( 3614 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3615 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3616 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3617 // TODO: add description of PPC stack frame format, or at least some docs. 3618 // 3619 bool isELFv2ABI = Subtarget.isELFv2ABI(); 3620 bool isLittleEndian = Subtarget.isLittleEndian(); 3621 MachineFunction &MF = DAG.getMachineFunction(); 3622 MachineFrameInfo &MFI = MF.getFrameInfo(); 3623 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3624 3625 assert(!(CallConv == CallingConv::Fast && isVarArg) && 3626 "fastcc not supported on varargs functions"); 3627 3628 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3629 // Potential tail calls could cause overwriting of argument stack slots. 3630 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3631 (CallConv == CallingConv::Fast)); 3632 unsigned PtrByteSize = 8; 3633 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3634 3635 static const MCPhysReg GPR[] = { 3636 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3637 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3638 }; 3639 static const MCPhysReg VR[] = { 3640 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3641 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3642 }; 3643 3644 const unsigned Num_GPR_Regs = array_lengthof(GPR); 3645 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13; 3646 const unsigned Num_VR_Regs = array_lengthof(VR); 3647 const unsigned Num_QFPR_Regs = Num_FPR_Regs; 3648 3649 // Do a first pass over the arguments to determine whether the ABI 3650 // guarantees that our caller has allocated the parameter save area 3651 // on its stack frame. In the ELFv1 ABI, this is always the case; 3652 // in the ELFv2 ABI, it is true if this is a vararg function or if 3653 // any parameter is located in a stack slot. 
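// For example (illustrative): an ELFv2 function taking nine register-sized
// integer arguments needs the parameter save area (the ninth lands in a
// stack slot), while one taking eight does not.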
3654 3655 bool HasParameterArea = !isELFv2ABI || isVarArg; 3656 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize; 3657 unsigned NumBytes = LinkageSize; 3658 unsigned AvailableFPRs = Num_FPR_Regs; 3659 unsigned AvailableVRs = Num_VR_Regs; 3660 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 3661 if (Ins[i].Flags.isNest()) 3662 continue; 3663 3664 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags, 3665 PtrByteSize, LinkageSize, ParamAreaSize, 3666 NumBytes, AvailableFPRs, AvailableVRs, 3667 Subtarget.hasQPX())) 3668 HasParameterArea = true; 3669 } 3670 3671 // Add DAG nodes to load the arguments or copy them out of registers. On 3672 // entry to a function on PPC, the arguments start after the linkage area, 3673 // although the first ones are often in registers. 3674 3675 unsigned ArgOffset = LinkageSize; 3676 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3677 unsigned &QFPR_idx = FPR_idx; 3678 SmallVector<SDValue, 8> MemOps; 3679 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin(); 3680 unsigned CurArgIdx = 0; 3681 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3682 SDValue ArgVal; 3683 bool needsLoad = false; 3684 EVT ObjectVT = Ins[ArgNo].VT; 3685 EVT OrigVT = Ins[ArgNo].ArgVT; 3686 unsigned ObjSize = ObjectVT.getStoreSize(); 3687 unsigned ArgSize = ObjSize; 3688 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3689 if (Ins[ArgNo].isOrigArg()) { 3690 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 3691 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 3692 } 3693 // We re-align the argument offset for each argument, except when using the 3694 // fast calling convention, when we need to make sure we do that only when 3695 // we'll actually use a stack slot. 3696 unsigned CurArgOffset, Align; 3697 auto ComputeArgOffset = [&]() { 3698 /* Respect alignment of argument on the stack. */ 3699 Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize); 3700 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 3701 CurArgOffset = ArgOffset; 3702 }; 3703 3704 if (CallConv != CallingConv::Fast) { 3705 ComputeArgOffset(); 3706 3707 /* Compute GPR index associated with argument offset. */ 3708 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 3709 GPR_idx = std::min(GPR_idx, Num_GPR_Regs); 3710 } 3711 3712 // FIXME the codegen can be much improved in some cases. 3713 // We do not have to keep everything in memory. 3714 if (Flags.isByVal()) { 3715 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 3716 3717 if (CallConv == CallingConv::Fast) 3718 ComputeArgOffset(); 3719 3720 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3721 ObjSize = Flags.getByValSize(); 3722 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3723 // Empty aggregate parameters do not take up registers. Examples: 3724 // struct { } a; 3725 // union { } b; 3726 // int c[0]; 3727 // etc. However, we have to provide a place-holder in InVals, so 3728 // pretend we have an 8-byte item at the current address for that 3729 // purpose. 3730 if (!ObjSize) { 3731 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true); 3732 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3733 InVals.push_back(FIN); 3734 continue; 3735 } 3736 3737 // Create a stack object covering all stack doublewords occupied 3738 // by the argument. 
If the argument is (fully or partially) on 3739 // the stack, or if the argument is fully in registers but the 3740 // caller has allocated the parameter save anyway, we can refer 3741 // directly to the caller's stack frame. Otherwise, create a 3742 // local copy in our own frame. 3743 int FI; 3744 if (HasParameterArea || 3745 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize) 3746 FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true); 3747 else 3748 FI = MFI.CreateStackObject(ArgSize, Align, false); 3749 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3750 3751 // Handle aggregates smaller than 8 bytes. 3752 if (ObjSize < PtrByteSize) { 3753 // The value of the object is its address, which differs from the 3754 // address of the enclosing doubleword on big-endian systems. 3755 SDValue Arg = FIN; 3756 if (!isLittleEndian) { 3757 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT); 3758 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff); 3759 } 3760 InVals.push_back(Arg); 3761 3762 if (GPR_idx != Num_GPR_Regs) { 3763 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3764 FuncInfo->addLiveInAttr(VReg, Flags); 3765 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3766 SDValue Store; 3767 3768 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 3769 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 3770 (ObjSize == 2 ? MVT::i16 : MVT::i32)); 3771 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg, 3772 MachinePointerInfo(&*FuncArg), ObjType); 3773 } else { 3774 // For sizes that don't fit a truncating store (3, 5, 6, 7), 3775 // store the whole register as-is to the parameter save area 3776 // slot. 3777 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3778 MachinePointerInfo(&*FuncArg)); 3779 } 3780 3781 MemOps.push_back(Store); 3782 } 3783 // Whether we copied from a register or not, advance the offset 3784 // into the parameter save area by a full doubleword. 3785 ArgOffset += PtrByteSize; 3786 continue; 3787 } 3788 3789 // The value of the object is its address, which is the address of 3790 // its first stack doubleword. 3791 InVals.push_back(FIN); 3792 3793 // Store whatever pieces of the object are in registers to memory. 3794 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3795 if (GPR_idx == Num_GPR_Regs) 3796 break; 3797 3798 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3799 FuncInfo->addLiveInAttr(VReg, Flags); 3800 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3801 SDValue Addr = FIN; 3802 if (j) { 3803 SDValue Off = DAG.getConstant(j, dl, PtrVT); 3804 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off); 3805 } 3806 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr, 3807 MachinePointerInfo(&*FuncArg, j)); 3808 MemOps.push_back(Store); 3809 ++GPR_idx; 3810 } 3811 ArgOffset += ArgSize; 3812 continue; 3813 } 3814 3815 switch (ObjectVT.getSimpleVT().SimpleTy) { 3816 default: llvm_unreachable("Unhandled argument type!"); 3817 case MVT::i1: 3818 case MVT::i32: 3819 case MVT::i64: 3820 if (Flags.isNest()) { 3821 // The 'nest' parameter, if any, is passed in R11. 3822 unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass); 3823 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3824 3825 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3826 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3827 3828 break; 3829 } 3830 3831 // These can be scalar arguments or elements of an integer array type 3832 // passed directly. 
Clang may use those instead of "byval" aggregate 3833 // types to avoid forcing arguments to memory unnecessarily. 3834 if (GPR_idx != Num_GPR_Regs) { 3835 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3836 FuncInfo->addLiveInAttr(VReg, Flags); 3837 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3838 3839 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3840 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3841 // value to MVT::i64 and then truncate to the correct register size. 3842 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3843 } else { 3844 if (CallConv == CallingConv::Fast) 3845 ComputeArgOffset(); 3846 3847 needsLoad = true; 3848 ArgSize = PtrByteSize; 3849 } 3850 if (CallConv != CallingConv::Fast || needsLoad) 3851 ArgOffset += 8; 3852 break; 3853 3854 case MVT::f32: 3855 case MVT::f64: 3856 // These can be scalar arguments or elements of a float array type 3857 // passed directly. The latter are used to implement ELFv2 homogenous 3858 // float aggregates. 3859 if (FPR_idx != Num_FPR_Regs) { 3860 unsigned VReg; 3861 3862 if (ObjectVT == MVT::f32) 3863 VReg = MF.addLiveIn(FPR[FPR_idx], 3864 Subtarget.hasP8Vector() 3865 ? &PPC::VSSRCRegClass 3866 : &PPC::F4RCRegClass); 3867 else 3868 VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX() 3869 ? &PPC::VSFRCRegClass 3870 : &PPC::F8RCRegClass); 3871 3872 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3873 ++FPR_idx; 3874 } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) { 3875 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 3876 // once we support fp <-> gpr moves. 3877 3878 // This can only ever happen in the presence of f32 array types, 3879 // since otherwise we never run out of FPRs before running out 3880 // of GPRs. 3881 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3882 FuncInfo->addLiveInAttr(VReg, Flags); 3883 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3884 3885 if (ObjectVT == MVT::f32) { 3886 if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0)) 3887 ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal, 3888 DAG.getConstant(32, dl, MVT::i32)); 3889 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); 3890 } 3891 3892 ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal); 3893 } else { 3894 if (CallConv == CallingConv::Fast) 3895 ComputeArgOffset(); 3896 3897 needsLoad = true; 3898 } 3899 3900 // When passing an array of floats, the array occupies consecutive 3901 // space in the argument area; only round up to the next doubleword 3902 // at the end of the array. Otherwise, each float takes 8 bytes. 3903 if (CallConv != CallingConv::Fast || needsLoad) { 3904 ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize; 3905 ArgOffset += ArgSize; 3906 if (Flags.isInConsecutiveRegsLast()) 3907 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3908 } 3909 break; 3910 case MVT::v4f32: 3911 case MVT::v4i32: 3912 case MVT::v8i16: 3913 case MVT::v16i8: 3914 case MVT::v2f64: 3915 case MVT::v2i64: 3916 case MVT::v1i128: 3917 case MVT::f128: 3918 if (!Subtarget.hasQPX()) { 3919 // These can be scalar arguments or elements of a vector array type 3920 // passed directly. The latter are used to implement ELFv2 homogenous 3921 // vector aggregates. 
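// For example (illustrative): a small homogeneous struct of vector members
// can be passed entirely in VRs (V2-V13) under ELFv2, so its pieces arrive
// here as individual vector elements rather than as a byval aggregate.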
3922 if (VR_idx != Num_VR_Regs) { 3923 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 3924 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3925 ++VR_idx; 3926 } else { 3927 if (CallConv == CallingConv::Fast) 3928 ComputeArgOffset(); 3929 needsLoad = true; 3930 } 3931 if (CallConv != CallingConv::Fast || needsLoad) 3932 ArgOffset += 16; 3933 break; 3934 } // not QPX 3935 3936 assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 && 3937 "Invalid QPX parameter type"); 3938 /* fall through */ 3939 3940 case MVT::v4f64: 3941 case MVT::v4i1: 3942 // QPX vectors are treated like their scalar floating-point subregisters 3943 // (except that they're larger). 3944 unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32; 3945 if (QFPR_idx != Num_QFPR_Regs) { 3946 const TargetRegisterClass *RC; 3947 switch (ObjectVT.getSimpleVT().SimpleTy) { 3948 case MVT::v4f64: RC = &PPC::QFRCRegClass; break; 3949 case MVT::v4f32: RC = &PPC::QSRCRegClass; break; 3950 default: RC = &PPC::QBRCRegClass; break; 3951 } 3952 3953 unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC); 3954 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3955 ++QFPR_idx; 3956 } else { 3957 if (CallConv == CallingConv::Fast) 3958 ComputeArgOffset(); 3959 needsLoad = true; 3960 } 3961 if (CallConv != CallingConv::Fast || needsLoad) 3962 ArgOffset += Sz; 3963 break; 3964 } 3965 3966 // We need to load the argument to a virtual register if we determined 3967 // above that we ran out of physical registers of the appropriate type. 3968 if (needsLoad) { 3969 if (ObjSize < ArgSize && !isLittleEndian) 3970 CurArgOffset += ArgSize - ObjSize; 3971 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable); 3972 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3973 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo()); 3974 } 3975 3976 InVals.push_back(ArgVal); 3977 } 3978 3979 // Area that is at least reserved in the caller of this function. 3980 unsigned MinReservedArea; 3981 if (HasParameterArea) 3982 MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize); 3983 else 3984 MinReservedArea = LinkageSize; 3985 3986 // Set the size that is at least reserved in caller of this function. Tail 3987 // call optimized functions' reserved stack space needs to be aligned so that 3988 // taking the difference between two stack areas will result in an aligned 3989 // stack. 3990 MinReservedArea = 3991 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 3992 FuncInfo->setMinReservedArea(MinReservedArea); 3993 3994 // If the function takes variable number of arguments, make a frame index for 3995 // the start of the first vararg value... for expansion of llvm.va_start. 3996 if (isVarArg) { 3997 int Depth = ArgOffset; 3998 3999 FuncInfo->setVarArgsFrameIndex( 4000 MFI.CreateFixedObject(PtrByteSize, Depth, true)); 4001 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 4002 4003 // If this function is vararg, store any remaining integer argument regs 4004 // to their spots on the stack so that they may be loaded by dereferencing 4005 // the result of va_next. 
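// (Note, added for clarity: the starting GPR index below is recomputed from
// the final ArgOffset rather than reusing GPR_idx, which may have been
// clamped to Num_GPR_Regs in the argument loop above; only the GPRs whose
// parameter-area doublewords lie beyond the named arguments are spilled.)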
4006 for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 4007 GPR_idx < Num_GPR_Regs; ++GPR_idx) { 4008 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4009 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4010 SDValue Store = 4011 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 4012 MemOps.push_back(Store); 4013 // Increment the address by four for the next argument to store 4014 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT); 4015 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 4016 } 4017 } 4018 4019 if (!MemOps.empty()) 4020 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 4021 4022 return Chain; 4023 } 4024 4025 SDValue PPCTargetLowering::LowerFormalArguments_Darwin( 4026 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 4027 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 4028 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 4029 // TODO: add description of PPC stack frame format, or at least some docs. 4030 // 4031 MachineFunction &MF = DAG.getMachineFunction(); 4032 MachineFrameInfo &MFI = MF.getFrameInfo(); 4033 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 4034 4035 EVT PtrVT = getPointerTy(MF.getDataLayout()); 4036 bool isPPC64 = PtrVT == MVT::i64; 4037 // Potential tail calls could cause overwriting of argument stack slots. 4038 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 4039 (CallConv == CallingConv::Fast)); 4040 unsigned PtrByteSize = isPPC64 ? 8 : 4; 4041 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 4042 unsigned ArgOffset = LinkageSize; 4043 // Area that is at least reserved in caller of this function. 4044 unsigned MinReservedArea = ArgOffset; 4045 4046 static const MCPhysReg GPR_32[] = { // 32-bit registers. 4047 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 4048 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 4049 }; 4050 static const MCPhysReg GPR_64[] = { // 64-bit registers. 4051 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 4052 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 4053 }; 4054 static const MCPhysReg VR[] = { 4055 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 4056 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 4057 }; 4058 4059 const unsigned Num_GPR_Regs = array_lengthof(GPR_32); 4060 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13; 4061 const unsigned Num_VR_Regs = array_lengthof( VR); 4062 4063 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 4064 4065 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 4066 4067 // In 32-bit non-varargs functions, the stack space for vectors is after the 4068 // stack space for non-vectors. We do not use this space unless we have 4069 // too many vectors to fit in registers, something that only occurs in 4070 // constructed examples:), but we have to walk the arglist to figure 4071 // that out...for the pathological case, compute VecArgOffset as the 4072 // start of the vector parameter area. Computing VecArgOffset is the 4073 // entire point of the following loop. 4074 unsigned VecArgOffset = ArgOffset; 4075 if (!isVarArg && !isPPC64) { 4076 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; 4077 ++ArgNo) { 4078 EVT ObjectVT = Ins[ArgNo].VT; 4079 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 4080 4081 if (Flags.isByVal()) { 4082 // ObjSize is the true size, ArgSize rounded up to multiple of regs. 
4083 unsigned ObjSize = Flags.getByValSize(); 4084 unsigned ArgSize = 4085 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 4086 VecArgOffset += ArgSize; 4087 continue; 4088 } 4089 4090 switch(ObjectVT.getSimpleVT().SimpleTy) { 4091 default: llvm_unreachable("Unhandled argument type!"); 4092 case MVT::i1: 4093 case MVT::i32: 4094 case MVT::f32: 4095 VecArgOffset += 4; 4096 break; 4097 case MVT::i64: // PPC64 4098 case MVT::f64: 4099 // FIXME: We are guaranteed to be !isPPC64 at this point. 4100 // Does MVT::i64 apply? 4101 VecArgOffset += 8; 4102 break; 4103 case MVT::v4f32: 4104 case MVT::v4i32: 4105 case MVT::v8i16: 4106 case MVT::v16i8: 4107 // Nothing to do, we're only looking at Nonvector args here. 4108 break; 4109 } 4110 } 4111 } 4112 // We've found where the vector parameter area in memory is. Skip the 4113 // first 12 parameters; these don't use that memory. 4114 VecArgOffset = ((VecArgOffset+15)/16)*16; 4115 VecArgOffset += 12*16; 4116 4117 // Add DAG nodes to load the arguments or copy them out of registers. On 4118 // entry to a function on PPC, the arguments start after the linkage area, 4119 // although the first ones are often in registers. 4120 4121 SmallVector<SDValue, 8> MemOps; 4122 unsigned nAltivecParamsAtEnd = 0; 4123 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin(); 4124 unsigned CurArgIdx = 0; 4125 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 4126 SDValue ArgVal; 4127 bool needsLoad = false; 4128 EVT ObjectVT = Ins[ArgNo].VT; 4129 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 4130 unsigned ArgSize = ObjSize; 4131 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 4132 if (Ins[ArgNo].isOrigArg()) { 4133 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 4134 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 4135 } 4136 unsigned CurArgOffset = ArgOffset; 4137 4138 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 4139 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 4140 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 4141 if (isVarArg || isPPC64) { 4142 MinReservedArea = ((MinReservedArea+15)/16)*16; 4143 MinReservedArea += CalculateStackSlotSize(ObjectVT, 4144 Flags, 4145 PtrByteSize); 4146 } else nAltivecParamsAtEnd++; 4147 } else 4148 // Calculate min reserved area. 4149 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 4150 Flags, 4151 PtrByteSize); 4152 4153 // FIXME the codegen can be much improved in some cases. 4154 // We do not have to keep everything in memory. 4155 if (Flags.isByVal()) { 4156 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 4157 4158 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 4159 ObjSize = Flags.getByValSize(); 4160 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 4161 // Objects of size 1 and 2 are right justified, everything else is 4162 // left justified. This means the memory address is adjusted forwards. 4163 if (ObjSize==1 || ObjSize==2) { 4164 CurArgOffset = CurArgOffset + (4 - ObjSize); 4165 } 4166 // The value of the object is its address. 
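// For example (illustrative): a 1-byte byval object occupies the last byte
// of its 4-byte slot, so CurArgOffset was already advanced by 3 above and
// the fixed object created below points at that byte.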
4167 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true); 4168 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4169 InVals.push_back(FIN); 4170 if (ObjSize==1 || ObjSize==2) { 4171 if (GPR_idx != Num_GPR_Regs) { 4172 unsigned VReg; 4173 if (isPPC64) 4174 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4175 else 4176 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4177 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4178 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16; 4179 SDValue Store = 4180 DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 4181 MachinePointerInfo(&*FuncArg), ObjType); 4182 MemOps.push_back(Store); 4183 ++GPR_idx; 4184 } 4185 4186 ArgOffset += PtrByteSize; 4187 4188 continue; 4189 } 4190 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 4191 // Store whatever pieces of the object are in registers 4192 // to memory. ArgOffset will be the address of the beginning 4193 // of the object. 4194 if (GPR_idx != Num_GPR_Regs) { 4195 unsigned VReg; 4196 if (isPPC64) 4197 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4198 else 4199 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4200 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true); 4201 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4202 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4203 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 4204 MachinePointerInfo(&*FuncArg, j)); 4205 MemOps.push_back(Store); 4206 ++GPR_idx; 4207 ArgOffset += PtrByteSize; 4208 } else { 4209 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 4210 break; 4211 } 4212 } 4213 continue; 4214 } 4215 4216 switch (ObjectVT.getSimpleVT().SimpleTy) { 4217 default: llvm_unreachable("Unhandled argument type!"); 4218 case MVT::i1: 4219 case MVT::i32: 4220 if (!isPPC64) { 4221 if (GPR_idx != Num_GPR_Regs) { 4222 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4223 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 4224 4225 if (ObjectVT == MVT::i1) 4226 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal); 4227 4228 ++GPR_idx; 4229 } else { 4230 needsLoad = true; 4231 ArgSize = PtrByteSize; 4232 } 4233 // All int arguments reserve stack space in the Darwin ABI. 4234 ArgOffset += PtrByteSize; 4235 break; 4236 } 4237 LLVM_FALLTHROUGH; 4238 case MVT::i64: // PPC64 4239 if (GPR_idx != Num_GPR_Regs) { 4240 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4241 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 4242 4243 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 4244 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 4245 // value to MVT::i64 and then truncate to the correct register size. 4246 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 4247 4248 ++GPR_idx; 4249 } else { 4250 needsLoad = true; 4251 ArgSize = PtrByteSize; 4252 } 4253 // All int arguments reserve stack space in the Darwin ABI. 4254 ArgOffset += 8; 4255 break; 4256 4257 case MVT::f32: 4258 case MVT::f64: 4259 // Every 4 bytes of argument space consumes one of the GPRs available for 4260 // argument passing. 
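// For example (illustrative): on 32-bit Darwin an f64 argument shadows two
// GPRs while an f32 shadows one, as the code below reflects.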
4261 if (GPR_idx != Num_GPR_Regs) { 4262 ++GPR_idx; 4263 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 4264 ++GPR_idx; 4265 } 4266 if (FPR_idx != Num_FPR_Regs) { 4267 unsigned VReg; 4268 4269 if (ObjectVT == MVT::f32) 4270 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 4271 else 4272 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 4273 4274 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4275 ++FPR_idx; 4276 } else { 4277 needsLoad = true; 4278 } 4279 4280 // All FP arguments reserve stack space in the Darwin ABI. 4281 ArgOffset += isPPC64 ? 8 : ObjSize; 4282 break; 4283 case MVT::v4f32: 4284 case MVT::v4i32: 4285 case MVT::v8i16: 4286 case MVT::v16i8: 4287 // Note that vector arguments in registers don't reserve stack space, 4288 // except in varargs functions. 4289 if (VR_idx != Num_VR_Regs) { 4290 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 4291 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4292 if (isVarArg) { 4293 while ((ArgOffset % 16) != 0) { 4294 ArgOffset += PtrByteSize; 4295 if (GPR_idx != Num_GPR_Regs) 4296 GPR_idx++; 4297 } 4298 ArgOffset += 16; 4299 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 4300 } 4301 ++VR_idx; 4302 } else { 4303 if (!isVarArg && !isPPC64) { 4304 // Vectors go after all the nonvectors. 4305 CurArgOffset = VecArgOffset; 4306 VecArgOffset += 16; 4307 } else { 4308 // Vectors are aligned. 4309 ArgOffset = ((ArgOffset+15)/16)*16; 4310 CurArgOffset = ArgOffset; 4311 ArgOffset += 16; 4312 } 4313 needsLoad = true; 4314 } 4315 break; 4316 } 4317 4318 // We need to load the argument to a virtual register if we determined above 4319 // that we ran out of physical registers of the appropriate type. 4320 if (needsLoad) { 4321 int FI = MFI.CreateFixedObject(ObjSize, 4322 CurArgOffset + (ArgSize - ObjSize), 4323 isImmutable); 4324 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4325 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo()); 4326 } 4327 4328 InVals.push_back(ArgVal); 4329 } 4330 4331 // Allow for Altivec parameters at the end, if needed. 4332 if (nAltivecParamsAtEnd) { 4333 MinReservedArea = ((MinReservedArea+15)/16)*16; 4334 MinReservedArea += 16*nAltivecParamsAtEnd; 4335 } 4336 4337 // Area that is at least reserved in the caller of this function. 4338 MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize); 4339 4340 // Set the size that is at least reserved in caller of this function. Tail 4341 // call optimized functions' reserved stack space needs to be aligned so that 4342 // taking the difference between two stack areas will result in an aligned 4343 // stack. 4344 MinReservedArea = 4345 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 4346 FuncInfo->setMinReservedArea(MinReservedArea); 4347 4348 // If the function takes variable number of arguments, make a frame index for 4349 // the start of the first vararg value... for expansion of llvm.va_start. 4350 if (isVarArg) { 4351 int Depth = ArgOffset; 4352 4353 FuncInfo->setVarArgsFrameIndex( 4354 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8, 4355 Depth, true)); 4356 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 4357 4358 // If this function is vararg, store any remaining integer argument regs 4359 // to their spots on the stack so that they may be loaded by dereferencing 4360 // the result of va_next. 
4361 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { 4362 unsigned VReg; 4363 4364 if (isPPC64) 4365 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4366 else 4367 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4368 4369 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4370 SDValue Store = 4371 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 4372 MemOps.push_back(Store); 4373 // Increment the address by four for the next argument to store 4374 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); 4375 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 4376 } 4377 } 4378 4379 if (!MemOps.empty()) 4380 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 4381 4382 return Chain; 4383 } 4384 4385 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be 4386 /// adjusted to accommodate the arguments for the tailcall. 4387 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall, 4388 unsigned ParamSize) { 4389 4390 if (!isTailCall) return 0; 4391 4392 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>(); 4393 unsigned CallerMinReservedArea = FI->getMinReservedArea(); 4394 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize; 4395 // Remember only if the new adjustment is bigger. 4396 if (SPDiff < FI->getTailCallSPDelta()) 4397 FI->setTailCallSPDelta(SPDiff); 4398 4399 return SPDiff; 4400 } 4401 4402 static bool isFunctionGlobalAddress(SDValue Callee); 4403 4404 static bool 4405 callsShareTOCBase(const Function *Caller, SDValue Callee, 4406 const TargetMachine &TM) { 4407 // If !G, Callee can be an external symbol. 4408 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee); 4409 if (!G) 4410 return false; 4411 4412 // The medium and large code models are expected to provide a sufficiently 4413 // large TOC to provide all data addressing needs of a module with a 4414 // single TOC. Since each module will be addressed with a single TOC then we 4415 // only need to check that caller and callee don't cross dso boundaries. 4416 if (CodeModel::Medium == TM.getCodeModel() || 4417 CodeModel::Large == TM.getCodeModel()) 4418 return TM.shouldAssumeDSOLocal(*Caller->getParent(), G->getGlobal()); 4419 4420 // Otherwise we need to ensure callee and caller are in the same section, 4421 // since the linker may allocate multiple TOCs, and we don't know which 4422 // sections will belong to the same TOC base. 4423 4424 const GlobalValue *GV = G->getGlobal(); 4425 if (!GV->isStrongDefinitionForLinker()) 4426 return false; 4427 4428 // Any explicitly-specified sections and section prefixes must also match. 4429 // Also, if we're using -ffunction-sections, then each function is always in 4430 // a different section (the same is true for COMDAT functions). 4431 if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() || 4432 GV->getSection() != Caller->getSection()) 4433 return false; 4434 if (const auto *F = dyn_cast<Function>(GV)) { 4435 if (F->getSectionPrefix() != Caller->getSectionPrefix()) 4436 return false; 4437 } 4438 4439 // If the callee might be interposed, then we can't assume the ultimate call 4440 // target will be in the same section. Even in cases where we can assume that 4441 // interposition won't happen, in any case where the linker might insert a 4442 // stub to allow for interposition, we must generate code as though 4443 // interposition might occur. 
  // To understand why this matters, consider a situation where a -> b -> c,
  // where the arrows indicate calls. b and c are in the same section, but a
  // is in a different module (i.e. has a different TOC base pointer). If the
  // linker allows for interposition between b and c, then it will generate a
  // stub for the call edge between b and c which will save the TOC pointer
  // into the designated stack slot allocated by b. If we return true here,
  // and therefore allow a tail call between b and c, that stack slot won't
  // exist and the b -> c stub will end up saving b's TOC base pointer into
  // the stack slot allocated by a (where the a -> b stub saved a's TOC base
  // pointer). If we are not deciding on a tail call, but rather on whether a
  // nop is needed after the call instruction in b (because the linker will
  // insert a stub), the linker might complain about a missing nop if we omit
  // it (although many linkers don't complain in this case).
  if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
    return false;

  return true;
}

static bool
needStackSlotPassParameters(const PPCSubtarget &Subtarget,
                            const SmallVectorImpl<ISD::OutputArg> &Outs) {
  assert(Subtarget.isSVR4ABI() && Subtarget.isPPC64());

  const unsigned PtrByteSize = 8;
  const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned NumGPRs = array_lengthof(GPR);
  const unsigned NumFPRs = 13;
  const unsigned NumVRs = array_lengthof(VR);
  const unsigned ParamAreaSize = NumGPRs * PtrByteSize;

  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = NumFPRs;
  unsigned AvailableVRs = NumVRs;

  for (const ISD::OutputArg& Param : Outs) {
    if (Param.Flags.isNest()) continue;

    if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
                               PtrByteSize, LinkageSize, ParamAreaSize,
                               NumBytes, AvailableFPRs, AvailableVRs,
                               Subtarget.hasQPX()))
      return true;
  }
  return false;
}

static bool
hasSameArgumentList(const Function *CallerFn, ImmutableCallSite CS) {
  if (CS.arg_size() != CallerFn->arg_size())
    return false;

  ImmutableCallSite::arg_iterator CalleeArgIter = CS.arg_begin();
  ImmutableCallSite::arg_iterator CalleeArgEnd = CS.arg_end();
  Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();

  for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
    const Value* CalleeArg = *CalleeArgIter;
    const Value* CallerArg = &(*CallerArgIter);
    if (CalleeArg == CallerArg)
      continue;

    // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
    //        tail call @callee([4 x i64] undef, [4 x i64] %b)
    //      }
    // The first argument of the callee is undef and has the same type as the
    // corresponding caller argument.
    if (CalleeArg->getType() == CallerArg->getType() &&
        isa<UndefValue>(CalleeArg))
      continue;

    return false;
  }

  return true;
}

// Returns true if TCO is possible between the caller's and callee's calling
// conventions.
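// For reference, the combinations accepted below are, as (caller, callee):
// (C, C), (C, Fast) and (Fast, Fast). (Fast, C) is rejected because a fastcc
// caller may have reserved less stack space than a ccc caller with the same
// signature would have.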
static bool
areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
                                    CallingConv::ID CalleeCC) {
  // Tail calls are possible with fastcc and ccc.
  auto isTailCallableCC = [] (CallingConv::ID CC){
    return CC == CallingConv::C || CC == CallingConv::Fast;
  };
  if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
    return false;

  // We can safely tail call both fastcc and ccc callees from a c calling
  // convention caller. If the caller is fastcc, we may have less stack space
  // than a non-fastcc caller with the same signature so disable tail-calls in
  // that case.
  return CallerCC == CallingConv::C || CallerCC == CalleeCC;
}

bool
PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
                                    SDValue Callee,
                                    CallingConv::ID CalleeCC,
                                    ImmutableCallSite CS,
                                    bool isVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SelectionDAG& DAG) const {
  bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;

  if (DisableSCO && !TailCallOpt) return false;

  // Variadic argument functions are not supported.
  if (isVarArg) return false;

  auto &Caller = DAG.getMachineFunction().getFunction();
  // Check that the calling conventions are compatible for TCO.
  if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
    return false;

  // A caller with any byval parameter is not supported.
  if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
    return false;

  // A callee with any byval parameter is not supported either.
  // Note: This is a quick workaround, because in some cases, e.g. when the
  // caller's stack size > callee's stack size, we are still able to apply
  // sibling call optimization. For example, gcc is able to do SCO for caller1
  // in the following example, but not for caller2.
  //   struct test {
  //     long int a;
  //     char ary[56];
  //   } gTest;
  //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
  //     b->a = v.a;
  //     return 0;
  //   }
  //   void caller1(struct test a, struct test c, struct test *b) {
  //     callee(gTest, b); }
  //   void caller2(struct test *b) { callee(gTest, b); }
  if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
    return false;

  // If the callee and caller use different calling conventions, we cannot pass
  // parameters on the stack since the offsets for the parameter area may be
  // different.
  if (Caller.getCallingConv() != CalleeCC &&
      needStackSlotPassParameters(Subtarget, Outs))
    return false;

  // No TCO/SCO on indirect calls, because the caller has to restore its TOC.
  if (!isFunctionGlobalAddress(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee))
    return false;

  // If the caller and callee potentially have different TOC bases then we
  // cannot tail call since we need to restore the TOC pointer after the call.
  // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
  if (!callsShareTOCBase(&Caller, Callee, getTargetMachine()))
    return false;

  // TCO allows altering callee ABI, so we don't have to check further.
  if (CalleeCC == CallingConv::Fast && TailCallOpt)
    return true;

  if (DisableSCO) return false;

  // If the callee uses the same argument list as the caller, we can apply SCO
  // in this case. Otherwise, we need to check whether the callee needs stack
  // space for passing arguments.
  if (!hasSameArgumentList(&Caller, CS) &&
      needStackSlotPassParameters(Subtarget, Outs)) {
    return false;
  }

  return true;
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool
PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                                     CallingConv::ID CalleeCC,
                                                     bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                                     SelectionDAG& DAG) const {
  if (!getTargetMachine().Options.GuaranteedTailCallOpt)
    return false;

  // Variable argument functions are not supported.
  if (isVarArg)
    return false;

  MachineFunction &MF = DAG.getMachineFunction();
  CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
  if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
    // Functions containing byval parameters are not supported.
    for (unsigned i = 0; i != Ins.size(); i++) {
      ISD::ArgFlagsTy Flags = Ins[i].Flags;
      if (Flags.isByVal()) return false;
    }

    // Non-PIC/GOT tail calls are supported.
    if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
      return true;

    // At the moment we can only do local tail calls (in the same module, with
    // hidden or protected visibility) if we are generating PIC.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
      return G->getGlobal()->hasHiddenVisibility()
          || G->getGlobal()->hasProtectedVisibility();
  }

  return false;
}

/// isBLACompatibleAddress - Return the immediate to use if the specified
/// 32-bit value is representable in the immediate field of a BxA instruction.
static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
  if (!C) return nullptr;

  int Addr = C->getZExtValue();
  if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
      SignExtend32<26>(Addr) != Addr)
    return nullptr;  // Top 6 bits have to be sext of immediate.

  return DAG
      .getConstant(
          (int)C->getZExtValue() >> 2, SDLoc(Op),
          DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
      .getNode();
}

namespace {

struct TailCallArgumentInfo {
  SDValue Arg;
  SDValue FrameIdxOp;
  int FrameIdx = 0;

  TailCallArgumentInfo() = default;
};

} // end anonymous namespace

/// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
static void StoreTailCallArgumentsToStackSlot(
    SelectionDAG &DAG, SDValue Chain,
    const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
    SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
  for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
    SDValue Arg = TailCallArgs[i].Arg;
    SDValue FIN = TailCallArgs[i].FrameIdxOp;
    int FI = TailCallArgs[i].FrameIdx;
    // Store relative to the frame pointer.
4705 MemOpChains.push_back(DAG.getStore( 4706 Chain, dl, Arg, FIN, 4707 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI))); 4708 } 4709 } 4710 4711 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 4712 /// the appropriate stack slot for the tail call optimized function call. 4713 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain, 4714 SDValue OldRetAddr, SDValue OldFP, 4715 int SPDiff, const SDLoc &dl) { 4716 if (SPDiff) { 4717 // Calculate the new stack slot for the return address. 4718 MachineFunction &MF = DAG.getMachineFunction(); 4719 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); 4720 const PPCFrameLowering *FL = Subtarget.getFrameLowering(); 4721 bool isPPC64 = Subtarget.isPPC64(); 4722 int SlotSize = isPPC64 ? 8 : 4; 4723 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset(); 4724 int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize, 4725 NewRetAddrLoc, true); 4726 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 4727 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 4728 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx, 4729 MachinePointerInfo::getFixedStack(MF, NewRetAddr)); 4730 4731 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack 4732 // slot as the FP is never overwritten. 4733 if (Subtarget.isDarwinABI()) { 4734 int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset(); 4735 int NewFPIdx = MF.getFrameInfo().CreateFixedObject(SlotSize, NewFPLoc, 4736 true); 4737 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 4738 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx, 4739 MachinePointerInfo::getFixedStack( 4740 DAG.getMachineFunction(), NewFPIdx)); 4741 } 4742 } 4743 return Chain; 4744 } 4745 4746 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate 4747 /// the position of the argument. 4748 static void 4749 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 4750 SDValue Arg, int SPDiff, unsigned ArgOffset, 4751 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) { 4752 int Offset = ArgOffset + SPDiff; 4753 uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8; 4754 int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true); 4755 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 4756 SDValue FIN = DAG.getFrameIndex(FI, VT); 4757 TailCallArgumentInfo Info; 4758 Info.Arg = Arg; 4759 Info.FrameIdxOp = FIN; 4760 Info.FrameIdx = FI; 4761 TailCallArguments.push_back(Info); 4762 } 4763 4764 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address 4765 /// stack slot. Returns the chain as result and the loaded frame pointers in 4766 /// LROpOut/FPOpout. Used when tail calling. 4767 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr( 4768 SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut, 4769 SDValue &FPOpOut, const SDLoc &dl) const { 4770 if (SPDiff) { 4771 // Load the LR and FP stack slot for later adjusting. 4772 EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32; 4773 LROpOut = getReturnAddrFrameIndex(DAG); 4774 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo()); 4775 Chain = SDValue(LROpOut.getNode(), 1); 4776 4777 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack 4778 // slot as the FP is never overwritten. 
4779 if (Subtarget.isDarwinABI()) { 4780 FPOpOut = getFramePointerFrameIndex(DAG); 4781 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo()); 4782 Chain = SDValue(FPOpOut.getNode(), 1); 4783 } 4784 } 4785 return Chain; 4786 } 4787 4788 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 4789 /// by "Src" to address "Dst" of size "Size". Alignment information is 4790 /// specified by the specific parameter attribute. The copy will be passed as 4791 /// a byval function parameter. 4792 /// Sometimes what we are copying is the end of a larger object, the part that 4793 /// does not fit in registers. 4794 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, 4795 SDValue Chain, ISD::ArgFlagsTy Flags, 4796 SelectionDAG &DAG, const SDLoc &dl) { 4797 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32); 4798 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 4799 false, false, false, MachinePointerInfo(), 4800 MachinePointerInfo()); 4801 } 4802 4803 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 4804 /// tail calls. 4805 static void LowerMemOpCallTo( 4806 SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, 4807 SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, 4808 bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains, 4809 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) { 4810 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4811 if (!isTailCall) { 4812 if (isVector) { 4813 SDValue StackPtr; 4814 if (isPPC64) 4815 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 4816 else 4817 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 4818 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4819 DAG.getConstant(ArgOffset, dl, PtrVT)); 4820 } 4821 MemOpChains.push_back( 4822 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 4823 // Calculate and remember argument location. 4824 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 4825 TailCallArguments); 4826 } 4827 4828 static void 4829 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 4830 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp, 4831 SDValue FPOp, 4832 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 4833 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 4834 // might overwrite each other in case of tail call optimization. 4835 SmallVector<SDValue, 8> MemOpChains2; 4836 // Do not flag preceding copytoreg stuff together with the following stuff. 4837 InFlag = SDValue(); 4838 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 4839 MemOpChains2, dl); 4840 if (!MemOpChains2.empty()) 4841 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); 4842 4843 // Store the return address to the appropriate stack slot. 4844 Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl); 4845 4846 // Emit callseq_end just before tailcall node. 4847 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4848 DAG.getIntPtrConstant(0, dl, true), InFlag, dl); 4849 InFlag = Chain.getValue(1); 4850 } 4851 4852 // Is this global address that of a function that can be called by name? (as 4853 // opposed to something that must hold a descriptor for an indirect call). 
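// For example, a direct call to 'define void @foo()' produces a
// GlobalAddressSDNode whose global has function type, so this returns true.
// A call through a loaded function pointer is not a GlobalAddressSDNode at
// all, and a TLS address is explicitly excluded; both return false and are
// lowered as indirect calls.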
4854 static bool isFunctionGlobalAddress(SDValue Callee) { 4855 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 4856 if (Callee.getOpcode() == ISD::GlobalTLSAddress || 4857 Callee.getOpcode() == ISD::TargetGlobalTLSAddress) 4858 return false; 4859 4860 return G->getGlobal()->getValueType()->isFunctionTy(); 4861 } 4862 4863 return false; 4864 } 4865 4866 static unsigned 4867 PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain, 4868 SDValue CallSeqStart, const SDLoc &dl, int SPDiff, bool isTailCall, 4869 bool isPatchPoint, bool hasNest, 4870 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, 4871 SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys, 4872 ImmutableCallSite CS, const PPCSubtarget &Subtarget) { 4873 bool isPPC64 = Subtarget.isPPC64(); 4874 bool isSVR4ABI = Subtarget.isSVR4ABI(); 4875 bool isELFv2ABI = Subtarget.isELFv2ABI(); 4876 4877 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4878 NodeTys.push_back(MVT::Other); // Returns a chain 4879 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. 4880 4881 unsigned CallOpc = PPCISD::CALL; 4882 4883 bool needIndirectCall = true; 4884 if (!isSVR4ABI || !isPPC64) 4885 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { 4886 // If this is an absolute destination address, use the munged value. 4887 Callee = SDValue(Dest, 0); 4888 needIndirectCall = false; 4889 } 4890 4891 // PC-relative references to external symbols should go through $stub, unless 4892 // we're building with the leopard linker or later, which automatically 4893 // synthesizes these stubs. 4894 const TargetMachine &TM = DAG.getTarget(); 4895 const Module *Mod = DAG.getMachineFunction().getFunction().getParent(); 4896 const GlobalValue *GV = nullptr; 4897 if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) 4898 GV = G->getGlobal(); 4899 bool Local = TM.shouldAssumeDSOLocal(*Mod, GV); 4900 bool UsePlt = !Local && Subtarget.isTargetELF() && !isPPC64; 4901 4902 if (isFunctionGlobalAddress(Callee)) { 4903 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee); 4904 // A call to a TLS address is actually an indirect call to a 4905 // thread-specific pointer. 4906 unsigned OpFlags = 0; 4907 if (UsePlt) 4908 OpFlags = PPCII::MO_PLT; 4909 4910 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, 4911 // every direct call is) turn it into a TargetGlobalAddress / 4912 // TargetExternalSymbol node so that legalize doesn't hack it. 4913 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, 4914 Callee.getValueType(), 0, OpFlags); 4915 needIndirectCall = false; 4916 } 4917 4918 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 4919 unsigned char OpFlags = 0; 4920 4921 if (UsePlt) 4922 OpFlags = PPCII::MO_PLT; 4923 4924 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(), 4925 OpFlags); 4926 needIndirectCall = false; 4927 } 4928 4929 if (isPatchPoint) { 4930 // We'll form an invalid direct call when lowering a patchpoint; the full 4931 // sequence for an indirect call is complicated, and many of the 4932 // instructions introduced might have side effects (and, thus, can't be 4933 // removed later). The call itself will be removed as soon as the 4934 // argument/return lowering is complete, so the fact that it has the wrong 4935 // kind of operands should not really matter. 4936 needIndirectCall = false; 4937 } 4938 4939 if (needIndirectCall) { 4940 // Otherwise, this is an indirect call. 
We have to use a MTCTR/BCTRL pair 4941 // to do the call, we can't use PPCISD::CALL. 4942 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 4943 4944 if (isSVR4ABI && isPPC64 && !isELFv2ABI) { 4945 // Function pointers in the 64-bit SVR4 ABI do not point to the function 4946 // entry point, but to the function descriptor (the function entry point 4947 // address is part of the function descriptor though). 4948 // The function descriptor is a three doubleword structure with the 4949 // following fields: function entry point, TOC base address and 4950 // environment pointer. 4951 // Thus for a call through a function pointer, the following actions need 4952 // to be performed: 4953 // 1. Save the TOC of the caller in the TOC save area of its stack 4954 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). 4955 // 2. Load the address of the function entry point from the function 4956 // descriptor. 4957 // 3. Load the TOC of the callee from the function descriptor into r2. 4958 // 4. Load the environment pointer from the function descriptor into 4959 // r11. 4960 // 5. Branch to the function entry point address. 4961 // 6. On return of the callee, the TOC of the caller needs to be 4962 // restored (this is done in FinishCall()). 4963 // 4964 // The loads are scheduled at the beginning of the call sequence, and the 4965 // register copies are flagged together to ensure that no other 4966 // operations can be scheduled in between. E.g. without flagging the 4967 // copies together, a TOC access in the caller could be scheduled between 4968 // the assignment of the callee TOC and the branch to the callee, which 4969 // results in the TOC access going through the TOC of the callee instead 4970 // of going through the TOC of the caller, which leads to incorrect code. 4971 4972 // Load the address of the function entry point from the function 4973 // descriptor. 4974 SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1); 4975 if (LDChain.getValueType() == MVT::Glue) 4976 LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2); 4977 4978 auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors() 4979 ? (MachineMemOperand::MODereferenceable | 4980 MachineMemOperand::MOInvariant) 4981 : MachineMemOperand::MONone; 4982 4983 MachinePointerInfo MPI(CS ? CS.getCalledValue() : nullptr); 4984 SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI, 4985 /* Alignment = */ 8, MMOFlags); 4986 4987 // Load environment pointer into r11. 4988 SDValue PtrOff = DAG.getIntPtrConstant(16, dl); 4989 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 4990 SDValue LoadEnvPtr = 4991 DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, MPI.getWithOffset(16), 4992 /* Alignment = */ 8, MMOFlags); 4993 4994 SDValue TOCOff = DAG.getIntPtrConstant(8, dl); 4995 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff); 4996 SDValue TOCPtr = 4997 DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, MPI.getWithOffset(8), 4998 /* Alignment = */ 8, MMOFlags); 4999 5000 setUsesTOCBasePtr(DAG); 5001 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr, 5002 InFlag); 5003 Chain = TOCVal.getValue(0); 5004 InFlag = TOCVal.getValue(1); 5005 5006 // If the function call has an explicit 'nest' parameter, it takes the 5007 // place of the environment pointer. 
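      // To recap the ELFv1 descriptor layout assumed by the loads above (byte
      // offsets are relative to the descriptor address in Callee):
      //   +0  : function entry point   -> CTR (via the MTCTR emitted below)
      //   +8  : TOC base of the callee -> r2
      //   +16 : environment pointer    -> r11 (skipped when 'nest' is in use)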
5008 if (!hasNest) { 5009 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 5010 InFlag); 5011 5012 Chain = EnvVal.getValue(0); 5013 InFlag = EnvVal.getValue(1); 5014 } 5015 5016 MTCTROps[0] = Chain; 5017 MTCTROps[1] = LoadFuncPtr; 5018 MTCTROps[2] = InFlag; 5019 } 5020 5021 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, 5022 makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2)); 5023 InFlag = Chain.getValue(1); 5024 5025 NodeTys.clear(); 5026 NodeTys.push_back(MVT::Other); 5027 NodeTys.push_back(MVT::Glue); 5028 Ops.push_back(Chain); 5029 CallOpc = PPCISD::BCTRL; 5030 Callee.setNode(nullptr); 5031 // Add use of X11 (holding environment pointer) 5032 if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest) 5033 Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); 5034 // Add CTR register as callee so a bctr can be emitted later. 5035 if (isTailCall) 5036 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 5037 } 5038 5039 // If this is a direct call, pass the chain and the callee. 5040 if (Callee.getNode()) { 5041 Ops.push_back(Chain); 5042 Ops.push_back(Callee); 5043 } 5044 // If this is a tail call add stack pointer delta. 5045 if (isTailCall) 5046 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32)); 5047 5048 // Add argument registers to the end of the list so that they are known live 5049 // into the call. 5050 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 5051 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 5052 RegsToPass[i].second.getValueType())); 5053 5054 // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live 5055 // into the call. 5056 if (isSVR4ABI && isPPC64 && !isPatchPoint) { 5057 setUsesTOCBasePtr(DAG); 5058 Ops.push_back(DAG.getRegister(PPC::X2, PtrVT)); 5059 } 5060 5061 return CallOpc; 5062 } 5063 5064 SDValue PPCTargetLowering::LowerCallResult( 5065 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, 5066 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 5067 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 5068 SmallVector<CCValAssign, 16> RVLocs; 5069 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 5070 *DAG.getContext()); 5071 5072 CCRetInfo.AnalyzeCallResult( 5073 Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 5074 ? RetCC_PPC_Cold 5075 : RetCC_PPC); 5076 5077 // Copy all of the result registers out of their specified physreg. 
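  // For example, an i32 result that was promoted to a 64-bit register by the
  // return CC typically arrives here with a SExt or ZExt location; the switch
  // below wraps the copy in AssertSext/AssertZext and truncates it back to
  // the original i32 value type.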
5078 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 5079 CCValAssign &VA = RVLocs[i]; 5080 assert(VA.isRegLoc() && "Can only return in registers!"); 5081 5082 SDValue Val = DAG.getCopyFromReg(Chain, dl, 5083 VA.getLocReg(), VA.getLocVT(), InFlag); 5084 Chain = Val.getValue(1); 5085 InFlag = Val.getValue(2); 5086 5087 switch (VA.getLocInfo()) { 5088 default: llvm_unreachable("Unknown loc info!"); 5089 case CCValAssign::Full: break; 5090 case CCValAssign::AExt: 5091 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 5092 break; 5093 case CCValAssign::ZExt: 5094 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val, 5095 DAG.getValueType(VA.getValVT())); 5096 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 5097 break; 5098 case CCValAssign::SExt: 5099 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val, 5100 DAG.getValueType(VA.getValVT())); 5101 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 5102 break; 5103 } 5104 5105 InVals.push_back(Val); 5106 } 5107 5108 return Chain; 5109 } 5110 5111 SDValue PPCTargetLowering::FinishCall( 5112 CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg, 5113 bool isPatchPoint, bool hasNest, SelectionDAG &DAG, 5114 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue InFlag, 5115 SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff, 5116 unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins, 5117 SmallVectorImpl<SDValue> &InVals, ImmutableCallSite CS) const { 5118 std::vector<EVT> NodeTys; 5119 SmallVector<SDValue, 8> Ops; 5120 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl, 5121 SPDiff, isTailCall, isPatchPoint, hasNest, 5122 RegsToPass, Ops, NodeTys, CS, Subtarget); 5123 5124 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls 5125 if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64()) 5126 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); 5127 5128 // When performing tail call optimization the callee pops its arguments off 5129 // the stack. Account for this here so these bytes can be pushed back on in 5130 // PPCFrameLowering::eliminateCallFramePseudoInstr. 5131 int BytesCalleePops = 5132 (CallConv == CallingConv::Fast && 5133 getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0; 5134 5135 // Add a register mask operand representing the call-preserved registers. 5136 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 5137 const uint32_t *Mask = 5138 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv); 5139 assert(Mask && "Missing call preserved mask for calling convention"); 5140 Ops.push_back(DAG.getRegisterMask(Mask)); 5141 5142 if (InFlag.getNode()) 5143 Ops.push_back(InFlag); 5144 5145 // Emit tail call. 5146 if (isTailCall) { 5147 assert(((Callee.getOpcode() == ISD::Register && 5148 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) || 5149 Callee.getOpcode() == ISD::TargetExternalSymbol || 5150 Callee.getOpcode() == ISD::TargetGlobalAddress || 5151 isa<ConstantSDNode>(Callee)) && 5152 "Expecting an global address, external symbol, absolute value or register"); 5153 5154 DAG.getMachineFunction().getFrameInfo().setHasTailCall(); 5155 return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops); 5156 } 5157 5158 // Add a NOP immediately after the branch instruction when using the 64-bit 5159 // SVR4 ABI. 
  // At link time, if caller and callee are in different modules and thus have
  // different TOCs, the call will be replaced with a call to a stub function
  // which saves the current TOC, loads the TOC of the callee and branches to
  // the callee. The NOP will be replaced with a load instruction which
  // restores the TOC of the caller from the TOC save slot of the current
  // stack frame. If caller and callee belong to the same module (and have the
  // same TOC), the NOP will remain unchanged.

  MachineFunction &MF = DAG.getMachineFunction();
  if (!isTailCall && Subtarget.isSVR4ABI() && Subtarget.isPPC64() &&
      !isPatchPoint) {
    if (CallOpc == PPCISD::BCTRL) {
      // This is a call through a function pointer.
      // Restore the caller's TOC from the save area into R2.
      // See PrepareCall() for more information about calls through function
      // pointers in the 64-bit SVR4 ABI.
      // We are using a target-specific load with r2 hard-coded, because the
      // result of a target-independent load would never go directly into r2,
      // since r2 is a reserved register (which prevents the register allocator
      // from allocating it), resulting in an additional register being
      // allocated and an unnecessary move instruction being generated.
      CallOpc = PPCISD::BCTRL_LOAD_TOC;

      EVT PtrVT = getPointerTy(DAG.getDataLayout());
      SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
      unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
      SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
      SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);

      // The address needs to go after the chain input but before the flag (or
      // any other variadic arguments).
      Ops.insert(std::next(Ops.begin()), AddTOC);
    } else if (CallOpc == PPCISD::CALL &&
               !callsShareTOCBase(&MF.getFunction(), Callee, DAG.getTarget())) {
      // Otherwise, insert a NOP for non-local calls.
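      // The emitted sequence is then roughly:
      //   bl callee
      //   nop     (may be rewritten by the linker to ld r2, N(r1))
      // where N is the TOC save offset (typically 40 under ELFv1 and 24 under
      // ELFv2), restoring the caller's TOC after a cross-TOC call.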
5194 CallOpc = PPCISD::CALL_NOP; 5195 } 5196 } 5197 5198 Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops); 5199 InFlag = Chain.getValue(1); 5200 5201 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 5202 DAG.getIntPtrConstant(BytesCalleePops, dl, true), 5203 InFlag, dl); 5204 if (!Ins.empty()) 5205 InFlag = Chain.getValue(1); 5206 5207 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 5208 Ins, dl, DAG, InVals); 5209 } 5210 5211 SDValue 5212 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 5213 SmallVectorImpl<SDValue> &InVals) const { 5214 SelectionDAG &DAG = CLI.DAG; 5215 SDLoc &dl = CLI.DL; 5216 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 5217 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 5218 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 5219 SDValue Chain = CLI.Chain; 5220 SDValue Callee = CLI.Callee; 5221 bool &isTailCall = CLI.IsTailCall; 5222 CallingConv::ID CallConv = CLI.CallConv; 5223 bool isVarArg = CLI.IsVarArg; 5224 bool isPatchPoint = CLI.IsPatchPoint; 5225 ImmutableCallSite CS = CLI.CS; 5226 5227 if (isTailCall) { 5228 if (Subtarget.useLongCalls() && !(CS && CS.isMustTailCall())) 5229 isTailCall = false; 5230 else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) 5231 isTailCall = 5232 IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS, 5233 isVarArg, Outs, Ins, DAG); 5234 else 5235 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 5236 Ins, DAG); 5237 if (isTailCall) { 5238 ++NumTailCalls; 5239 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 5240 ++NumSiblingCalls; 5241 5242 assert(isa<GlobalAddressSDNode>(Callee) && 5243 "Callee should be an llvm::Function object."); 5244 LLVM_DEBUG( 5245 const GlobalValue *GV = 5246 cast<GlobalAddressSDNode>(Callee)->getGlobal(); 5247 const unsigned Width = 5248 80 - strlen("TCO caller: ") - strlen(", callee linkage: 0, 0"); 5249 dbgs() << "TCO caller: " 5250 << left_justify(DAG.getMachineFunction().getName(), Width) 5251 << ", callee linkage: " << GV->getVisibility() << ", " 5252 << GV->getLinkage() << "\n"); 5253 } 5254 } 5255 5256 if (!isTailCall && CS && CS.isMustTailCall()) 5257 report_fatal_error("failed to perform tail call elimination on a call " 5258 "site marked musttail"); 5259 5260 // When long calls (i.e. indirect calls) are always used, calls are always 5261 // made via function pointer. If we have a function name, first translate it 5262 // into a pointer. 
  if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
      !isTailCall)
    Callee = LowerGlobalAddress(Callee, DAG);

  if (Subtarget.isSVR4ABI()) {
    if (Subtarget.isPPC64())
      return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
                              isTailCall, isPatchPoint, Outs, OutVals, Ins,
                              dl, DAG, InVals, CS);
    else
      return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
                              isTailCall, isPatchPoint, Outs, OutVals, Ins,
                              dl, DAG, InVals, CS);
  }

  return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
                          isTailCall, isPatchPoint, Outs, OutVals, Ins,
                          dl, DAG, InVals, CS);
}

SDValue PPCTargetLowering::LowerCall_32SVR4(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, bool isPatchPoint,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    ImmutableCallSite CS) const {
  // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
  // of the 32-bit SVR4 ABI stack frame layout.

  assert((CallConv == CallingConv::C ||
          CallConv == CallingConv::Cold ||
          CallConv == CallingConv::Fast) && "Unknown calling convention!");

  unsigned PtrByteSize = 4;

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence, the frame pointer will be used for dynamic
  // stack allocation and for restoring the caller's stack pointer in this
  // function's epilogue. This is done because, by tail calling, the called
  // function might overwrite the value in this function's (MF) stack pointer
  // stack slot 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, the parameter list area and the part of the local variable space
  // which contains copies of aggregates which are passed by value.

  // Assign locations to all of the outgoing arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  PPCCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  // Reserve space for the linkage area on the stack.
  CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
                       PtrByteSize);
  if (useSoftFloat())
    CCInfo.PreAnalyzeCallOperands(Outs);

  if (isVarArg) {
    // Handle fixed and variable vector arguments differently.
    // Fixed vector arguments go into registers as long as registers are
    // available. Variable vector arguments always go into memory.
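    // For example, in a 32-bit SVR4 call to 'void f(<4 x i32>, ...)', the
    // fixed <4 x i32> argument may be assigned to an Altivec register (v2) by
    // CC_PPC32_SVR4, while the same type passed through the '...' is sent by
    // CC_PPC32_SVR4_VarArg to the parameter area in memory.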
5329 unsigned NumArgs = Outs.size(); 5330 5331 for (unsigned i = 0; i != NumArgs; ++i) { 5332 MVT ArgVT = Outs[i].VT; 5333 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 5334 bool Result; 5335 5336 if (Outs[i].IsFixed) { 5337 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, 5338 CCInfo); 5339 } else { 5340 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, 5341 ArgFlags, CCInfo); 5342 } 5343 5344 if (Result) { 5345 #ifndef NDEBUG 5346 errs() << "Call operand #" << i << " has unhandled type " 5347 << EVT(ArgVT).getEVTString() << "\n"; 5348 #endif 5349 llvm_unreachable(nullptr); 5350 } 5351 } 5352 } else { 5353 // All arguments are treated the same. 5354 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4); 5355 } 5356 CCInfo.clearWasPPCF128(); 5357 5358 // Assign locations to all of the outgoing aggregate by value arguments. 5359 SmallVector<CCValAssign, 16> ByValArgLocs; 5360 CCState CCByValInfo(CallConv, isVarArg, MF, ByValArgLocs, *DAG.getContext()); 5361 5362 // Reserve stack space for the allocations in CCInfo. 5363 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 5364 5365 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal); 5366 5367 // Size of the linkage area, parameter list area and the part of the local 5368 // space variable where copies of aggregates which are passed by value are 5369 // stored. 5370 unsigned NumBytes = CCByValInfo.getNextStackOffset(); 5371 5372 // Calculate by how many bytes the stack has to be adjusted in case of tail 5373 // call optimization. 5374 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 5375 5376 // Adjust the stack pointer for the new arguments... 5377 // These operations are automatically eliminated by the prolog/epilog pass 5378 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 5379 SDValue CallSeqStart = Chain; 5380 5381 // Load the return address and frame pointer so it can be moved somewhere else 5382 // later. 5383 SDValue LROp, FPOp; 5384 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 5385 5386 // Set up a copy of the stack pointer for use loading and storing any 5387 // arguments that may not fit in the registers available for argument 5388 // passing. 5389 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 5390 5391 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 5392 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 5393 SmallVector<SDValue, 8> MemOpChains; 5394 5395 bool seenFloatArg = false; 5396 // Walk the register/memloc assignments, inserting copies/loads. 5397 for (unsigned i = 0, j = 0, e = ArgLocs.size(); 5398 i != e; 5399 ++i) { 5400 CCValAssign &VA = ArgLocs[i]; 5401 SDValue Arg = OutVals[i]; 5402 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5403 5404 if (Flags.isByVal()) { 5405 // Argument is an aggregate which is passed by value, thus we need to 5406 // create a copy of it in the local variable space of the current stack 5407 // frame (which is the stack frame of the caller) and pass the address of 5408 // this copy to the callee. 5409 assert((j < ByValArgLocs.size()) && "Index out of bounds!"); 5410 CCValAssign &ByValVA = ByValArgLocs[j++]; 5411 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); 5412 5413 // Memory reserved in the local variable space of the callers stack frame. 
5414 unsigned LocMemOffset = ByValVA.getLocMemOffset(); 5415 5416 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 5417 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 5418 StackPtr, PtrOff); 5419 5420 // Create a copy of the argument in the local area of the current 5421 // stack frame. 5422 SDValue MemcpyCall = 5423 CreateCopyOfByValArgument(Arg, PtrOff, 5424 CallSeqStart.getNode()->getOperand(0), 5425 Flags, DAG, dl); 5426 5427 // This must go outside the CALLSEQ_START..END. 5428 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0, 5429 SDLoc(MemcpyCall)); 5430 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 5431 NewCallSeqStart.getNode()); 5432 Chain = CallSeqStart = NewCallSeqStart; 5433 5434 // Pass the address of the aggregate copy on the stack either in a 5435 // physical register or in the parameter list area of the current stack 5436 // frame to the callee. 5437 Arg = PtrOff; 5438 } 5439 5440 if (VA.isRegLoc()) { 5441 if (Arg.getValueType() == MVT::i1) 5442 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg); 5443 5444 seenFloatArg |= VA.getLocVT().isFloatingPoint(); 5445 // Put argument in a physical register. 5446 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 5447 } else { 5448 // Put argument in the parameter list area of the current stack frame. 5449 assert(VA.isMemLoc()); 5450 unsigned LocMemOffset = VA.getLocMemOffset(); 5451 5452 if (!isTailCall) { 5453 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 5454 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 5455 StackPtr, PtrOff); 5456 5457 MemOpChains.push_back( 5458 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 5459 } else { 5460 // Calculate and remember argument location. 5461 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset, 5462 TailCallArguments); 5463 } 5464 } 5465 } 5466 5467 if (!MemOpChains.empty()) 5468 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5469 5470 // Build a sequence of copy-to-reg nodes chained together with token chain 5471 // and flag operands which copy the outgoing args into the appropriate regs. 5472 SDValue InFlag; 5473 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 5474 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 5475 RegsToPass[i].second, InFlag); 5476 InFlag = Chain.getValue(1); 5477 } 5478 5479 // Set CR bit 6 to true if this is a vararg call with floating args passed in 5480 // registers. 5481 if (isVarArg) { 5482 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 5483 SDValue Ops[] = { Chain, InFlag }; 5484 5485 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET, 5486 dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1)); 5487 5488 InFlag = Chain.getValue(1); 5489 } 5490 5491 if (isTailCall) 5492 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 5493 TailCallArguments); 5494 5495 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, 5496 /* unused except on PPC64 ELFv1 */ false, DAG, 5497 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 5498 NumBytes, Ins, InVals, CS); 5499 } 5500 5501 // Copy an argument into memory, being careful to do this outside the 5502 // call sequence for the call to which the argument belongs. 
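// The memcpy may itself be lowered to a libcall, and call sequences must not
// nest, so it cannot be chained inside the CALLSEQ_START..CALLSEQ_END region
// of the call being lowered. The helper below therefore re-creates the
// CALLSEQ_START node on top of the memcpy chain and splices it into the DAG
// in place of the original one.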
5503 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq( 5504 SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags, 5505 SelectionDAG &DAG, const SDLoc &dl) const { 5506 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, 5507 CallSeqStart.getNode()->getOperand(0), 5508 Flags, DAG, dl); 5509 // The MEMCPY must go outside the CALLSEQ_START..END. 5510 int64_t FrameSize = CallSeqStart.getConstantOperandVal(1); 5511 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0, 5512 SDLoc(MemcpyCall)); 5513 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 5514 NewCallSeqStart.getNode()); 5515 return NewCallSeqStart; 5516 } 5517 5518 SDValue PPCTargetLowering::LowerCall_64SVR4( 5519 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, 5520 bool isTailCall, bool isPatchPoint, 5521 const SmallVectorImpl<ISD::OutputArg> &Outs, 5522 const SmallVectorImpl<SDValue> &OutVals, 5523 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 5524 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, 5525 ImmutableCallSite CS) const { 5526 bool isELFv2ABI = Subtarget.isELFv2ABI(); 5527 bool isLittleEndian = Subtarget.isLittleEndian(); 5528 unsigned NumOps = Outs.size(); 5529 bool hasNest = false; 5530 bool IsSibCall = false; 5531 5532 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 5533 unsigned PtrByteSize = 8; 5534 5535 MachineFunction &MF = DAG.getMachineFunction(); 5536 5537 if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt) 5538 IsSibCall = true; 5539 5540 // Mark this function as potentially containing a function that contains a 5541 // tail call. As a consequence the frame pointer will be used for dynamicalloc 5542 // and restoring the callers stack pointer in this functions epilog. This is 5543 // done because by tail calling the called function might overwrite the value 5544 // in this function's (MF) stack pointer stack slot 0(SP). 5545 if (getTargetMachine().Options.GuaranteedTailCallOpt && 5546 CallConv == CallingConv::Fast) 5547 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 5548 5549 assert(!(CallConv == CallingConv::Fast && isVarArg) && 5550 "fastcc not supported on varargs functions"); 5551 5552 // Count how many bytes are to be pushed on the stack, including the linkage 5553 // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes 5554 // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage 5555 // area is 32 bytes reserved space for [SP][CR][LR][TOC]. 5556 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 5557 unsigned NumBytes = LinkageSize; 5558 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 5559 unsigned &QFPR_idx = FPR_idx; 5560 5561 static const MCPhysReg GPR[] = { 5562 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 5563 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 5564 }; 5565 static const MCPhysReg VR[] = { 5566 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 5567 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 5568 }; 5569 5570 const unsigned NumGPRs = array_lengthof(GPR); 5571 const unsigned NumFPRs = useSoftFloat() ? 0 : 13; 5572 const unsigned NumVRs = array_lengthof(VR); 5573 const unsigned NumQFPRs = NumFPRs; 5574 5575 // On ELFv2, we can avoid allocating the parameter area if all the arguments 5576 // can be passed to the callee in registers. 5577 // For the fast calling convention, there is another check below. 
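  // For example, 'void f(long, long, double)' needs no parameter save area
  // under ELFv2: two GPRs and one FPR suffice. Nine integer arguments, on the
  // other hand, overflow the eight argument GPRs and force HasParameterArea
  // back to true in the loop below.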
5578 // Note: We should keep consistent with LowerFormalArguments_64SVR4() 5579 bool HasParameterArea = !isELFv2ABI || isVarArg || CallConv == CallingConv::Fast; 5580 if (!HasParameterArea) { 5581 unsigned ParamAreaSize = NumGPRs * PtrByteSize; 5582 unsigned AvailableFPRs = NumFPRs; 5583 unsigned AvailableVRs = NumVRs; 5584 unsigned NumBytesTmp = NumBytes; 5585 for (unsigned i = 0; i != NumOps; ++i) { 5586 if (Outs[i].Flags.isNest()) continue; 5587 if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags, 5588 PtrByteSize, LinkageSize, ParamAreaSize, 5589 NumBytesTmp, AvailableFPRs, AvailableVRs, 5590 Subtarget.hasQPX())) 5591 HasParameterArea = true; 5592 } 5593 } 5594 5595 // When using the fast calling convention, we don't provide backing for 5596 // arguments that will be in registers. 5597 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0; 5598 5599 // Avoid allocating parameter area for fastcc functions if all the arguments 5600 // can be passed in the registers. 5601 if (CallConv == CallingConv::Fast) 5602 HasParameterArea = false; 5603 5604 // Add up all the space actually used. 5605 for (unsigned i = 0; i != NumOps; ++i) { 5606 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5607 EVT ArgVT = Outs[i].VT; 5608 EVT OrigVT = Outs[i].ArgVT; 5609 5610 if (Flags.isNest()) 5611 continue; 5612 5613 if (CallConv == CallingConv::Fast) { 5614 if (Flags.isByVal()) { 5615 NumGPRsUsed += (Flags.getByValSize()+7)/8; 5616 if (NumGPRsUsed > NumGPRs) 5617 HasParameterArea = true; 5618 } else { 5619 switch (ArgVT.getSimpleVT().SimpleTy) { 5620 default: llvm_unreachable("Unexpected ValueType for argument!"); 5621 case MVT::i1: 5622 case MVT::i32: 5623 case MVT::i64: 5624 if (++NumGPRsUsed <= NumGPRs) 5625 continue; 5626 break; 5627 case MVT::v4i32: 5628 case MVT::v8i16: 5629 case MVT::v16i8: 5630 case MVT::v2f64: 5631 case MVT::v2i64: 5632 case MVT::v1i128: 5633 case MVT::f128: 5634 if (++NumVRsUsed <= NumVRs) 5635 continue; 5636 break; 5637 case MVT::v4f32: 5638 // When using QPX, this is handled like a FP register, otherwise, it 5639 // is an Altivec register. 5640 if (Subtarget.hasQPX()) { 5641 if (++NumFPRsUsed <= NumFPRs) 5642 continue; 5643 } else { 5644 if (++NumVRsUsed <= NumVRs) 5645 continue; 5646 } 5647 break; 5648 case MVT::f32: 5649 case MVT::f64: 5650 case MVT::v4f64: // QPX 5651 case MVT::v4i1: // QPX 5652 if (++NumFPRsUsed <= NumFPRs) 5653 continue; 5654 break; 5655 } 5656 HasParameterArea = true; 5657 } 5658 } 5659 5660 /* Respect alignment of argument on the stack. */ 5661 unsigned Align = 5662 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 5663 NumBytes = ((NumBytes + Align - 1) / Align) * Align; 5664 5665 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 5666 if (Flags.isInConsecutiveRegsLast()) 5667 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 5668 } 5669 5670 unsigned NumBytesActuallyUsed = NumBytes; 5671 5672 // In the old ELFv1 ABI, 5673 // the prolog code of the callee may store up to 8 GPR argument registers to 5674 // the stack, allowing va_start to index over them in memory if its varargs. 5675 // Because we cannot tell if this is needed on the caller side, we have to 5676 // conservatively assume that it is needed. As such, make sure we have at 5677 // least enough stack space for the caller to store the 8 GPRs. 5678 // In the ELFv2 ABI, we allocate the parameter area iff a callee 5679 // really requires memory operands, e.g. a vararg function. 
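  // To make the floor below concrete: with the 32-byte ELFv2 linkage area and
  // HasParameterArea set, NumBytes is raised to at least 32 + 8 * 8 = 96
  // bytes; with the 48-byte ELFv1 linkage area the floor is 48 + 64 = 112
  // bytes.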
5680 if (HasParameterArea) 5681 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize); 5682 else 5683 NumBytes = LinkageSize; 5684 5685 // Tail call needs the stack to be aligned. 5686 if (getTargetMachine().Options.GuaranteedTailCallOpt && 5687 CallConv == CallingConv::Fast) 5688 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes); 5689 5690 int SPDiff = 0; 5691 5692 // Calculate by how many bytes the stack has to be adjusted in case of tail 5693 // call optimization. 5694 if (!IsSibCall) 5695 SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 5696 5697 // To protect arguments on the stack from being clobbered in a tail call, 5698 // force all the loads to happen before doing any other lowering. 5699 if (isTailCall) 5700 Chain = DAG.getStackArgumentTokenFactor(Chain); 5701 5702 // Adjust the stack pointer for the new arguments... 5703 // These operations are automatically eliminated by the prolog/epilog pass 5704 if (!IsSibCall) 5705 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 5706 SDValue CallSeqStart = Chain; 5707 5708 // Load the return address and frame pointer so it can be move somewhere else 5709 // later. 5710 SDValue LROp, FPOp; 5711 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 5712 5713 // Set up a copy of the stack pointer for use loading and storing any 5714 // arguments that may not fit in the registers available for argument 5715 // passing. 5716 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 5717 5718 // Figure out which arguments are going to go in registers, and which in 5719 // memory. Also, if this is a vararg function, floating point operations 5720 // must be stored to our stack, and loaded into integer regs as well, if 5721 // any integer regs are available for argument passing. 5722 unsigned ArgOffset = LinkageSize; 5723 5724 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 5725 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 5726 5727 SmallVector<SDValue, 8> MemOpChains; 5728 for (unsigned i = 0; i != NumOps; ++i) { 5729 SDValue Arg = OutVals[i]; 5730 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5731 EVT ArgVT = Outs[i].VT; 5732 EVT OrigVT = Outs[i].ArgVT; 5733 5734 // PtrOff will be used to store the current argument to the stack if a 5735 // register cannot be found for it. 5736 SDValue PtrOff; 5737 5738 // We re-align the argument offset for each argument, except when using the 5739 // fast calling convention, when we need to make sure we do that only when 5740 // we'll actually use a stack slot. 5741 auto ComputePtrOff = [&]() { 5742 /* Respect alignment of argument on the stack. */ 5743 unsigned Align = 5744 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 5745 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 5746 5747 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 5748 5749 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 5750 }; 5751 5752 if (CallConv != CallingConv::Fast) { 5753 ComputePtrOff(); 5754 5755 /* Compute GPR index associated with argument offset. */ 5756 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 5757 GPR_idx = std::min(GPR_idx, NumGPRs); 5758 } 5759 5760 // Promote integers to 64-bit values. 5761 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) { 5762 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 5763 unsigned ExtOp = Flags.isSExt() ? 
ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 5764 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 5765 } 5766 5767 // FIXME memcpy is used way more than necessary. Correctness first. 5768 // Note: "by value" is code for passing a structure by value, not 5769 // basic types. 5770 if (Flags.isByVal()) { 5771 // Note: Size includes alignment padding, so 5772 // struct x { short a; char b; } 5773 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 5774 // These are the proper values we need for right-justifying the 5775 // aggregate in a parameter register. 5776 unsigned Size = Flags.getByValSize(); 5777 5778 // An empty aggregate parameter takes up no storage and no 5779 // registers. 5780 if (Size == 0) 5781 continue; 5782 5783 if (CallConv == CallingConv::Fast) 5784 ComputePtrOff(); 5785 5786 // All aggregates smaller than 8 bytes must be passed right-justified. 5787 if (Size==1 || Size==2 || Size==4) { 5788 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 5789 if (GPR_idx != NumGPRs) { 5790 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 5791 MachinePointerInfo(), VT); 5792 MemOpChains.push_back(Load.getValue(1)); 5793 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5794 5795 ArgOffset += PtrByteSize; 5796 continue; 5797 } 5798 } 5799 5800 if (GPR_idx == NumGPRs && Size < 8) { 5801 SDValue AddPtr = PtrOff; 5802 if (!isLittleEndian) { 5803 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 5804 PtrOff.getValueType()); 5805 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5806 } 5807 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5808 CallSeqStart, 5809 Flags, DAG, dl); 5810 ArgOffset += PtrByteSize; 5811 continue; 5812 } 5813 // Copy entire object into memory. There are cases where gcc-generated 5814 // code assumes it is there, even if it could be put entirely into 5815 // registers. (This is not what the doc says.) 5816 5817 // FIXME: The above statement is likely due to a misunderstanding of the 5818 // documents. All arguments must be copied into the parameter area BY 5819 // THE CALLEE in the event that the callee takes the address of any 5820 // formal argument. That has not yet been implemented. However, it is 5821 // reasonable to use the stack area as a staging area for the register 5822 // load. 5823 5824 // Skip this for small aggregates, as we will use the same slot for a 5825 // right-justified copy, below. 5826 if (Size >= 8) 5827 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5828 CallSeqStart, 5829 Flags, DAG, dl); 5830 5831 // When a register is available, pass a small aggregate right-justified. 5832 if (Size < 8 && GPR_idx != NumGPRs) { 5833 // The easiest way to get this right-justified in a register 5834 // is to copy the structure into the rightmost portion of a 5835 // local variable slot, then load the whole slot into the 5836 // register. 5837 // FIXME: The memcpy seems to produce pretty awful code for 5838 // small aggregates, particularly for packed ones. 5839 // FIXME: It would be preferable to use the slot in the 5840 // parameter save area instead of a new local variable. 5841 SDValue AddPtr = PtrOff; 5842 if (!isLittleEndian) { 5843 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType()); 5844 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5845 } 5846 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5847 CallSeqStart, 5848 Flags, DAG, dl); 5849 5850 // Load the slot into the register. 
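// For illustration, with a 3-byte aggregate on a big-endian target the
// memcpy above placed the bytes at PtrOff + 5, so the doubleword load below
// leaves them in the low-order (rightmost) bytes of the GPR -- that is, the
// aggregate ends up right-justified, as required for small aggregates.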
5851 SDValue Load = 5852 DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo()); 5853 MemOpChains.push_back(Load.getValue(1)); 5854 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5855 5856 // Done with this argument. 5857 ArgOffset += PtrByteSize; 5858 continue; 5859 } 5860 5861 // For aggregates larger than PtrByteSize, copy the pieces of the 5862 // object that fit into registers from the parameter save area. 5863 for (unsigned j=0; j<Size; j+=PtrByteSize) { 5864 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 5865 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 5866 if (GPR_idx != NumGPRs) { 5867 SDValue Load = 5868 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo()); 5869 MemOpChains.push_back(Load.getValue(1)); 5870 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5871 ArgOffset += PtrByteSize; 5872 } else { 5873 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 5874 break; 5875 } 5876 } 5877 continue; 5878 } 5879 5880 switch (Arg.getSimpleValueType().SimpleTy) { 5881 default: llvm_unreachable("Unexpected ValueType for argument!"); 5882 case MVT::i1: 5883 case MVT::i32: 5884 case MVT::i64: 5885 if (Flags.isNest()) { 5886 // The 'nest' parameter, if any, is passed in R11. 5887 RegsToPass.push_back(std::make_pair(PPC::X11, Arg)); 5888 hasNest = true; 5889 break; 5890 } 5891 5892 // These can be scalar arguments or elements of an integer array type 5893 // passed directly. Clang may use those instead of "byval" aggregate 5894 // types to avoid forcing arguments to memory unnecessarily. 5895 if (GPR_idx != NumGPRs) { 5896 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 5897 } else { 5898 if (CallConv == CallingConv::Fast) 5899 ComputePtrOff(); 5900 5901 assert(HasParameterArea && 5902 "Parameter area must exist to pass an argument in memory."); 5903 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5904 true, isTailCall, false, MemOpChains, 5905 TailCallArguments, dl); 5906 if (CallConv == CallingConv::Fast) 5907 ArgOffset += PtrByteSize; 5908 } 5909 if (CallConv != CallingConv::Fast) 5910 ArgOffset += PtrByteSize; 5911 break; 5912 case MVT::f32: 5913 case MVT::f64: { 5914 // These can be scalar arguments or elements of a float array type 5915 // passed directly. The latter are used to implement ELFv2 homogenous 5916 // float aggregates. 5917 5918 // Named arguments go into FPRs first, and once they overflow, the 5919 // remaining arguments go into GPRs and then the parameter save area. 5920 // Unnamed arguments for vararg functions always go to GPRs and 5921 // then the parameter save area. For now, put all arguments to vararg 5922 // routines always in both locations (FPR *and* GPR or stack slot). 5923 bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs; 5924 bool NeededLoad = false; 5925 5926 // First load the argument into the next available FPR. 5927 if (FPR_idx != NumFPRs) 5928 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 5929 5930 // Next, load the argument into GPR or stack slot if needed. 5931 if (!NeedGPROrStack) 5932 ; 5933 else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) { 5934 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 5935 // once we support fp <-> gpr moves. 5936 5937 // In the non-vararg case, this can only ever happen in the 5938 // presence of f32 array types, since otherwise we never run 5939 // out of FPRs before running out of GPRs. 
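// Roughly, the cases handled below are: f64 is bitcast directly to i64; a
// lone (non-array) f32 is bitcast and any-extended to i64; an f32 array
// element at offset % 8 == 4 is combined with its predecessor via
// BUILD_PAIR so the pair shares one GPR; a final array element at an even
// offset is extended and, on big-endian, shifted into the high (first) word
// of the GPR; any other even-offset element is skipped here and merged when
// its successor is processed.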
5940 SDValue ArgVal; 5941 5942 // Double values are always passed in a single GPR. 5943 if (Arg.getValueType() != MVT::f32) { 5944 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg); 5945 5946 // Non-array float values are extended and passed in a GPR. 5947 } else if (!Flags.isInConsecutiveRegs()) { 5948 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 5949 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal); 5950 5951 // If we have an array of floats, we collect every odd element 5952 // together with its predecessor into one GPR. 5953 } else if (ArgOffset % PtrByteSize != 0) { 5954 SDValue Lo, Hi; 5955 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]); 5956 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 5957 if (!isLittleEndian) 5958 std::swap(Lo, Hi); 5959 ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 5960 5961 // The final element, if even, goes into the first half of a GPR. 5962 } else if (Flags.isInConsecutiveRegsLast()) { 5963 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 5964 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal); 5965 if (!isLittleEndian) 5966 ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal, 5967 DAG.getConstant(32, dl, MVT::i32)); 5968 5969 // Non-final even elements are skipped; they will be handled 5970 // together the with subsequent argument on the next go-around. 5971 } else 5972 ArgVal = SDValue(); 5973 5974 if (ArgVal.getNode()) 5975 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal)); 5976 } else { 5977 if (CallConv == CallingConv::Fast) 5978 ComputePtrOff(); 5979 5980 // Single-precision floating-point values are mapped to the 5981 // second (rightmost) word of the stack doubleword. 5982 if (Arg.getValueType() == MVT::f32 && 5983 !isLittleEndian && !Flags.isInConsecutiveRegs()) { 5984 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 5985 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 5986 } 5987 5988 assert(HasParameterArea && 5989 "Parameter area must exist to pass an argument in memory."); 5990 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5991 true, isTailCall, false, MemOpChains, 5992 TailCallArguments, dl); 5993 5994 NeededLoad = true; 5995 } 5996 // When passing an array of floats, the array occupies consecutive 5997 // space in the argument area; only round up to the next doubleword 5998 // at the end of the array. Otherwise, each float takes 8 bytes. 5999 if (CallConv != CallingConv::Fast || NeededLoad) { 6000 ArgOffset += (Arg.getValueType() == MVT::f32 && 6001 Flags.isInConsecutiveRegs()) ? 4 : 8; 6002 if (Flags.isInConsecutiveRegsLast()) 6003 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 6004 } 6005 break; 6006 } 6007 case MVT::v4f32: 6008 case MVT::v4i32: 6009 case MVT::v8i16: 6010 case MVT::v16i8: 6011 case MVT::v2f64: 6012 case MVT::v2i64: 6013 case MVT::v1i128: 6014 case MVT::f128: 6015 if (!Subtarget.hasQPX()) { 6016 // These can be scalar arguments or elements of a vector array type 6017 // passed directly. The latter are used to implement ELFv2 homogenous 6018 // vector aggregates. 6019 6020 // For a varargs call, named arguments go into VRs or on the stack as 6021 // usual; unnamed arguments always go to the stack or the corresponding 6022 // GPRs when within range. For now, we always put the value in both 6023 // locations (or even all three). 
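// For example, a varargs v4i32 argument with a VR and GPRs still free is
// stored once to its 16-byte parameter-area slot below, reloaded from there
// into the next VR, and also reloaded as two doublewords into the next two
// GPRs.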
6024 if (isVarArg) { 6025 assert(HasParameterArea && 6026 "Parameter area must exist if we have a varargs call."); 6027 // We could elide this store in the case where the object fits 6028 // entirely in R registers. Maybe later. 6029 SDValue Store = 6030 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6031 MemOpChains.push_back(Store); 6032 if (VR_idx != NumVRs) { 6033 SDValue Load = 6034 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo()); 6035 MemOpChains.push_back(Load.getValue(1)); 6036 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 6037 } 6038 ArgOffset += 16; 6039 for (unsigned i=0; i<16; i+=PtrByteSize) { 6040 if (GPR_idx == NumGPRs) 6041 break; 6042 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 6043 DAG.getConstant(i, dl, PtrVT)); 6044 SDValue Load = 6045 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 6046 MemOpChains.push_back(Load.getValue(1)); 6047 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6048 } 6049 break; 6050 } 6051 6052 // Non-varargs Altivec params go into VRs or on the stack. 6053 if (VR_idx != NumVRs) { 6054 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 6055 } else { 6056 if (CallConv == CallingConv::Fast) 6057 ComputePtrOff(); 6058 6059 assert(HasParameterArea && 6060 "Parameter area must exist to pass an argument in memory."); 6061 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6062 true, isTailCall, true, MemOpChains, 6063 TailCallArguments, dl); 6064 if (CallConv == CallingConv::Fast) 6065 ArgOffset += 16; 6066 } 6067 6068 if (CallConv != CallingConv::Fast) 6069 ArgOffset += 16; 6070 break; 6071 } // not QPX 6072 6073 assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 && 6074 "Invalid QPX parameter type"); 6075 6076 /* fall through */ 6077 case MVT::v4f64: 6078 case MVT::v4i1: { 6079 bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32; 6080 if (isVarArg) { 6081 assert(HasParameterArea && 6082 "Parameter area must exist if we have a varargs call."); 6083 // We could elide this store in the case where the object fits 6084 // entirely in R registers. Maybe later. 6085 SDValue Store = 6086 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6087 MemOpChains.push_back(Store); 6088 if (QFPR_idx != NumQFPRs) { 6089 SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store, 6090 PtrOff, MachinePointerInfo()); 6091 MemOpChains.push_back(Load.getValue(1)); 6092 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load)); 6093 } 6094 ArgOffset += (IsF32 ? 16 : 32); 6095 for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) { 6096 if (GPR_idx == NumGPRs) 6097 break; 6098 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 6099 DAG.getConstant(i, dl, PtrVT)); 6100 SDValue Load = 6101 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 6102 MemOpChains.push_back(Load.getValue(1)); 6103 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6104 } 6105 break; 6106 } 6107 6108 // Non-varargs QPX params go into registers or on the stack. 6109 if (QFPR_idx != NumQFPRs) { 6110 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg)); 6111 } else { 6112 if (CallConv == CallingConv::Fast) 6113 ComputePtrOff(); 6114 6115 assert(HasParameterArea && 6116 "Parameter area must exist to pass an argument in memory."); 6117 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6118 true, isTailCall, true, MemOpChains, 6119 TailCallArguments, dl); 6120 if (CallConv == CallingConv::Fast) 6121 ArgOffset += (IsF32 ? 
16 : 32);
6122 }
6123
6124 if (CallConv != CallingConv::Fast)
6125 ArgOffset += (IsF32 ? 16 : 32);
6126 break;
6127 }
6128 }
6129 }
6130
6131 assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6132 "mismatch in size of parameter area");
6133 (void)NumBytesActuallyUsed;
6134
6135 if (!MemOpChains.empty())
6136 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6137
6138 // Check if this is an indirect call (MTCTR/BCTRL).
6139 // See PrepareCall() for more information about calls through function
6140 // pointers in the 64-bit SVR4 ABI.
6141 if (!isTailCall && !isPatchPoint &&
6142 !isFunctionGlobalAddress(Callee) &&
6143 !isa<ExternalSymbolSDNode>(Callee)) {
6144 // Load r2 into a virtual register and store it to the TOC save area.
6145 setUsesTOCBasePtr(DAG);
6146 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6147 // TOC save area offset.
6148 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6149 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6150 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6151 Chain = DAG.getStore(
6152 Val.getValue(1), dl, Val, AddPtr,
6153 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
6154 // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6155 // This does not mean the MTCTR instruction must use R12; it's easier
6156 // to model this as an extra parameter, so do that.
6157 if (isELFv2ABI && !isPatchPoint)
6158 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6159 }
6160
6161 // Build a sequence of copy-to-reg nodes chained together with token chain
6162 // and flag operands which copy the outgoing args into the appropriate regs.
6163 SDValue InFlag;
6164 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6165 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6166 RegsToPass[i].second, InFlag);
6167 InFlag = Chain.getValue(1);
6168 }
6169
6170 if (isTailCall && !IsSibCall)
6171 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6172 TailCallArguments);
6173
6174 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest,
6175 DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee,
6176 SPDiff, NumBytes, Ins, InVals, CS);
6177 }
6178
6179 SDValue PPCTargetLowering::LowerCall_Darwin(
6180 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
6181 bool isTailCall, bool isPatchPoint,
6182 const SmallVectorImpl<ISD::OutputArg> &Outs,
6183 const SmallVectorImpl<SDValue> &OutVals,
6184 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6185 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6186 ImmutableCallSite CS) const {
6187 unsigned NumOps = Outs.size();
6188
6189 EVT PtrVT = getPointerTy(DAG.getDataLayout());
6190 bool isPPC64 = PtrVT == MVT::i64;
6191 unsigned PtrByteSize = isPPC64 ? 8 : 4;
6192
6193 MachineFunction &MF = DAG.getMachineFunction();
6194
6195 // Mark this function as potentially containing a function that contains a
6196 // tail call. As a consequence, the frame pointer will be used for dynamic
6197 // allocation and for restoring the caller's stack pointer in this function's
6198 // epilogue. This is done because, by tail calling, the called function might
6199 // overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
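// Note that this only fires when guaranteed tail-call optimization is
// requested (e.g. via -tailcallopt) together with the fastcc calling
// convention; ordinary calls leave the flag alone.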
6200 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6201 CallConv == CallingConv::Fast)
6202 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6203
6204 // Count how many bytes are to be pushed on the stack, including the linkage
6205 // area and the parameter passing area. We start with 24/48 bytes, which is
6206 // prereserved space for [SP][CR][LR][3 x unused].
6207 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6208 unsigned NumBytes = LinkageSize;
6209
6210 // Add up all the space actually used.
6211 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
6212 // they all go in registers, but we must reserve stack space for them for
6213 // possible use by the caller. In varargs or 64-bit calls, parameters are
6214 // assigned stack space in order, with padding so Altivec parameters are
6215 // 16-byte aligned.
6216 unsigned nAltivecParamsAtEnd = 0;
6217 for (unsigned i = 0; i != NumOps; ++i) {
6218 ISD::ArgFlagsTy Flags = Outs[i].Flags;
6219 EVT ArgVT = Outs[i].VT;
6220 // Varargs Altivec parameters are padded to a 16 byte boundary.
6221 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
6222 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
6223 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
6224 if (!isVarArg && !isPPC64) {
6225 // Non-varargs Altivec parameters go after all the non-Altivec
6226 // parameters; handle those later so we know how much padding we need.
6227 nAltivecParamsAtEnd++;
6228 continue;
6229 }
6230 // Varargs and 64-bit Altivec parameters are padded to a 16 byte boundary.
6231 NumBytes = ((NumBytes+15)/16)*16;
6232 }
6233 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6234 }
6235
6236 // Allow for Altivec parameters at the end, if needed.
6237 if (nAltivecParamsAtEnd) {
6238 NumBytes = ((NumBytes+15)/16)*16;
6239 NumBytes += 16*nAltivecParamsAtEnd;
6240 }
6241
6242 // The prolog code of the callee may store up to 8 GPR argument registers to
6243 // the stack, allowing va_start to index over them in memory if it is varargs.
6244 // Because we cannot tell if this is needed on the caller side, we have to
6245 // conservatively assume that it is needed. As such, make sure we have at
6246 // least enough stack space for the caller to store the 8 GPRs.
6247 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6248
6249 // Tail call needs the stack to be aligned.
6250 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6251 CallConv == CallingConv::Fast)
6252 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6253
6254 // Calculate by how many bytes the stack has to be adjusted in case of tail
6255 // call optimization.
6256 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
6257
6258 // To protect arguments on the stack from being clobbered in a tail call,
6259 // force all the loads to happen before doing any other lowering.
6260 if (isTailCall)
6261 Chain = DAG.getStackArgumentTokenFactor(Chain);
6262
6263 // Adjust the stack pointer for the new arguments...
6264 // These operations are automatically eliminated by the prolog/epilog pass.
6265 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6266 SDValue CallSeqStart = Chain;
6267
6268 // Load the return address and frame pointer so they can be moved somewhere
6269 // else later.
6270 SDValue LROp, FPOp; 6271 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 6272 6273 // Set up a copy of the stack pointer for use loading and storing any 6274 // arguments that may not fit in the registers available for argument 6275 // passing. 6276 SDValue StackPtr; 6277 if (isPPC64) 6278 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 6279 else 6280 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 6281 6282 // Figure out which arguments are going to go in registers, and which in 6283 // memory. Also, if this is a vararg function, floating point operations 6284 // must be stored to our stack, and loaded into integer regs as well, if 6285 // any integer regs are available for argument passing. 6286 unsigned ArgOffset = LinkageSize; 6287 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 6288 6289 static const MCPhysReg GPR_32[] = { // 32-bit registers. 6290 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 6291 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 6292 }; 6293 static const MCPhysReg GPR_64[] = { // 64-bit registers. 6294 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 6295 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 6296 }; 6297 static const MCPhysReg VR[] = { 6298 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 6299 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 6300 }; 6301 const unsigned NumGPRs = array_lengthof(GPR_32); 6302 const unsigned NumFPRs = 13; 6303 const unsigned NumVRs = array_lengthof(VR); 6304 6305 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 6306 6307 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 6308 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 6309 6310 SmallVector<SDValue, 8> MemOpChains; 6311 for (unsigned i = 0; i != NumOps; ++i) { 6312 SDValue Arg = OutVals[i]; 6313 ISD::ArgFlagsTy Flags = Outs[i].Flags; 6314 6315 // PtrOff will be used to store the current argument to the stack if a 6316 // register cannot be found for it. 6317 SDValue PtrOff; 6318 6319 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 6320 6321 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 6322 6323 // On PPC64, promote integers to 64-bit values. 6324 if (isPPC64 && Arg.getValueType() == MVT::i32) { 6325 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 6326 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 6327 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 6328 } 6329 6330 // FIXME memcpy is used way more than necessary. Correctness first. 6331 // Note: "by value" is code for passing a structure by value, not 6332 // basic types. 6333 if (Flags.isByVal()) { 6334 unsigned Size = Flags.getByValSize(); 6335 // Very small objects are passed right-justified. Everything else is 6336 // passed left-justified. 6337 if (Size==1 || Size==2) { 6338 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 6339 if (GPR_idx != NumGPRs) { 6340 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 6341 MachinePointerInfo(), VT); 6342 MemOpChains.push_back(Load.getValue(1)); 6343 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6344 6345 ArgOffset += PtrByteSize; 6346 } else { 6347 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 6348 PtrOff.getValueType()); 6349 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 6350 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 6351 CallSeqStart, 6352 Flags, DAG, dl); 6353 ArgOffset += PtrByteSize; 6354 } 6355 continue; 6356 } 6357 // Copy entire object into memory. 
There are cases where gcc-generated 6358 // code assumes it is there, even if it could be put entirely into 6359 // registers. (This is not what the doc says.) 6360 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 6361 CallSeqStart, 6362 Flags, DAG, dl); 6363 6364 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 6365 // copy the pieces of the object that fit into registers from the 6366 // parameter save area. 6367 for (unsigned j=0; j<Size; j+=PtrByteSize) { 6368 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 6369 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 6370 if (GPR_idx != NumGPRs) { 6371 SDValue Load = 6372 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo()); 6373 MemOpChains.push_back(Load.getValue(1)); 6374 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6375 ArgOffset += PtrByteSize; 6376 } else { 6377 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 6378 break; 6379 } 6380 } 6381 continue; 6382 } 6383 6384 switch (Arg.getSimpleValueType().SimpleTy) { 6385 default: llvm_unreachable("Unexpected ValueType for argument!"); 6386 case MVT::i1: 6387 case MVT::i32: 6388 case MVT::i64: 6389 if (GPR_idx != NumGPRs) { 6390 if (Arg.getValueType() == MVT::i1) 6391 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg); 6392 6393 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 6394 } else { 6395 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6396 isPPC64, isTailCall, false, MemOpChains, 6397 TailCallArguments, dl); 6398 } 6399 ArgOffset += PtrByteSize; 6400 break; 6401 case MVT::f32: 6402 case MVT::f64: 6403 if (FPR_idx != NumFPRs) { 6404 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 6405 6406 if (isVarArg) { 6407 SDValue Store = 6408 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6409 MemOpChains.push_back(Store); 6410 6411 // Float varargs are always shadowed in available integer registers 6412 if (GPR_idx != NumGPRs) { 6413 SDValue Load = 6414 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo()); 6415 MemOpChains.push_back(Load.getValue(1)); 6416 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6417 } 6418 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 6419 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 6420 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 6421 SDValue Load = 6422 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo()); 6423 MemOpChains.push_back(Load.getValue(1)); 6424 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6425 } 6426 } else { 6427 // If we have any FPRs remaining, we may also have GPRs remaining. 6428 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 6429 // GPRs. 6430 if (GPR_idx != NumGPRs) 6431 ++GPR_idx; 6432 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 6433 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 6434 ++GPR_idx; 6435 } 6436 } else 6437 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6438 isPPC64, isTailCall, false, MemOpChains, 6439 TailCallArguments, dl); 6440 if (isPPC64) 6441 ArgOffset += 8; 6442 else 6443 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 6444 break; 6445 case MVT::v4f32: 6446 case MVT::v4i32: 6447 case MVT::v8i16: 6448 case MVT::v16i8: 6449 if (isVarArg) { 6450 // These go aligned on the stack, or in the corresponding R registers 6451 // when within range. 
The Darwin PPC ABI doc claims they also go in 6452 // V registers; in fact gcc does this only for arguments that are 6453 // prototyped, not for those that match the ... We do it for all 6454 // arguments, seems to work. 6455 while (ArgOffset % 16 !=0) { 6456 ArgOffset += PtrByteSize; 6457 if (GPR_idx != NumGPRs) 6458 GPR_idx++; 6459 } 6460 // We could elide this store in the case where the object fits 6461 // entirely in R registers. Maybe later. 6462 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 6463 DAG.getConstant(ArgOffset, dl, PtrVT)); 6464 SDValue Store = 6465 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6466 MemOpChains.push_back(Store); 6467 if (VR_idx != NumVRs) { 6468 SDValue Load = 6469 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo()); 6470 MemOpChains.push_back(Load.getValue(1)); 6471 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 6472 } 6473 ArgOffset += 16; 6474 for (unsigned i=0; i<16; i+=PtrByteSize) { 6475 if (GPR_idx == NumGPRs) 6476 break; 6477 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 6478 DAG.getConstant(i, dl, PtrVT)); 6479 SDValue Load = 6480 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 6481 MemOpChains.push_back(Load.getValue(1)); 6482 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6483 } 6484 break; 6485 } 6486 6487 // Non-varargs Altivec params generally go in registers, but have 6488 // stack space allocated at the end. 6489 if (VR_idx != NumVRs) { 6490 // Doesn't have GPR space allocated. 6491 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 6492 } else if (nAltivecParamsAtEnd==0) { 6493 // We are emitting Altivec params in order. 6494 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6495 isPPC64, isTailCall, true, MemOpChains, 6496 TailCallArguments, dl); 6497 ArgOffset += 16; 6498 } 6499 break; 6500 } 6501 } 6502 // If all Altivec parameters fit in registers, as they usually do, 6503 // they get stack space following the non-Altivec parameters. We 6504 // don't track this here because nobody below needs it. 6505 // If there are more Altivec parameters than fit in registers emit 6506 // the stores here. 6507 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 6508 unsigned j = 0; 6509 // Offset is aligned; skip 1st 12 params which go in V registers. 6510 ArgOffset = ((ArgOffset+15)/16)*16; 6511 ArgOffset += 12*16; 6512 for (unsigned i = 0; i != NumOps; ++i) { 6513 SDValue Arg = OutVals[i]; 6514 EVT ArgType = Outs[i].VT; 6515 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 6516 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 6517 if (++j > NumVRs) { 6518 SDValue PtrOff; 6519 // We are emitting Altivec params in order. 6520 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6521 isPPC64, isTailCall, true, MemOpChains, 6522 TailCallArguments, dl); 6523 ArgOffset += 16; 6524 } 6525 } 6526 } 6527 } 6528 6529 if (!MemOpChains.empty()) 6530 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 6531 6532 // On Darwin, R12 must contain the address of an indirect callee. This does 6533 // not mean the MTCTR instruction must use R12; it's easier to model this as 6534 // an extra parameter, so do that. 6535 if (!isTailCall && 6536 !isFunctionGlobalAddress(Callee) && 6537 !isa<ExternalSymbolSDNode>(Callee) && 6538 !isBLACompatibleAddress(Callee, DAG)) 6539 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? 
PPC::X12 : 6540 PPC::R12), Callee)); 6541 6542 // Build a sequence of copy-to-reg nodes chained together with token chain 6543 // and flag operands which copy the outgoing args into the appropriate regs. 6544 SDValue InFlag; 6545 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 6546 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 6547 RegsToPass[i].second, InFlag); 6548 InFlag = Chain.getValue(1); 6549 } 6550 6551 if (isTailCall) 6552 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 6553 TailCallArguments); 6554 6555 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, 6556 /* unused except on PPC64 ELFv1 */ false, DAG, 6557 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 6558 NumBytes, Ins, InVals, CS); 6559 } 6560 6561 bool 6562 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 6563 MachineFunction &MF, bool isVarArg, 6564 const SmallVectorImpl<ISD::OutputArg> &Outs, 6565 LLVMContext &Context) const { 6566 SmallVector<CCValAssign, 16> RVLocs; 6567 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 6568 return CCInfo.CheckReturn( 6569 Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 6570 ? RetCC_PPC_Cold 6571 : RetCC_PPC); 6572 } 6573 6574 SDValue 6575 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 6576 bool isVarArg, 6577 const SmallVectorImpl<ISD::OutputArg> &Outs, 6578 const SmallVectorImpl<SDValue> &OutVals, 6579 const SDLoc &dl, SelectionDAG &DAG) const { 6580 SmallVector<CCValAssign, 16> RVLocs; 6581 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 6582 *DAG.getContext()); 6583 CCInfo.AnalyzeReturn(Outs, 6584 (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 6585 ? RetCC_PPC_Cold 6586 : RetCC_PPC); 6587 6588 SDValue Flag; 6589 SmallVector<SDValue, 4> RetOps(1, Chain); 6590 6591 // Copy the result values into the output registers. 6592 for (unsigned i = 0; i != RVLocs.size(); ++i) { 6593 CCValAssign &VA = RVLocs[i]; 6594 assert(VA.isRegLoc() && "Can only return in registers!"); 6595 6596 SDValue Arg = OutVals[i]; 6597 6598 switch (VA.getLocInfo()) { 6599 default: llvm_unreachable("Unknown loc info!"); 6600 case CCValAssign::Full: break; 6601 case CCValAssign::AExt: 6602 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 6603 break; 6604 case CCValAssign::ZExt: 6605 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 6606 break; 6607 case CCValAssign::SExt: 6608 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 6609 break; 6610 } 6611 6612 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 6613 Flag = Chain.getValue(1); 6614 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 6615 } 6616 6617 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 6618 const MCPhysReg *I = 6619 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); 6620 if (I) { 6621 for (; *I; ++I) { 6622 6623 if (PPC::G8RCRegClass.contains(*I)) 6624 RetOps.push_back(DAG.getRegister(*I, MVT::i64)); 6625 else if (PPC::F8RCRegClass.contains(*I)) 6626 RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64))); 6627 else if (PPC::CRRCRegClass.contains(*I)) 6628 RetOps.push_back(DAG.getRegister(*I, MVT::i1)); 6629 else if (PPC::VRRCRegClass.contains(*I)) 6630 RetOps.push_back(DAG.getRegister(*I, MVT::Other)); 6631 else 6632 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 6633 } 6634 } 6635 6636 RetOps[0] = Chain; // Update chain. 6637 6638 // Add the flag if we have it. 
6639 if (Flag.getNode())
6640 RetOps.push_back(Flag);
6641
6642 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
6643 }
6644
6645 SDValue
6646 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
6647 SelectionDAG &DAG) const {
6648 SDLoc dl(Op);
6649
6650 // Get the correct type for integers.
6651 EVT IntVT = Op.getValueType();
6652
6653 // Get the inputs.
6654 SDValue Chain = Op.getOperand(0);
6655 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
6656 // Build a DYNAREAOFFSET node.
6657 SDValue Ops[2] = {Chain, FPSIdx};
6658 SDVTList VTs = DAG.getVTList(IntVT);
6659 return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
6660 }
6661
6662 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
6663 SelectionDAG &DAG) const {
6664 // When we pop the dynamic allocation we need to restore the SP link.
6665 SDLoc dl(Op);
6666
6667 // Get the correct type for pointers.
6668 EVT PtrVT = getPointerTy(DAG.getDataLayout());
6669
6670 // Construct the stack pointer operand.
6671 bool isPPC64 = Subtarget.isPPC64();
6672 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
6673 SDValue StackPtr = DAG.getRegister(SP, PtrVT);
6674
6675 // Get the operands for the STACKRESTORE.
6676 SDValue Chain = Op.getOperand(0);
6677 SDValue SaveSP = Op.getOperand(1);
6678
6679 // Load the old link SP.
6680 SDValue LoadLinkSP =
6681 DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
6682
6683 // Restore the stack pointer.
6684 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
6685
6686 // Store the old link SP.
6687 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
6688 }
6689
6690 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
6691 MachineFunction &MF = DAG.getMachineFunction();
6692 bool isPPC64 = Subtarget.isPPC64();
6693 EVT PtrVT = getPointerTy(MF.getDataLayout());
6694
6695 // Get the current return address save index; it is created lazily the
6696 // first time it is needed.
6697 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
6698 int RASI = FI->getReturnAddrSaveIndex();
6699
6700 // If the return address save index hasn't been defined yet.
6701 if (!RASI) {
6702 // Find out the fixed offset of the return address save area.
6703 int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
6704 // Allocate the frame index for the return address save area.
6705 RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
6706 // Save the result.
6707 FI->setReturnAddrSaveIndex(RASI);
6708 }
6709 return DAG.getFrameIndex(RASI, PtrVT);
6710 }
6711
6712 SDValue
6713 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
6714 MachineFunction &MF = DAG.getMachineFunction();
6715 bool isPPC64 = Subtarget.isPPC64();
6716 EVT PtrVT = getPointerTy(MF.getDataLayout());
6717
6718 // Get the current frame pointer save index. The users of this index will be
6719 // primarily DYNALLOC instructions.
6720 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
6721 int FPSI = FI->getFramePointerSaveIndex();
6722
6723 // If the frame pointer save index hasn't been defined yet.
6724 if (!FPSI) {
6725 // Find out the fixed offset of the frame pointer save area.
6726 int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
6727 // Allocate the frame index for the frame pointer save area.
6728 FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
6729 // Save the result.
6730 FI->setFramePointerSaveIndex(FPSI); 6731 } 6732 return DAG.getFrameIndex(FPSI, PtrVT); 6733 } 6734 6735 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 6736 SelectionDAG &DAG) const { 6737 // Get the inputs. 6738 SDValue Chain = Op.getOperand(0); 6739 SDValue Size = Op.getOperand(1); 6740 SDLoc dl(Op); 6741 6742 // Get the correct type for pointers. 6743 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6744 // Negate the size. 6745 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT, 6746 DAG.getConstant(0, dl, PtrVT), Size); 6747 // Construct a node for the frame pointer save index. 6748 SDValue FPSIdx = getFramePointerFrameIndex(DAG); 6749 // Build a DYNALLOC node. 6750 SDValue Ops[3] = { Chain, NegSize, FPSIdx }; 6751 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other); 6752 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops); 6753 } 6754 6755 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op, 6756 SelectionDAG &DAG) const { 6757 MachineFunction &MF = DAG.getMachineFunction(); 6758 6759 bool isPPC64 = Subtarget.isPPC64(); 6760 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6761 6762 int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false); 6763 return DAG.getFrameIndex(FI, PtrVT); 6764 } 6765 6766 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, 6767 SelectionDAG &DAG) const { 6768 SDLoc DL(Op); 6769 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL, 6770 DAG.getVTList(MVT::i32, MVT::Other), 6771 Op.getOperand(0), Op.getOperand(1)); 6772 } 6773 6774 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, 6775 SelectionDAG &DAG) const { 6776 SDLoc DL(Op); 6777 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other, 6778 Op.getOperand(0), Op.getOperand(1)); 6779 } 6780 6781 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 6782 if (Op.getValueType().isVector()) 6783 return LowerVectorLoad(Op, DAG); 6784 6785 assert(Op.getValueType() == MVT::i1 && 6786 "Custom lowering only for i1 loads"); 6787 6788 // First, load 8 bits into 32 bits, then truncate to 1 bit. 6789 6790 SDLoc dl(Op); 6791 LoadSDNode *LD = cast<LoadSDNode>(Op); 6792 6793 SDValue Chain = LD->getChain(); 6794 SDValue BasePtr = LD->getBasePtr(); 6795 MachineMemOperand *MMO = LD->getMemOperand(); 6796 6797 SDValue NewLD = 6798 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain, 6799 BasePtr, MVT::i8, MMO); 6800 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD); 6801 6802 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) }; 6803 return DAG.getMergeValues(Ops, dl); 6804 } 6805 6806 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 6807 if (Op.getOperand(1).getValueType().isVector()) 6808 return LowerVectorStore(Op, DAG); 6809 6810 assert(Op.getOperand(1).getValueType() == MVT::i1 && 6811 "Custom lowering only for i1 stores"); 6812 6813 // First, zero extend to 32 bits, then use a truncating store to 8 bits. 
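// Roughly: (store i1 %v, ptr) becomes a zero-extension of %v to the
// pointer-sized integer type followed by a truncating i8 store that reuses
// the original store's MachineMemOperand, so ordering and alias information
// are preserved.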
6814 6815 SDLoc dl(Op); 6816 StoreSDNode *ST = cast<StoreSDNode>(Op); 6817 6818 SDValue Chain = ST->getChain(); 6819 SDValue BasePtr = ST->getBasePtr(); 6820 SDValue Value = ST->getValue(); 6821 MachineMemOperand *MMO = ST->getMemOperand(); 6822 6823 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()), 6824 Value); 6825 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO); 6826 } 6827 6828 // FIXME: Remove this once the ANDI glue bug is fixed: 6829 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 6830 assert(Op.getValueType() == MVT::i1 && 6831 "Custom lowering only for i1 results"); 6832 6833 SDLoc DL(Op); 6834 return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1, 6835 Op.getOperand(0)); 6836 } 6837 6838 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when 6839 /// possible. 6840 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 6841 // Not FP? Not a fsel. 6842 if (!Op.getOperand(0).getValueType().isFloatingPoint() || 6843 !Op.getOperand(2).getValueType().isFloatingPoint()) 6844 return Op; 6845 6846 // We might be able to do better than this under some circumstances, but in 6847 // general, fsel-based lowering of select is a finite-math-only optimization. 6848 // For more information, see section F.3 of the 2.06 ISA specification. 6849 if (!DAG.getTarget().Options.NoInfsFPMath || 6850 !DAG.getTarget().Options.NoNaNsFPMath) 6851 return Op; 6852 // TODO: Propagate flags from the select rather than global settings. 6853 SDNodeFlags Flags; 6854 Flags.setNoInfs(true); 6855 Flags.setNoNaNs(true); 6856 6857 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 6858 6859 EVT ResVT = Op.getValueType(); 6860 EVT CmpVT = Op.getOperand(0).getValueType(); 6861 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 6862 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3); 6863 SDLoc dl(Op); 6864 6865 // If the RHS of the comparison is a 0.0, we don't need to do the 6866 // subtraction at all. 6867 SDValue Sel1; 6868 if (isFloatingPointZero(RHS)) 6869 switch (CC) { 6870 default: break; // SETUO etc aren't handled by fsel. 
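// PPCISD::FSEL(A, B, C) yields B when A >= 0.0 and C otherwise, mirroring
// the hardware fsel instruction. As an illustration, the SETEQ path below
// (with a zero RHS) builds
//   Sel1 = FSEL(LHS,  TV, FV)    // LHS >= 0.0 ? TV : FV
//   Res  = FSEL(-LHS, Sel1, FV)  // LHS <= 0.0 ? Sel1 : FV
// so TV is produced only when LHS == 0.0; this is only sound under the
// no-NaNs/no-infs assumptions checked earlier.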
6871 case ISD::SETNE: 6872 std::swap(TV, FV); 6873 LLVM_FALLTHROUGH; 6874 case ISD::SETEQ: 6875 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 6876 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 6877 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 6878 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 6879 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 6880 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6881 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV); 6882 case ISD::SETULT: 6883 case ISD::SETLT: 6884 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 6885 LLVM_FALLTHROUGH; 6886 case ISD::SETOGE: 6887 case ISD::SETGE: 6888 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 6889 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 6890 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 6891 case ISD::SETUGT: 6892 case ISD::SETGT: 6893 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 6894 LLVM_FALLTHROUGH; 6895 case ISD::SETOLE: 6896 case ISD::SETLE: 6897 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 6898 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 6899 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6900 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV); 6901 } 6902 6903 SDValue Cmp; 6904 switch (CC) { 6905 default: break; // SETUO etc aren't handled by fsel. 6906 case ISD::SETNE: 6907 std::swap(TV, FV); 6908 LLVM_FALLTHROUGH; 6909 case ISD::SETEQ: 6910 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 6911 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6912 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6913 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6914 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 6915 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 6916 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6917 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 6918 case ISD::SETULT: 6919 case ISD::SETLT: 6920 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 6921 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6922 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6923 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6924 case ISD::SETOGE: 6925 case ISD::SETGE: 6926 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 6927 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6928 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6929 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6930 case ISD::SETUGT: 6931 case ISD::SETGT: 6932 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags); 6933 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6934 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6935 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6936 case ISD::SETOLE: 6937 case ISD::SETLE: 6938 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags); 6939 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6940 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6941 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6942 } 6943 return Op; 6944 } 6945 6946 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI, 6947 SelectionDAG &DAG, 6948 const SDLoc &dl) const { 6949 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6950 SDValue Src = Op.getOperand(0); 6951 if (Src.getValueType() == 
MVT::f32) 6952 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6953 6954 SDValue Tmp; 6955 switch (Op.getSimpleValueType().SimpleTy) { 6956 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6957 case MVT::i32: 6958 Tmp = DAG.getNode( 6959 Op.getOpcode() == ISD::FP_TO_SINT 6960 ? PPCISD::FCTIWZ 6961 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6962 dl, MVT::f64, Src); 6963 break; 6964 case MVT::i64: 6965 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6966 "i64 FP_TO_UINT is supported only with FPCVT"); 6967 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 6968 PPCISD::FCTIDUZ, 6969 dl, MVT::f64, Src); 6970 break; 6971 } 6972 6973 // Convert the FP value to an int value through memory. 6974 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 6975 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()); 6976 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 6977 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 6978 MachinePointerInfo MPI = 6979 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); 6980 6981 // Emit a store to the stack slot. 6982 SDValue Chain; 6983 if (i32Stack) { 6984 MachineFunction &MF = DAG.getMachineFunction(); 6985 MachineMemOperand *MMO = 6986 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 6987 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 6988 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 6989 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 6990 } else 6991 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI); 6992 6993 // Result is a load from the stack slot. If loading 4 bytes, make sure to 6994 // add in a bias on big endian. 6995 if (Op.getValueType() == MVT::i32 && !i32Stack) { 6996 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 6997 DAG.getConstant(4, dl, FIPtr.getValueType())); 6998 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4); 6999 } 7000 7001 RLI.Chain = Chain; 7002 RLI.Ptr = FIPtr; 7003 RLI.MPI = MPI; 7004 } 7005 7006 /// Custom lowers floating point to integer conversions to use 7007 /// the direct move instructions available in ISA 2.07 to avoid the 7008 /// need for load/store combinations. 7009 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 7010 SelectionDAG &DAG, 7011 const SDLoc &dl) const { 7012 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 7013 SDValue Src = Op.getOperand(0); 7014 7015 if (Src.getValueType() == MVT::f32) 7016 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 7017 7018 SDValue Tmp; 7019 switch (Op.getSimpleValueType().SimpleTy) { 7020 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 7021 case MVT::i32: 7022 Tmp = DAG.getNode( 7023 Op.getOpcode() == ISD::FP_TO_SINT 7024 ? PPCISD::FCTIWZ 7025 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 7026 dl, MVT::f64, Src); 7027 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp); 7028 break; 7029 case MVT::i64: 7030 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 7031 "i64 FP_TO_UINT is supported only with FPCVT"); 7032 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? 
PPCISD::FCTIDZ : 7033 PPCISD::FCTIDUZ, 7034 dl, MVT::f64, Src); 7035 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp); 7036 break; 7037 } 7038 return Tmp; 7039 } 7040 7041 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 7042 const SDLoc &dl) const { 7043 7044 // FP to INT conversions are legal for f128. 7045 if (EnableQuadPrecision && (Op->getOperand(0).getValueType() == MVT::f128)) 7046 return Op; 7047 7048 // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on 7049 // PPC (the libcall is not available). 7050 if (Op.getOperand(0).getValueType() == MVT::ppcf128) { 7051 if (Op.getValueType() == MVT::i32) { 7052 if (Op.getOpcode() == ISD::FP_TO_SINT) { 7053 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 7054 MVT::f64, Op.getOperand(0), 7055 DAG.getIntPtrConstant(0, dl)); 7056 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 7057 MVT::f64, Op.getOperand(0), 7058 DAG.getIntPtrConstant(1, dl)); 7059 7060 // Add the two halves of the long double in round-to-zero mode. 7061 SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 7062 7063 // Now use a smaller FP_TO_SINT. 7064 return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res); 7065 } 7066 if (Op.getOpcode() == ISD::FP_TO_UINT) { 7067 const uint64_t TwoE31[] = {0x41e0000000000000LL, 0}; 7068 APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31)); 7069 SDValue Tmp = DAG.getConstantFP(APF, dl, MVT::ppcf128); 7070 // X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X 7071 // FIXME: generated code sucks. 7072 // TODO: Are there fast-math-flags to propagate to this FSUB? 7073 SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128, 7074 Op.getOperand(0), Tmp); 7075 True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True); 7076 True = DAG.getNode(ISD::ADD, dl, MVT::i32, True, 7077 DAG.getConstant(0x80000000, dl, MVT::i32)); 7078 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, 7079 Op.getOperand(0)); 7080 return DAG.getSelectCC(dl, Op.getOperand(0), Tmp, True, False, 7081 ISD::SETGE); 7082 } 7083 } 7084 7085 return SDValue(); 7086 } 7087 7088 if (Subtarget.hasDirectMove() && Subtarget.isPPC64()) 7089 return LowerFP_TO_INTDirectMove(Op, DAG, dl); 7090 7091 ReuseLoadInfo RLI; 7092 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 7093 7094 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, 7095 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 7096 } 7097 7098 // We're trying to insert a regular store, S, and then a load, L. If the 7099 // incoming value, O, is a load, we might just be able to have our load use the 7100 // address used by O. However, we don't know if anything else will store to 7101 // that address before we can load from it. To prevent this situation, we need 7102 // to insert our load, L, into the chain as a peer of O. To do this, we give L 7103 // the same chain operand as O, we create a token factor from the chain results 7104 // of O and L, and we replace all uses of O's chain result with that token 7105 // factor (see spliceIntoChain below for this last part). 
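// Schematically (illustration only):
//
//   before:  O --chain--> (users of O's chain result)
//   after:   O --chain--> TokenFactor <--chain-- L
//                             |
//                             +-------> (former users of O's chain result)
//
// so L sees the same memory state as O, and everything that previously
// depended on O's chain now also depends on L.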
7106 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT, 7107 ReuseLoadInfo &RLI, 7108 SelectionDAG &DAG, 7109 ISD::LoadExtType ET) const { 7110 SDLoc dl(Op); 7111 if (ET == ISD::NON_EXTLOAD && 7112 (Op.getOpcode() == ISD::FP_TO_UINT || 7113 Op.getOpcode() == ISD::FP_TO_SINT) && 7114 isOperationLegalOrCustom(Op.getOpcode(), 7115 Op.getOperand(0).getValueType())) { 7116 7117 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 7118 return true; 7119 } 7120 7121 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op); 7122 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() || 7123 LD->isNonTemporal()) 7124 return false; 7125 if (LD->getMemoryVT() != MemVT) 7126 return false; 7127 7128 RLI.Ptr = LD->getBasePtr(); 7129 if (LD->isIndexed() && !LD->getOffset().isUndef()) { 7130 assert(LD->getAddressingMode() == ISD::PRE_INC && 7131 "Non-pre-inc AM on PPC?"); 7132 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr, 7133 LD->getOffset()); 7134 } 7135 7136 RLI.Chain = LD->getChain(); 7137 RLI.MPI = LD->getPointerInfo(); 7138 RLI.IsDereferenceable = LD->isDereferenceable(); 7139 RLI.IsInvariant = LD->isInvariant(); 7140 RLI.Alignment = LD->getAlignment(); 7141 RLI.AAInfo = LD->getAAInfo(); 7142 RLI.Ranges = LD->getRanges(); 7143 7144 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1); 7145 return true; 7146 } 7147 7148 // Given the head of the old chain, ResChain, insert a token factor containing 7149 // it and NewResChain, and make users of ResChain now be users of that token 7150 // factor. 7151 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead. 7152 void PPCTargetLowering::spliceIntoChain(SDValue ResChain, 7153 SDValue NewResChain, 7154 SelectionDAG &DAG) const { 7155 if (!ResChain) 7156 return; 7157 7158 SDLoc dl(NewResChain); 7159 7160 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 7161 NewResChain, DAG.getUNDEF(MVT::Other)); 7162 assert(TF.getNode() != NewResChain.getNode() && 7163 "A new TF really is required here"); 7164 7165 DAG.ReplaceAllUsesOfValueWith(ResChain, TF); 7166 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain); 7167 } 7168 7169 /// Analyze profitability of direct move 7170 /// prefer float load to int load plus direct move 7171 /// when there is no integer use of int load 7172 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const { 7173 SDNode *Origin = Op.getOperand(0).getNode(); 7174 if (Origin->getOpcode() != ISD::LOAD) 7175 return true; 7176 7177 // If there is no LXSIBZX/LXSIHZX, like Power8, 7178 // prefer direct move if the memory size is 1 or 2 bytes. 7179 MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand(); 7180 if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2) 7181 return true; 7182 7183 for (SDNode::use_iterator UI = Origin->use_begin(), 7184 UE = Origin->use_end(); 7185 UI != UE; ++UI) { 7186 7187 // Only look at the users of the loaded value. 7188 if (UI.getUse().get().getResNo() != 0) 7189 continue; 7190 7191 if (UI->getOpcode() != ISD::SINT_TO_FP && 7192 UI->getOpcode() != ISD::UINT_TO_FP) 7193 return true; 7194 } 7195 7196 return false; 7197 } 7198 7199 /// Custom lowers integer to floating point conversions to use 7200 /// the direct move instructions available in ISA 2.07 to avoid the 7201 /// need for load/store combinations. 
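/// For example, assuming a 64-bit subtarget with direct moves and FPCVT, a
/// signed i32 -> f64 conversion is emitted as MTVSRA (a sign-extending
/// GPR-to-VSR move) followed by FCFID, rather than a store/load sequence
/// through memory.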
7202 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op, 7203 SelectionDAG &DAG, 7204 const SDLoc &dl) const { 7205 assert((Op.getValueType() == MVT::f32 || 7206 Op.getValueType() == MVT::f64) && 7207 "Invalid floating point type as target of conversion"); 7208 assert(Subtarget.hasFPCVT() && 7209 "Int to FP conversions with direct moves require FPCVT"); 7210 SDValue FP; 7211 SDValue Src = Op.getOperand(0); 7212 bool SinglePrec = Op.getValueType() == MVT::f32; 7213 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32; 7214 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP; 7215 unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) : 7216 (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU); 7217 7218 if (WordInt) { 7219 FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ, 7220 dl, MVT::f64, Src); 7221 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 7222 } 7223 else { 7224 FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src); 7225 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 7226 } 7227 7228 return FP; 7229 } 7230 7231 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, 7232 SelectionDAG &DAG) const { 7233 SDLoc dl(Op); 7234 7235 // Conversions to f128 are legal. 7236 if (EnableQuadPrecision && (Op.getValueType() == MVT::f128)) 7237 return Op; 7238 7239 if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) { 7240 if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64) 7241 return SDValue(); 7242 7243 SDValue Value = Op.getOperand(0); 7244 // The values are now known to be -1 (false) or 1 (true). To convert this 7245 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 7246 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 7247 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 7248 7249 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 7250 7251 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 7252 7253 if (Op.getValueType() != MVT::v4f64) 7254 Value = DAG.getNode(ISD::FP_ROUND, dl, 7255 Op.getValueType(), Value, 7256 DAG.getIntPtrConstant(1, dl)); 7257 return Value; 7258 } 7259 7260 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 7261 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 7262 return SDValue(); 7263 7264 if (Op.getOperand(0).getValueType() == MVT::i1) 7265 return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0), 7266 DAG.getConstantFP(1.0, dl, Op.getValueType()), 7267 DAG.getConstantFP(0.0, dl, Op.getValueType())); 7268 7269 // If we have direct moves, we can do all the conversion, skip the store/load 7270 // however, without FPCVT we can't do most conversions. 7271 if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) && 7272 Subtarget.isPPC64() && Subtarget.hasFPCVT()) 7273 return LowerINT_TO_FPDirectMove(Op, DAG, dl); 7274 7275 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) && 7276 "UINT_TO_FP is supported only with FPCVT"); 7277 7278 // If we have FCFIDS, then use it when converting to single-precision. 7279 // Otherwise, convert to double-precision and then round. 7280 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 7281 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS 7282 : PPCISD::FCFIDS) 7283 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU 7284 : PPCISD::FCFID); 7285 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 7286 ? 
MVT::f32 7287 : MVT::f64; 7288 7289 if (Op.getOperand(0).getValueType() == MVT::i64) { 7290 SDValue SINT = Op.getOperand(0); 7291 // When converting to single-precision, we actually need to convert 7292 // to double-precision first and then round to single-precision. 7293 // To avoid double-rounding effects during that operation, we have 7294 // to prepare the input operand. Bits that might be truncated when 7295 // converting to double-precision are replaced by a bit that won't 7296 // be lost at this stage, but is below the single-precision rounding 7297 // position. 7298 // 7299 // However, if -enable-unsafe-fp-math is in effect, accept double 7300 // rounding to avoid the extra overhead. 7301 if (Op.getValueType() == MVT::f32 && 7302 !Subtarget.hasFPCVT() && 7303 !DAG.getTarget().Options.UnsafeFPMath) { 7304 7305 // Twiddle input to make sure the low 11 bits are zero. (If this 7306 // is the case, we are guaranteed the value will fit into the 53 bit 7307 // mantissa of an IEEE double-precision value without rounding.) 7308 // If any of those low 11 bits were not zero originally, make sure 7309 // bit 12 (value 2048) is set instead, so that the final rounding 7310 // to single-precision gets the correct result. 7311 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 7312 SINT, DAG.getConstant(2047, dl, MVT::i64)); 7313 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 7314 Round, DAG.getConstant(2047, dl, MVT::i64)); 7315 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 7316 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 7317 Round, DAG.getConstant(-2048, dl, MVT::i64)); 7318 7319 // However, we cannot use that value unconditionally: if the magnitude 7320 // of the input value is small, the bit-twiddling we did above might 7321 // end up visibly changing the output. Fortunately, in that case, we 7322 // don't need to twiddle bits since the original input will convert 7323 // exactly to double-precision floating-point already. Therefore, 7324 // construct a conditional to use the original value if the top 11 7325 // bits are all sign-bit copies, and use the rounded value computed 7326 // above otherwise. 
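// Worked example of the twiddling above (illustrative values): for
// SINT = 0x20000000000007FF the low 11 bits are nonzero, so
// Round = (((SINT & 2047) + 2047) | SINT) & -2048 == 0x2000000000000800:
// the low 11 bits are cleared and the 2048 bit is left set as the sticky
// replacement described above.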
7327 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 7328 SINT, DAG.getConstant(53, dl, MVT::i32)); 7329 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 7330 Cond, DAG.getConstant(1, dl, MVT::i64)); 7331 Cond = DAG.getSetCC(dl, MVT::i32, 7332 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT); 7333 7334 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 7335 } 7336 7337 ReuseLoadInfo RLI; 7338 SDValue Bits; 7339 7340 MachineFunction &MF = DAG.getMachineFunction(); 7341 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 7342 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, 7343 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 7344 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 7345 } else if (Subtarget.hasLFIWAX() && 7346 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) { 7347 MachineMemOperand *MMO = 7348 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 7349 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7350 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7351 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl, 7352 DAG.getVTList(MVT::f64, MVT::Other), 7353 Ops, MVT::i32, MMO); 7354 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 7355 } else if (Subtarget.hasFPCVT() && 7356 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) { 7357 MachineMemOperand *MMO = 7358 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 7359 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7360 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7361 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl, 7362 DAG.getVTList(MVT::f64, MVT::Other), 7363 Ops, MVT::i32, MMO); 7364 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 7365 } else if (((Subtarget.hasLFIWAX() && 7366 SINT.getOpcode() == ISD::SIGN_EXTEND) || 7367 (Subtarget.hasFPCVT() && 7368 SINT.getOpcode() == ISD::ZERO_EXTEND)) && 7369 SINT.getOperand(0).getValueType() == MVT::i32) { 7370 MachineFrameInfo &MFI = MF.getFrameInfo(); 7371 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7372 7373 int FrameIdx = MFI.CreateStackObject(4, 4, false); 7374 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7375 7376 SDValue Store = 7377 DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx, 7378 MachinePointerInfo::getFixedStack( 7379 DAG.getMachineFunction(), FrameIdx)); 7380 7381 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 7382 "Expected an i32 store"); 7383 7384 RLI.Ptr = FIdx; 7385 RLI.Chain = Store; 7386 RLI.MPI = 7387 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7388 RLI.Alignment = 4; 7389 7390 MachineMemOperand *MMO = 7391 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 7392 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7393 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7394 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ? 7395 PPCISD::LFIWZX : PPCISD::LFIWAX, 7396 dl, DAG.getVTList(MVT::f64, MVT::Other), 7397 Ops, MVT::i32, MMO); 7398 } else 7399 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 7400 7401 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 7402 7403 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 7404 FP = DAG.getNode(ISD::FP_ROUND, dl, 7405 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 7406 return FP; 7407 } 7408 7409 assert(Op.getOperand(0).getValueType() == MVT::i32 && 7410 "Unhandled INT_TO_FP type in custom expander!"); 7411 // Since we only generate this in 64-bit mode, we can take advantage of 7412 // 64-bit registers. 
In particular, sign extend the input value into the 7413 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 7414 // then lfd it and fcfid it. 7415 MachineFunction &MF = DAG.getMachineFunction(); 7416 MachineFrameInfo &MFI = MF.getFrameInfo(); 7417 EVT PtrVT = getPointerTy(MF.getDataLayout()); 7418 7419 SDValue Ld; 7420 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 7421 ReuseLoadInfo RLI; 7422 bool ReusingLoad; 7423 if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI, 7424 DAG))) { 7425 int FrameIdx = MFI.CreateStackObject(4, 4, false); 7426 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7427 7428 SDValue Store = 7429 DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 7430 MachinePointerInfo::getFixedStack( 7431 DAG.getMachineFunction(), FrameIdx)); 7432 7433 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 7434 "Expected an i32 store"); 7435 7436 RLI.Ptr = FIdx; 7437 RLI.Chain = Store; 7438 RLI.MPI = 7439 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7440 RLI.Alignment = 4; 7441 } 7442 7443 MachineMemOperand *MMO = 7444 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 7445 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7446 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7447 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 7448 PPCISD::LFIWZX : PPCISD::LFIWAX, 7449 dl, DAG.getVTList(MVT::f64, MVT::Other), 7450 Ops, MVT::i32, MMO); 7451 if (ReusingLoad) 7452 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG); 7453 } else { 7454 assert(Subtarget.isPPC64() && 7455 "i32->FP without LFIWAX supported only on PPC64"); 7456 7457 int FrameIdx = MFI.CreateStackObject(8, 8, false); 7458 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7459 7460 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, 7461 Op.getOperand(0)); 7462 7463 // STD the extended value into the stack slot. 7464 SDValue Store = DAG.getStore( 7465 DAG.getEntryNode(), dl, Ext64, FIdx, 7466 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx)); 7467 7468 // Load the value as a double. 7469 Ld = DAG.getLoad( 7470 MVT::f64, dl, Store, FIdx, 7471 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx)); 7472 } 7473 7474 // FCFID it and return it. 
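// For example, on a 64-bit target without LFIWAX or FPCVT an i32 -> f32
// conversion takes the slow path above: extsw, std to an 8-byte stack slot,
// lfd, fcfid, and then the FP_ROUND below becomes the final frsp because
// FCFIDS is unavailable. (Illustrative summary of the code above, not
// additional lowering.)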
7475 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
7476 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
7477 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
7478 DAG.getIntPtrConstant(0, dl));
7479 return FP;
7480 }
7481
7482 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
7483 SelectionDAG &DAG) const {
7484 SDLoc dl(Op);
7485 /*
7486 The rounding mode is in bits 30:31 of FPSCR, and has the following
7487 settings:
7488 00 Round to nearest
7489 01 Round to 0
7490 10 Round to +inf
7491 11 Round to -inf
7492
7493 FLT_ROUNDS, on the other hand, expects the following:
7494 -1 Undefined
7495 0 Round to 0
7496 1 Round to nearest
7497 2 Round to +inf
7498 3 Round to -inf
7499
7500 To perform the conversion, we do:
7501 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
7502 */
7503
7504 MachineFunction &MF = DAG.getMachineFunction();
7505 EVT VT = Op.getValueType();
7506 EVT PtrVT = getPointerTy(MF.getDataLayout());
7507
7508 // Save FP Control Word to register
7509 EVT NodeTys[] = {
7510 MVT::f64, // return register
7511 MVT::Glue // unused in this context
7512 };
7513 SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);
7514
7515 // Save FP register to stack slot
7516 int SSFI = MF.getFrameInfo().CreateStackObject(8, 8, false);
7517 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
7518 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, StackSlot,
7519 MachinePointerInfo());
7520
7521 // Load FP Control Word from low 32 bits of stack slot.
7522 SDValue Four = DAG.getConstant(4, dl, PtrVT);
7523 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
7524 SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo());
7525
7526 // Transform as necessary
7527 SDValue CWD1 =
7528 DAG.getNode(ISD::AND, dl, MVT::i32,
7529 CWD, DAG.getConstant(3, dl, MVT::i32));
7530 SDValue CWD2 =
7531 DAG.getNode(ISD::SRL, dl, MVT::i32,
7532 DAG.getNode(ISD::AND, dl, MVT::i32,
7533 DAG.getNode(ISD::XOR, dl, MVT::i32,
7534 CWD, DAG.getConstant(3, dl, MVT::i32)),
7535 DAG.getConstant(3, dl, MVT::i32)),
7536 DAG.getConstant(1, dl, MVT::i32));
7537
7538 SDValue RetVal =
7539 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
7540
7541 return DAG.getNode((VT.getSizeInBits() < 16 ?
7542 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
7543 }
7544
7545 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
7546 EVT VT = Op.getValueType();
7547 unsigned BitWidth = VT.getSizeInBits();
7548 SDLoc dl(Op);
7549 assert(Op.getNumOperands() == 3 &&
7550 VT == Op.getOperand(1).getValueType() &&
7551 "Unexpected SHL!");
7552
7553 // Expand into a bunch of logical ops. Note that these ops
7554 // depend on the PPC behavior for oversized shift amounts.
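// Illustrative example of why the oversized-shift behavior matters (assuming
// 32-bit parts, i.e. BitWidth == 32): for Amt == 40, both Hi << 40 and
// Lo >> (32 - 40) produce 0 because PPC shifts take the amount modulo 64 and
// yield 0 once the 32 bit of the amount is set, so OutHi reduces to
// Lo << (40 - 32) == Lo << 8 and OutLo to 0, which is exactly the 64-bit
// result.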
7555 SDValue Lo = Op.getOperand(0); 7556 SDValue Hi = Op.getOperand(1); 7557 SDValue Amt = Op.getOperand(2); 7558 EVT AmtVT = Amt.getValueType(); 7559 7560 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 7561 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 7562 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 7563 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 7564 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 7565 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 7566 DAG.getConstant(-BitWidth, dl, AmtVT)); 7567 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 7568 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 7569 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 7570 SDValue OutOps[] = { OutLo, OutHi }; 7571 return DAG.getMergeValues(OutOps, dl); 7572 } 7573 7574 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 7575 EVT VT = Op.getValueType(); 7576 SDLoc dl(Op); 7577 unsigned BitWidth = VT.getSizeInBits(); 7578 assert(Op.getNumOperands() == 3 && 7579 VT == Op.getOperand(1).getValueType() && 7580 "Unexpected SRL!"); 7581 7582 // Expand into a bunch of logical ops. Note that these ops 7583 // depend on the PPC behavior for oversized shift amounts. 7584 SDValue Lo = Op.getOperand(0); 7585 SDValue Hi = Op.getOperand(1); 7586 SDValue Amt = Op.getOperand(2); 7587 EVT AmtVT = Amt.getValueType(); 7588 7589 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 7590 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 7591 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 7592 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 7593 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 7594 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 7595 DAG.getConstant(-BitWidth, dl, AmtVT)); 7596 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 7597 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 7598 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 7599 SDValue OutOps[] = { OutLo, OutHi }; 7600 return DAG.getMergeValues(OutOps, dl); 7601 } 7602 7603 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 7604 SDLoc dl(Op); 7605 EVT VT = Op.getValueType(); 7606 unsigned BitWidth = VT.getSizeInBits(); 7607 assert(Op.getNumOperands() == 3 && 7608 VT == Op.getOperand(1).getValueType() && 7609 "Unexpected SRA!"); 7610 7611 // Expand into a bunch of logical ops, followed by a select_cc. 7612 SDValue Lo = Op.getOperand(0); 7613 SDValue Hi = Op.getOperand(1); 7614 SDValue Amt = Op.getOperand(2); 7615 EVT AmtVT = Amt.getValueType(); 7616 7617 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 7618 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 7619 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 7620 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 7621 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 7622 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 7623 DAG.getConstant(-BitWidth, dl, AmtVT)); 7624 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 7625 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 7626 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT), 7627 Tmp4, Tmp6, ISD::SETLE); 7628 SDValue OutOps[] = { OutLo, OutHi }; 7629 return DAG.getMergeValues(OutOps, dl); 7630 } 7631 7632 //===----------------------------------------------------------------------===// 7633 // Vector related lowering. 
7634 // 7635 7636 /// BuildSplatI - Build a canonical splati of Val with an element size of 7637 /// SplatSize. Cast the result to VT. 7638 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 7639 SelectionDAG &DAG, const SDLoc &dl) { 7640 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 7641 7642 static const MVT VTys[] = { // canonical VT to use for each size. 7643 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 7644 }; 7645 7646 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 7647 7648 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 7649 if (Val == -1) 7650 SplatSize = 1; 7651 7652 EVT CanonicalVT = VTys[SplatSize-1]; 7653 7654 // Build a canonical splat for this value. 7655 return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT)); 7656 } 7657 7658 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 7659 /// specified intrinsic ID. 7660 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG, 7661 const SDLoc &dl, EVT DestVT = MVT::Other) { 7662 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 7663 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7664 DAG.getConstant(IID, dl, MVT::i32), Op); 7665 } 7666 7667 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 7668 /// specified intrinsic ID. 7669 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 7670 SelectionDAG &DAG, const SDLoc &dl, 7671 EVT DestVT = MVT::Other) { 7672 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 7673 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7674 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS); 7675 } 7676 7677 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 7678 /// specified intrinsic ID. 7679 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 7680 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl, 7681 EVT DestVT = MVT::Other) { 7682 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 7683 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7684 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2); 7685 } 7686 7687 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 7688 /// amount. The result has the specified value type. 7689 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT, 7690 SelectionDAG &DAG, const SDLoc &dl) { 7691 // Force LHS/RHS to be the right type. 7692 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 7693 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 7694 7695 int Ops[16]; 7696 for (unsigned i = 0; i != 16; ++i) 7697 Ops[i] = i + Amt; 7698 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 7699 return DAG.getNode(ISD::BITCAST, dl, VT, T); 7700 } 7701 7702 /// Do we have an efficient pattern in a .td file for this node? 7703 /// 7704 /// \param V - pointer to the BuildVectorSDNode being matched 7705 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves? 7706 /// 7707 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR 7708 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where 7709 /// the opposite is true (expansion is beneficial) are: 7710 /// - The node builds a vector out of integers that are not 32 or 64-bits 7711 /// - The node builds a vector out of constants 7712 /// - The node is a "load-and-splat" 7713 /// In all other cases, we will choose to keep the BUILD_VECTOR. 
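/// Illustrative examples (not an exhaustive specification): a v4i32
/// BUILD_VECTOR of four distinct non-constant i32 values is kept when direct
/// moves are available, whereas a v16i8 node, a node built only from
/// constants, or a splat of a single loaded value is expanded.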
7714 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V, 7715 bool HasDirectMove, 7716 bool HasP8Vector) { 7717 EVT VecVT = V->getValueType(0); 7718 bool RightType = VecVT == MVT::v2f64 || 7719 (HasP8Vector && VecVT == MVT::v4f32) || 7720 (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32)); 7721 if (!RightType) 7722 return false; 7723 7724 bool IsSplat = true; 7725 bool IsLoad = false; 7726 SDValue Op0 = V->getOperand(0); 7727 7728 // This function is called in a block that confirms the node is not a constant 7729 // splat. So a constant BUILD_VECTOR here means the vector is built out of 7730 // different constants. 7731 if (V->isConstant()) 7732 return false; 7733 for (int i = 0, e = V->getNumOperands(); i < e; ++i) { 7734 if (V->getOperand(i).isUndef()) 7735 return false; 7736 // We want to expand nodes that represent load-and-splat even if the 7737 // loaded value is a floating point truncation or conversion to int. 7738 if (V->getOperand(i).getOpcode() == ISD::LOAD || 7739 (V->getOperand(i).getOpcode() == ISD::FP_ROUND && 7740 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) || 7741 (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT && 7742 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) || 7743 (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT && 7744 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD)) 7745 IsLoad = true; 7746 // If the operands are different or the input is not a load and has more 7747 // uses than just this BV node, then it isn't a splat. 7748 if (V->getOperand(i) != Op0 || 7749 (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode()))) 7750 IsSplat = false; 7751 } 7752 return !(IsSplat && IsLoad); 7753 } 7754 7755 // Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128. 7756 SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const { 7757 7758 SDLoc dl(Op); 7759 SDValue Op0 = Op->getOperand(0); 7760 7761 if (!EnableQuadPrecision || 7762 (Op.getValueType() != MVT::f128 ) || 7763 (Op0.getOpcode() != ISD::BUILD_PAIR) || 7764 (Op0.getOperand(0).getValueType() != MVT::i64) || 7765 (Op0.getOperand(1).getValueType() != MVT::i64)) 7766 return SDValue(); 7767 7768 return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0), 7769 Op0.getOperand(1)); 7770 } 7771 7772 // If this is a case we can't handle, return null and let the default 7773 // expansion code take care of it. If we CAN select this case, and if it 7774 // selects to a single instruction, return Op. Otherwise, if we can codegen 7775 // this case more efficiently than a constant pool load, lower it to the 7776 // sequence of ops that should be used. 7777 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 7778 SelectionDAG &DAG) const { 7779 SDLoc dl(Op); 7780 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 7781 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 7782 7783 if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) { 7784 // We first build an i32 vector, load it into a QPX register, 7785 // then convert it to a floating-point vector and compare it 7786 // to a zero vector to get the boolean result. 
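// (If every element below turns out to be a constant, we take a shortcut
// instead: a v4f32 constant made up of 1.0 / -1.0 (and undef) entries is
// loaded from the constant pool with QVLFSb and its result is used directly
// as the v4i1 value.)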
7787 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 7788 int FrameIdx = MFI.CreateStackObject(16, 16, false); 7789 MachinePointerInfo PtrInfo = 7790 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7791 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7792 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7793 7794 assert(BVN->getNumOperands() == 4 && 7795 "BUILD_VECTOR for v4i1 does not have 4 operands"); 7796 7797 bool IsConst = true; 7798 for (unsigned i = 0; i < 4; ++i) { 7799 if (BVN->getOperand(i).isUndef()) continue; 7800 if (!isa<ConstantSDNode>(BVN->getOperand(i))) { 7801 IsConst = false; 7802 break; 7803 } 7804 } 7805 7806 if (IsConst) { 7807 Constant *One = 7808 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0); 7809 Constant *NegOne = 7810 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0); 7811 7812 Constant *CV[4]; 7813 for (unsigned i = 0; i < 4; ++i) { 7814 if (BVN->getOperand(i).isUndef()) 7815 CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext())); 7816 else if (isNullConstant(BVN->getOperand(i))) 7817 CV[i] = NegOne; 7818 else 7819 CV[i] = One; 7820 } 7821 7822 Constant *CP = ConstantVector::get(CV); 7823 SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()), 7824 16 /* alignment */); 7825 7826 SDValue Ops[] = {DAG.getEntryNode(), CPIdx}; 7827 SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other}); 7828 return DAG.getMemIntrinsicNode( 7829 PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32, 7830 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 7831 } 7832 7833 SmallVector<SDValue, 4> Stores; 7834 for (unsigned i = 0; i < 4; ++i) { 7835 if (BVN->getOperand(i).isUndef()) continue; 7836 7837 unsigned Offset = 4*i; 7838 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 7839 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 7840 7841 unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize(); 7842 if (StoreSize > 4) { 7843 Stores.push_back( 7844 DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx, 7845 PtrInfo.getWithOffset(Offset), MVT::i32)); 7846 } else { 7847 SDValue StoreValue = BVN->getOperand(i); 7848 if (StoreSize < 4) 7849 StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue); 7850 7851 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx, 7852 PtrInfo.getWithOffset(Offset))); 7853 } 7854 } 7855 7856 SDValue StoreChain; 7857 if (!Stores.empty()) 7858 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 7859 else 7860 StoreChain = DAG.getEntryNode(); 7861 7862 // Now load from v4i32 into the QPX register; this will extend it to 7863 // v4i64 but not yet convert it to a floating point. Nevertheless, this 7864 // is typed as v4f64 because the QPX register integer states are not 7865 // explicitly represented. 7866 7867 SDValue Ops[] = {StoreChain, 7868 DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32), 7869 FIdx}; 7870 SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other}); 7871 7872 SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, 7873 dl, VTs, Ops, MVT::v4i32, PtrInfo); 7874 LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 7875 DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32), 7876 LoadedVect); 7877 7878 SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64); 7879 7880 return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ); 7881 } 7882 7883 // All other QPX vectors are handled by generic code. 
7884 if (Subtarget.hasQPX()) 7885 return SDValue(); 7886 7887 // Check if this is a splat of a constant value. 7888 APInt APSplatBits, APSplatUndef; 7889 unsigned SplatBitSize; 7890 bool HasAnyUndefs; 7891 if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize, 7892 HasAnyUndefs, 0, !Subtarget.isLittleEndian()) || 7893 SplatBitSize > 32) { 7894 // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be 7895 // lowered to VSX instructions under certain conditions. 7896 // Without VSX, there is no pattern more efficient than expanding the node. 7897 if (Subtarget.hasVSX() && 7898 haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(), 7899 Subtarget.hasP8Vector())) 7900 return Op; 7901 return SDValue(); 7902 } 7903 7904 unsigned SplatBits = APSplatBits.getZExtValue(); 7905 unsigned SplatUndef = APSplatUndef.getZExtValue(); 7906 unsigned SplatSize = SplatBitSize / 8; 7907 7908 // First, handle single instruction cases. 7909 7910 // All zeros? 7911 if (SplatBits == 0) { 7912 // Canonicalize all zero vectors to be v4i32. 7913 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 7914 SDValue Z = DAG.getConstant(0, dl, MVT::v4i32); 7915 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z); 7916 } 7917 return Op; 7918 } 7919 7920 // We have XXSPLTIB for constant splats one byte wide 7921 if (Subtarget.hasP9Vector() && SplatSize == 1) { 7922 // This is a splat of 1-byte elements with some elements potentially undef. 7923 // Rather than trying to match undef in the SDAG patterns, ensure that all 7924 // elements are the same constant. 7925 if (HasAnyUndefs || ISD::isBuildVectorAllOnes(BVN)) { 7926 SmallVector<SDValue, 16> Ops(16, DAG.getConstant(SplatBits, 7927 dl, MVT::i32)); 7928 SDValue NewBV = DAG.getBuildVector(MVT::v16i8, dl, Ops); 7929 if (Op.getValueType() != MVT::v16i8) 7930 return DAG.getBitcast(Op.getValueType(), NewBV); 7931 return NewBV; 7932 } 7933 7934 // BuildVectorSDNode::isConstantSplat() is actually pretty smart. It'll 7935 // detect that constant splats like v8i16: 0xABAB are really just splats 7936 // of a 1-byte constant. In this case, we need to convert the node to a 7937 // splat of v16i8 and a bitcast. 7938 if (Op.getValueType() != MVT::v16i8) 7939 return DAG.getBitcast(Op.getValueType(), 7940 DAG.getConstant(SplatBits, dl, MVT::v16i8)); 7941 7942 return Op; 7943 } 7944 7945 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 7946 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >> 7947 (32-SplatBitSize)); 7948 if (SextVal >= -16 && SextVal <= 15) 7949 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl); 7950 7951 // Two instruction sequences. 7952 7953 // If this value is in the range [-32,30] and is even, use: 7954 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2) 7955 // If this value is in the range [17,31] and is odd, use: 7956 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16) 7957 // If this value is in the range [-31,-17] and is odd, use: 7958 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16) 7959 // Note the last two are three-instruction sequences. 7960 if (SextVal >= -32 && SextVal <= 31) { 7961 // To avoid having these optimizations undone by constant folding, 7962 // we convert to a pseudo that will be expanded later into one of 7963 // the above forms. 7964 SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32); 7965 EVT VT = (SplatSize == 1 ? MVT::v16i8 : 7966 (SplatSize == 2 ? 
MVT::v8i16 : MVT::v4i32));
7967 SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
7968 SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
7969 if (VT == Op.getValueType())
7970 return RetVal;
7971 else
7972 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
7973 }
7974
7975 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
7976 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important
7977 // for fneg/fabs.
7978 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
7979 // Make -1 and vspltisw -1:
7980 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);
7981
7982 // Make the VSLW intrinsic, computing 0x8000_0000.
7983 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
7984 OnesV, DAG, dl);
7985
7986 // xor by OnesV to invert it.
7987 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
7988 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7989 }
7990
7991 // Check to see if this is a wide variety of vsplti*, binop self cases.
7992 static const signed char SplatCsts[] = {
7993 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
7994 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
7995 };
7996
7997 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
7998 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
7999 // cases which are ambiguous (e.g. formation of 0x8000_0000).
8000 int i = SplatCsts[idx];
8001
8002 // Figure out what shift amount will be used by altivec if shifted by i in
8003 // this splat size.
8004 unsigned TypeShiftAmt = i & (SplatBitSize-1);
8005
8006 // vsplti + shl self.
8007 if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
8008 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8009 static const unsigned IIDs[] = { // Intrinsic to use for each size.
8010 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
8011 Intrinsic::ppc_altivec_vslw
8012 };
8013 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8014 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8015 }
8016
8017 // vsplti + srl self.
8018 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
8019 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8020 static const unsigned IIDs[] = { // Intrinsic to use for each size.
8021 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
8022 Intrinsic::ppc_altivec_vsrw
8023 };
8024 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8025 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8026 }
8027
8028 // vsplti + sra self.
8029 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
8030 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8031 static const unsigned IIDs[] = { // Intrinsic to use for each size.
8032 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
8033 Intrinsic::ppc_altivec_vsraw
8034 };
8035 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8036 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8037 }
8038
8039 // vsplti + rol self.
8040 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
8041 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
8042 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8043 static const unsigned IIDs[] = { // Intrinsic to use for each size.
8044 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 8045 Intrinsic::ppc_altivec_vrlw 8046 }; 8047 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 8048 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 8049 } 8050 8051 // t = vsplti c, result = vsldoi t, t, 1 8052 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) { 8053 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 8054 unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1; 8055 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 8056 } 8057 // t = vsplti c, result = vsldoi t, t, 2 8058 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) { 8059 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 8060 unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2; 8061 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 8062 } 8063 // t = vsplti c, result = vsldoi t, t, 3 8064 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) { 8065 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 8066 unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3; 8067 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 8068 } 8069 } 8070 8071 return SDValue(); 8072 } 8073 8074 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 8075 /// the specified operations to build the shuffle. 8076 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 8077 SDValue RHS, SelectionDAG &DAG, 8078 const SDLoc &dl) { 8079 unsigned OpNum = (PFEntry >> 26) & 0x0F; 8080 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 8081 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 8082 8083 enum { 8084 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 8085 OP_VMRGHW, 8086 OP_VMRGLW, 8087 OP_VSPLTISW0, 8088 OP_VSPLTISW1, 8089 OP_VSPLTISW2, 8090 OP_VSPLTISW3, 8091 OP_VSLDOI4, 8092 OP_VSLDOI8, 8093 OP_VSLDOI12 8094 }; 8095 8096 if (OpNum == OP_COPY) { 8097 if (LHSID == (1*9+2)*9+3) return LHS; 8098 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 8099 return RHS; 8100 } 8101 8102 SDValue OpLHS, OpRHS; 8103 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 8104 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 8105 8106 int ShufIdxs[16]; 8107 switch (OpNum) { 8108 default: llvm_unreachable("Unknown i32 permute!"); 8109 case OP_VMRGHW: 8110 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 8111 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 8112 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 8113 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 8114 break; 8115 case OP_VMRGLW: 8116 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 8117 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 8118 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 8119 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 8120 break; 8121 case OP_VSPLTISW0: 8122 for (unsigned i = 0; i != 16; ++i) 8123 ShufIdxs[i] = (i&3)+0; 8124 break; 8125 case OP_VSPLTISW1: 8126 for (unsigned i = 0; i != 16; ++i) 8127 ShufIdxs[i] = (i&3)+4; 8128 break; 8129 case OP_VSPLTISW2: 8130 for (unsigned i = 0; i != 16; ++i) 8131 ShufIdxs[i] = (i&3)+8; 8132 break; 8133 case OP_VSPLTISW3: 8134 for (unsigned i = 0; i != 16; ++i) 8135 ShufIdxs[i] = (i&3)+12; 8136 break; 8137 case OP_VSLDOI4: 8138 return 
BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
8139 case OP_VSLDOI8:
8140 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
8141 case OP_VSLDOI12:
8142 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
8143 }
8144 EVT VT = OpLHS.getValueType();
8145 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
8146 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
8147 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
8148 return DAG.getNode(ISD::BITCAST, dl, VT, T);
8149 }
8150
8151 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
8152 /// by the VINSERTB instruction introduced in ISA 3.0, else just return default
8153 /// SDValue.
8154 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
8155 SelectionDAG &DAG) const {
8156 const unsigned BytesInVector = 16;
8157 bool IsLE = Subtarget.isLittleEndian();
8158 SDLoc dl(N);
8159 SDValue V1 = N->getOperand(0);
8160 SDValue V2 = N->getOperand(1);
8161 unsigned ShiftElts = 0, InsertAtByte = 0;
8162 bool Swap = false;
8163
8164 // Shifts required to get the byte we want at element 7.
8165 unsigned LittleEndianShifts[] = {8, 7, 6, 5, 4, 3, 2, 1,
8166 0, 15, 14, 13, 12, 11, 10, 9};
8167 unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
8168 1, 2, 3, 4, 5, 6, 7, 8};
8169
8170 ArrayRef<int> Mask = N->getMask();
8171 int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
8172
8173 // For each mask element, find out if we're just inserting something
8174 // from V2 into V1 or vice versa.
8175 // Possible permutations inserting an element from V2 into V1:
8176 // X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
8177 // 0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
8178 // ...
8179 // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
8180 // Inserting from V1 into V2 will be similar, except mask range will be
8181 // [16,31].
8182
8183 bool FoundCandidate = false;
8184 // If both vector operands for the shuffle are the same vector, the mask
8185 // will contain only elements from the first one and the second one will be
8186 // undef.
8187 unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
8188 // Go through the mask of bytes to find an element that's being moved
8189 // from one vector to the other.
8190 for (unsigned i = 0; i < BytesInVector; ++i) {
8191 unsigned CurrentElement = Mask[i];
8192 // If 2nd operand is undefined, we should only look for the VINSERTB source
8193 // element (7 for big endian, 8 for little endian) in the Mask.
8194 if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
8195 continue;
8196
8197 bool OtherElementsInOrder = true;
8198 // Examine the other elements in the Mask to see if they're in original
8199 // order.
8200 for (unsigned j = 0; j < BytesInVector; ++j) {
8201 if (j == i)
8202 continue;
8203 // If CurrentElement is from V1 [0,15], then we expect the rest of the Mask
8204 // to be from V2 [16,31], and vice versa. Unless the 2nd operand is
8205 // undefined, in which case we always assume we're picking from the 1st operand.
8206 int MaskOffset =
8207 (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
8208 if (Mask[j] != OriginalOrder[j] + MaskOffset) {
8209 OtherElementsInOrder = false;
8210 break;
8211 }
8212 }
8213 // If other elements are in original order, we record the number of shifts
8214 // we need to get the element we want into element 7. Also record which byte
8215 // in the vector we should insert into.
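// For example (illustrative), the byte mask <16, 1, 2, ..., 15> inserts byte 0
// of V2 into byte 0 of V1: i == 0 is the moved element and every other mask
// entry matches the original order of V1, so the shift tables above supply the
// VECSHL amount needed to line that byte up with the lane VINSERTB reads from.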
8216 if (OtherElementsInOrder) { 8217 // If 2nd operand is undefined, we assume no shifts and no swapping. 8218 if (V2.isUndef()) { 8219 ShiftElts = 0; 8220 Swap = false; 8221 } else { 8222 // Only need the last 4-bits for shifts because operands will be swapped if CurrentElement is >= 2^4. 8223 ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF] 8224 : BigEndianShifts[CurrentElement & 0xF]; 8225 Swap = CurrentElement < BytesInVector; 8226 } 8227 InsertAtByte = IsLE ? BytesInVector - (i + 1) : i; 8228 FoundCandidate = true; 8229 break; 8230 } 8231 } 8232 8233 if (!FoundCandidate) 8234 return SDValue(); 8235 8236 // Candidate found, construct the proper SDAG sequence with VINSERTB, 8237 // optionally with VECSHL if shift is required. 8238 if (Swap) 8239 std::swap(V1, V2); 8240 if (V2.isUndef()) 8241 V2 = V1; 8242 if (ShiftElts) { 8243 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2, 8244 DAG.getConstant(ShiftElts, dl, MVT::i32)); 8245 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl, 8246 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8247 } 8248 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2, 8249 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8250 } 8251 8252 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled 8253 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default 8254 /// SDValue. 8255 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N, 8256 SelectionDAG &DAG) const { 8257 const unsigned NumHalfWords = 8; 8258 const unsigned BytesInVector = NumHalfWords * 2; 8259 // Check that the shuffle is on half-words. 8260 if (!isNByteElemShuffleMask(N, 2, 1)) 8261 return SDValue(); 8262 8263 bool IsLE = Subtarget.isLittleEndian(); 8264 SDLoc dl(N); 8265 SDValue V1 = N->getOperand(0); 8266 SDValue V2 = N->getOperand(1); 8267 unsigned ShiftElts = 0, InsertAtByte = 0; 8268 bool Swap = false; 8269 8270 // Shifts required to get the half-word we want at element 3. 8271 unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5}; 8272 unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4}; 8273 8274 uint32_t Mask = 0; 8275 uint32_t OriginalOrderLow = 0x1234567; 8276 uint32_t OriginalOrderHigh = 0x89ABCDEF; 8277 // Now we look at mask elements 0,2,4,6,8,10,12,14. Pack the mask into a 8278 // 32-bit space, only need 4-bit nibbles per element. 8279 for (unsigned i = 0; i < NumHalfWords; ++i) { 8280 unsigned MaskShift = (NumHalfWords - 1 - i) * 4; 8281 Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift); 8282 } 8283 8284 // For each mask element, find out if we're just inserting something 8285 // from V2 into V1 or vice versa. Possible permutations inserting an element 8286 // from V2 into V1: 8287 // X, 1, 2, 3, 4, 5, 6, 7 8288 // 0, X, 2, 3, 4, 5, 6, 7 8289 // 0, 1, X, 3, 4, 5, 6, 7 8290 // 0, 1, 2, X, 4, 5, 6, 7 8291 // 0, 1, 2, 3, X, 5, 6, 7 8292 // 0, 1, 2, 3, 4, X, 6, 7 8293 // 0, 1, 2, 3, 4, 5, X, 7 8294 // 0, 1, 2, 3, 4, 5, 6, X 8295 // Inserting from V1 into V2 will be similar, except mask range will be [8,15]. 8296 8297 bool FoundCandidate = false; 8298 // Go through the mask of half-words to find an element that's being moved 8299 // from one vector to the other. 
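// For example, the identity half-word order <0,1,2,3,4,5,6,7> packs to
// 0x01234567 (== OriginalOrderLow), and half-words taken entirely from V2
// ([8,15]) pack to 0x89ABCDEF (== OriginalOrderHigh).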
8300 for (unsigned i = 0; i < NumHalfWords; ++i) { 8301 unsigned MaskShift = (NumHalfWords - 1 - i) * 4; 8302 uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF; 8303 uint32_t MaskOtherElts = ~(0xF << MaskShift); 8304 uint32_t TargetOrder = 0x0; 8305 8306 // If both vector operands for the shuffle are the same vector, the mask 8307 // will contain only elements from the first one and the second one will be 8308 // undef. 8309 if (V2.isUndef()) { 8310 ShiftElts = 0; 8311 unsigned VINSERTHSrcElem = IsLE ? 4 : 3; 8312 TargetOrder = OriginalOrderLow; 8313 Swap = false; 8314 // Skip if not the correct element or mask of other elements don't equal 8315 // to our expected order. 8316 if (MaskOneElt == VINSERTHSrcElem && 8317 (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) { 8318 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2; 8319 FoundCandidate = true; 8320 break; 8321 } 8322 } else { // If both operands are defined. 8323 // Target order is [8,15] if the current mask is between [0,7]. 8324 TargetOrder = 8325 (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow; 8326 // Skip if mask of other elements don't equal our expected order. 8327 if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) { 8328 // We only need the last 3 bits for the number of shifts. 8329 ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7] 8330 : BigEndianShifts[MaskOneElt & 0x7]; 8331 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2; 8332 Swap = MaskOneElt < NumHalfWords; 8333 FoundCandidate = true; 8334 break; 8335 } 8336 } 8337 } 8338 8339 if (!FoundCandidate) 8340 return SDValue(); 8341 8342 // Candidate found, construct the proper SDAG sequence with VINSERTH, 8343 // optionally with VECSHL if shift is required. 8344 if (Swap) 8345 std::swap(V1, V2); 8346 if (V2.isUndef()) 8347 V2 = V1; 8348 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 8349 if (ShiftElts) { 8350 // Double ShiftElts because we're left shifting on v16i8 type. 8351 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2, 8352 DAG.getConstant(2 * ShiftElts, dl, MVT::i32)); 8353 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl); 8354 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2, 8355 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8356 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 8357 } 8358 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2); 8359 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2, 8360 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8361 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 8362 } 8363 8364 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 8365 /// is a shuffle we can handle in a single instruction, return it. Otherwise, 8366 /// return the code it can be lowered into. Worst case, it can always be 8367 /// lowered into a vperm. 
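/// For example, a splat of element zero of a single v16i8 operand is matched
/// by PPC::isSplatShuffleMask below and returned unchanged so the selector can
/// emit a single vspltb; only masks with no cheaper match fall through to the
/// general VPERM path, whose permute vector is materialized from the constant
/// pool.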
8368 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 8369 SelectionDAG &DAG) const { 8370 SDLoc dl(Op); 8371 SDValue V1 = Op.getOperand(0); 8372 SDValue V2 = Op.getOperand(1); 8373 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 8374 EVT VT = Op.getValueType(); 8375 bool isLittleEndian = Subtarget.isLittleEndian(); 8376 8377 unsigned ShiftElts, InsertAtByte; 8378 bool Swap = false; 8379 if (Subtarget.hasP9Vector() && 8380 PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap, 8381 isLittleEndian)) { 8382 if (Swap) 8383 std::swap(V1, V2); 8384 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 8385 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2); 8386 if (ShiftElts) { 8387 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2, 8388 DAG.getConstant(ShiftElts, dl, MVT::i32)); 8389 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl, 8390 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8391 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 8392 } 8393 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2, 8394 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8395 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 8396 } 8397 8398 if (Subtarget.hasP9Altivec()) { 8399 SDValue NewISDNode; 8400 if ((NewISDNode = lowerToVINSERTH(SVOp, DAG))) 8401 return NewISDNode; 8402 8403 if ((NewISDNode = lowerToVINSERTB(SVOp, DAG))) 8404 return NewISDNode; 8405 } 8406 8407 if (Subtarget.hasVSX() && 8408 PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) { 8409 if (Swap) 8410 std::swap(V1, V2); 8411 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 8412 SDValue Conv2 = 8413 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2); 8414 8415 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2, 8416 DAG.getConstant(ShiftElts, dl, MVT::i32)); 8417 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl); 8418 } 8419 8420 if (Subtarget.hasVSX() && 8421 PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) { 8422 if (Swap) 8423 std::swap(V1, V2); 8424 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1); 8425 SDValue Conv2 = 8426 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? 
V1 : V2); 8427 8428 SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2, 8429 DAG.getConstant(ShiftElts, dl, MVT::i32)); 8430 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI); 8431 } 8432 8433 if (Subtarget.hasP9Vector()) { 8434 if (PPC::isXXBRHShuffleMask(SVOp)) { 8435 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 8436 SDValue ReveHWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v8i16, Conv); 8437 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord); 8438 } else if (PPC::isXXBRWShuffleMask(SVOp)) { 8439 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 8440 SDValue ReveWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v4i32, Conv); 8441 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord); 8442 } else if (PPC::isXXBRDShuffleMask(SVOp)) { 8443 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1); 8444 SDValue ReveDWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Conv); 8445 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord); 8446 } else if (PPC::isXXBRQShuffleMask(SVOp)) { 8447 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1); 8448 SDValue ReveQWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v1i128, Conv); 8449 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord); 8450 } 8451 } 8452 8453 if (Subtarget.hasVSX()) { 8454 if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) { 8455 int SplatIdx = PPC::getVSPLTImmediate(SVOp, 4, DAG); 8456 8457 // If the source for the shuffle is a scalar_to_vector that came from a 8458 // 32-bit load, it will have used LXVWSX so we don't need to splat again. 8459 if (Subtarget.hasP9Vector() && 8460 ((isLittleEndian && SplatIdx == 3) || 8461 (!isLittleEndian && SplatIdx == 0))) { 8462 SDValue Src = V1.getOperand(0); 8463 if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR && 8464 Src.getOperand(0).getOpcode() == ISD::LOAD && 8465 Src.getOperand(0).hasOneUse()) 8466 return V1; 8467 } 8468 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 8469 SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv, 8470 DAG.getConstant(SplatIdx, dl, MVT::i32)); 8471 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat); 8472 } 8473 8474 // Left shifts of 8 bytes are actually swaps. Convert accordingly. 8475 if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) { 8476 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1); 8477 SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv); 8478 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap); 8479 } 8480 } 8481 8482 if (Subtarget.hasQPX()) { 8483 if (VT.getVectorNumElements() != 4) 8484 return SDValue(); 8485 8486 if (V2.isUndef()) V2 = V1; 8487 8488 int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp); 8489 if (AlignIdx != -1) { 8490 return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2, 8491 DAG.getConstant(AlignIdx, dl, MVT::i32)); 8492 } else if (SVOp->isSplat()) { 8493 int SplatIdx = SVOp->getSplatIndex(); 8494 if (SplatIdx >= 4) { 8495 std::swap(V1, V2); 8496 SplatIdx -= 4; 8497 } 8498 8499 return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1, 8500 DAG.getConstant(SplatIdx, dl, MVT::i32)); 8501 } 8502 8503 // Lower this into a qvgpci/qvfperm pair. 8504 8505 // Compute the qvgpci literal 8506 unsigned idx = 0; 8507 for (unsigned i = 0; i < 4; ++i) { 8508 int m = SVOp->getMaskElt(i); 8509 unsigned mm = m >= 0 ? 
(unsigned) m : i; 8510 idx |= mm << (3-i)*3; 8511 } 8512 8513 SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64, 8514 DAG.getConstant(idx, dl, MVT::i32)); 8515 return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3); 8516 } 8517 8518 // Cases that are handled by instructions that take permute immediates 8519 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 8520 // selected by the instruction selector. 8521 if (V2.isUndef()) { 8522 if (PPC::isSplatShuffleMask(SVOp, 1) || 8523 PPC::isSplatShuffleMask(SVOp, 2) || 8524 PPC::isSplatShuffleMask(SVOp, 4) || 8525 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) || 8526 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) || 8527 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 || 8528 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) || 8529 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) || 8530 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) || 8531 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) || 8532 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) || 8533 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) || 8534 (Subtarget.hasP8Altivec() && ( 8535 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) || 8536 PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) || 8537 PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) { 8538 return Op; 8539 } 8540 } 8541 8542 // Altivec has a variety of "shuffle immediates" that take two vector inputs 8543 // and produce a fixed permutation. If any of these match, do not lower to 8544 // VPERM. 8545 unsigned int ShuffleKind = isLittleEndian ? 2 : 0; 8546 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) || 8547 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) || 8548 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 || 8549 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) || 8550 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) || 8551 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) || 8552 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) || 8553 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) || 8554 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) || 8555 (Subtarget.hasP8Altivec() && ( 8556 PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) || 8557 PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) || 8558 PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG)))) 8559 return Op; 8560 8561 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 8562 // perfect shuffle table to emit an optimal matching sequence. 8563 ArrayRef<int> PermMask = SVOp->getMask(); 8564 8565 unsigned PFIndexes[4]; 8566 bool isFourElementShuffle = true; 8567 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 8568 unsigned EltNo = 8; // Start out undef. 8569 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 8570 if (PermMask[i*4+j] < 0) 8571 continue; // Undef, ignore it. 8572 8573 unsigned ByteSource = PermMask[i*4+j]; 8574 if ((ByteSource & 3) != j) { 8575 isFourElementShuffle = false; 8576 break; 8577 } 8578 8579 if (EltNo == 8) { 8580 EltNo = ByteSource/4; 8581 } else if (EltNo != ByteSource/4) { 8582 isFourElementShuffle = false; 8583 break; 8584 } 8585 } 8586 PFIndexes[i] = EltNo; 8587 } 8588 8589 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 8590 // perfect shuffle vector to determine if it is cost effective to do this as 8591 // discrete instructions, or whether we should use a vperm. 8592 // For now, we skip this for little endian until such time as we have a 8593 // little-endian perfect shuffle table. 
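// Each PFIndexes[i] is in [0,8] (8 meaning undef), so the index computed below
// is just a base-9 encoding of the four word indices; the identity shuffle
// <0,1,2,3>, for instance, maps to entry 0*729 + 1*81 + 2*9 + 3 == 102. The
// top two bits of the table entry encode its cost, which is compared against 3
// below.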
8594 if (isFourElementShuffle && !isLittleEndian) { 8595 // Compute the index in the perfect shuffle table. 8596 unsigned PFTableIndex = 8597 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 8598 8599 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 8600 unsigned Cost = (PFEntry >> 30); 8601 8602 // Determining when to avoid vperm is tricky. Many things affect the cost 8603 // of vperm, particularly how many times the perm mask needs to be computed. 8604 // For example, if the perm mask can be hoisted out of a loop or is already 8605 // used (perhaps because there are multiple permutes with the same shuffle 8606 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 8607 // the loop requires an extra register. 8608 // 8609 // As a compromise, we only emit discrete instructions if the shuffle can be 8610 // generated in 3 or fewer operations. When we have loop information 8611 // available, if this block is within a loop, we should avoid using vperm 8612 // for 3-operation perms and use a constant pool load instead. 8613 if (Cost < 3) 8614 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 8615 } 8616 8617 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 8618 // vector that will get spilled to the constant pool. 8619 if (V2.isUndef()) V2 = V1; 8620 8621 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 8622 // that it is in input element units, not in bytes. Convert now. 8623 8624 // For little endian, the order of the input vectors is reversed, and 8625 // the permutation mask is complemented with respect to 31. This is 8626 // necessary to produce proper semantics with the big-endian-biased vperm 8627 // instruction. 8628 EVT EltVT = V1.getValueType().getVectorElementType(); 8629 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 8630 8631 SmallVector<SDValue, 16> ResultMask; 8632 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 8633 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i]; 8634 8635 for (unsigned j = 0; j != BytesPerElement; ++j) 8636 if (isLittleEndian) 8637 ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j), 8638 dl, MVT::i32)); 8639 else 8640 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl, 8641 MVT::i32)); 8642 } 8643 8644 SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask); 8645 if (isLittleEndian) 8646 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 8647 V2, V1, VPermMask); 8648 else 8649 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 8650 V1, V2, VPermMask); 8651 } 8652 8653 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a 8654 /// vector comparison. If it is, return true and fill in Opc/isDot with 8655 /// information about the intrinsic. 8656 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc, 8657 bool &isDot, const PPCSubtarget &Subtarget) { 8658 unsigned IntrinsicID = 8659 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 8660 CompareOpc = -1; 8661 isDot = false; 8662 switch (IntrinsicID) { 8663 default: 8664 return false; 8665 // Comparison predicates. 
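// For each case below, CompareOpc is the numeric opcode field of the matching
// vcmp* instruction; the *_p predicate intrinsics map to the record ('.') form,
// which also sets CR6, hence isDot = true for them.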
8666 case Intrinsic::ppc_altivec_vcmpbfp_p: 8667 CompareOpc = 966; 8668 isDot = true; 8669 break; 8670 case Intrinsic::ppc_altivec_vcmpeqfp_p: 8671 CompareOpc = 198; 8672 isDot = true; 8673 break; 8674 case Intrinsic::ppc_altivec_vcmpequb_p: 8675 CompareOpc = 6; 8676 isDot = true; 8677 break; 8678 case Intrinsic::ppc_altivec_vcmpequh_p: 8679 CompareOpc = 70; 8680 isDot = true; 8681 break; 8682 case Intrinsic::ppc_altivec_vcmpequw_p: 8683 CompareOpc = 134; 8684 isDot = true; 8685 break; 8686 case Intrinsic::ppc_altivec_vcmpequd_p: 8687 if (Subtarget.hasP8Altivec()) { 8688 CompareOpc = 199; 8689 isDot = true; 8690 } else 8691 return false; 8692 break; 8693 case Intrinsic::ppc_altivec_vcmpneb_p: 8694 case Intrinsic::ppc_altivec_vcmpneh_p: 8695 case Intrinsic::ppc_altivec_vcmpnew_p: 8696 case Intrinsic::ppc_altivec_vcmpnezb_p: 8697 case Intrinsic::ppc_altivec_vcmpnezh_p: 8698 case Intrinsic::ppc_altivec_vcmpnezw_p: 8699 if (Subtarget.hasP9Altivec()) { 8700 switch (IntrinsicID) { 8701 default: 8702 llvm_unreachable("Unknown comparison intrinsic."); 8703 case Intrinsic::ppc_altivec_vcmpneb_p: 8704 CompareOpc = 7; 8705 break; 8706 case Intrinsic::ppc_altivec_vcmpneh_p: 8707 CompareOpc = 71; 8708 break; 8709 case Intrinsic::ppc_altivec_vcmpnew_p: 8710 CompareOpc = 135; 8711 break; 8712 case Intrinsic::ppc_altivec_vcmpnezb_p: 8713 CompareOpc = 263; 8714 break; 8715 case Intrinsic::ppc_altivec_vcmpnezh_p: 8716 CompareOpc = 327; 8717 break; 8718 case Intrinsic::ppc_altivec_vcmpnezw_p: 8719 CompareOpc = 391; 8720 break; 8721 } 8722 isDot = true; 8723 } else 8724 return false; 8725 break; 8726 case Intrinsic::ppc_altivec_vcmpgefp_p: 8727 CompareOpc = 454; 8728 isDot = true; 8729 break; 8730 case Intrinsic::ppc_altivec_vcmpgtfp_p: 8731 CompareOpc = 710; 8732 isDot = true; 8733 break; 8734 case Intrinsic::ppc_altivec_vcmpgtsb_p: 8735 CompareOpc = 774; 8736 isDot = true; 8737 break; 8738 case Intrinsic::ppc_altivec_vcmpgtsh_p: 8739 CompareOpc = 838; 8740 isDot = true; 8741 break; 8742 case Intrinsic::ppc_altivec_vcmpgtsw_p: 8743 CompareOpc = 902; 8744 isDot = true; 8745 break; 8746 case Intrinsic::ppc_altivec_vcmpgtsd_p: 8747 if (Subtarget.hasP8Altivec()) { 8748 CompareOpc = 967; 8749 isDot = true; 8750 } else 8751 return false; 8752 break; 8753 case Intrinsic::ppc_altivec_vcmpgtub_p: 8754 CompareOpc = 518; 8755 isDot = true; 8756 break; 8757 case Intrinsic::ppc_altivec_vcmpgtuh_p: 8758 CompareOpc = 582; 8759 isDot = true; 8760 break; 8761 case Intrinsic::ppc_altivec_vcmpgtuw_p: 8762 CompareOpc = 646; 8763 isDot = true; 8764 break; 8765 case Intrinsic::ppc_altivec_vcmpgtud_p: 8766 if (Subtarget.hasP8Altivec()) { 8767 CompareOpc = 711; 8768 isDot = true; 8769 } else 8770 return false; 8771 break; 8772 8773 // VSX predicate comparisons use the same infrastructure 8774 case Intrinsic::ppc_vsx_xvcmpeqdp_p: 8775 case Intrinsic::ppc_vsx_xvcmpgedp_p: 8776 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 8777 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 8778 case Intrinsic::ppc_vsx_xvcmpgesp_p: 8779 case Intrinsic::ppc_vsx_xvcmpgtsp_p: 8780 if (Subtarget.hasVSX()) { 8781 switch (IntrinsicID) { 8782 case Intrinsic::ppc_vsx_xvcmpeqdp_p: 8783 CompareOpc = 99; 8784 break; 8785 case Intrinsic::ppc_vsx_xvcmpgedp_p: 8786 CompareOpc = 115; 8787 break; 8788 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 8789 CompareOpc = 107; 8790 break; 8791 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 8792 CompareOpc = 67; 8793 break; 8794 case Intrinsic::ppc_vsx_xvcmpgesp_p: 8795 CompareOpc = 83; 8796 break; 8797 case Intrinsic::ppc_vsx_xvcmpgtsp_p: 8798 CompareOpc = 75; 
8799 break; 8800 } 8801 isDot = true; 8802 } else 8803 return false; 8804 break; 8805 8806 // Normal Comparisons. 8807 case Intrinsic::ppc_altivec_vcmpbfp: 8808 CompareOpc = 966; 8809 break; 8810 case Intrinsic::ppc_altivec_vcmpeqfp: 8811 CompareOpc = 198; 8812 break; 8813 case Intrinsic::ppc_altivec_vcmpequb: 8814 CompareOpc = 6; 8815 break; 8816 case Intrinsic::ppc_altivec_vcmpequh: 8817 CompareOpc = 70; 8818 break; 8819 case Intrinsic::ppc_altivec_vcmpequw: 8820 CompareOpc = 134; 8821 break; 8822 case Intrinsic::ppc_altivec_vcmpequd: 8823 if (Subtarget.hasP8Altivec()) 8824 CompareOpc = 199; 8825 else 8826 return false; 8827 break; 8828 case Intrinsic::ppc_altivec_vcmpneb: 8829 case Intrinsic::ppc_altivec_vcmpneh: 8830 case Intrinsic::ppc_altivec_vcmpnew: 8831 case Intrinsic::ppc_altivec_vcmpnezb: 8832 case Intrinsic::ppc_altivec_vcmpnezh: 8833 case Intrinsic::ppc_altivec_vcmpnezw: 8834 if (Subtarget.hasP9Altivec()) 8835 switch (IntrinsicID) { 8836 default: 8837 llvm_unreachable("Unknown comparison intrinsic."); 8838 case Intrinsic::ppc_altivec_vcmpneb: 8839 CompareOpc = 7; 8840 break; 8841 case Intrinsic::ppc_altivec_vcmpneh: 8842 CompareOpc = 71; 8843 break; 8844 case Intrinsic::ppc_altivec_vcmpnew: 8845 CompareOpc = 135; 8846 break; 8847 case Intrinsic::ppc_altivec_vcmpnezb: 8848 CompareOpc = 263; 8849 break; 8850 case Intrinsic::ppc_altivec_vcmpnezh: 8851 CompareOpc = 327; 8852 break; 8853 case Intrinsic::ppc_altivec_vcmpnezw: 8854 CompareOpc = 391; 8855 break; 8856 } 8857 else 8858 return false; 8859 break; 8860 case Intrinsic::ppc_altivec_vcmpgefp: 8861 CompareOpc = 454; 8862 break; 8863 case Intrinsic::ppc_altivec_vcmpgtfp: 8864 CompareOpc = 710; 8865 break; 8866 case Intrinsic::ppc_altivec_vcmpgtsb: 8867 CompareOpc = 774; 8868 break; 8869 case Intrinsic::ppc_altivec_vcmpgtsh: 8870 CompareOpc = 838; 8871 break; 8872 case Intrinsic::ppc_altivec_vcmpgtsw: 8873 CompareOpc = 902; 8874 break; 8875 case Intrinsic::ppc_altivec_vcmpgtsd: 8876 if (Subtarget.hasP8Altivec()) 8877 CompareOpc = 967; 8878 else 8879 return false; 8880 break; 8881 case Intrinsic::ppc_altivec_vcmpgtub: 8882 CompareOpc = 518; 8883 break; 8884 case Intrinsic::ppc_altivec_vcmpgtuh: 8885 CompareOpc = 582; 8886 break; 8887 case Intrinsic::ppc_altivec_vcmpgtuw: 8888 CompareOpc = 646; 8889 break; 8890 case Intrinsic::ppc_altivec_vcmpgtud: 8891 if (Subtarget.hasP8Altivec()) 8892 CompareOpc = 711; 8893 else 8894 return false; 8895 break; 8896 } 8897 return true; 8898 } 8899 8900 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 8901 /// lower, do it, otherwise return null. 8902 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 8903 SelectionDAG &DAG) const { 8904 unsigned IntrinsicID = 8905 cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 8906 8907 SDLoc dl(Op); 8908 8909 if (IntrinsicID == Intrinsic::thread_pointer) { 8910 // Reads the thread pointer register, used for __builtin_thread_pointer. 8911 if (Subtarget.isPPC64()) 8912 return DAG.getRegister(PPC::X13, MVT::i64); 8913 return DAG.getRegister(PPC::R2, MVT::i32); 8914 } 8915 8916 // We are looking for absolute values here. 
8917 // The idea is to try to fit one of two patterns: 8918 // max (a, (0-a)) OR max ((0-a), a) 8919 if (Subtarget.hasP9Vector() && 8920 (IntrinsicID == Intrinsic::ppc_altivec_vmaxsw || 8921 IntrinsicID == Intrinsic::ppc_altivec_vmaxsh || 8922 IntrinsicID == Intrinsic::ppc_altivec_vmaxsb)) { 8923 SDValue V1 = Op.getOperand(1); 8924 SDValue V2 = Op.getOperand(2); 8925 if (V1.getSimpleValueType() == V2.getSimpleValueType() && 8926 (V1.getSimpleValueType() == MVT::v4i32 || 8927 V1.getSimpleValueType() == MVT::v8i16 || 8928 V1.getSimpleValueType() == MVT::v16i8)) { 8929 if ( V1.getOpcode() == ISD::SUB && 8930 ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) && 8931 V1.getOperand(1) == V2 ) { 8932 // Generate the abs instruction with the operands 8933 return DAG.getNode(ISD::ABS, dl, V2.getValueType(),V2); 8934 } 8935 8936 if ( V2.getOpcode() == ISD::SUB && 8937 ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) && 8938 V2.getOperand(1) == V1 ) { 8939 // Generate the abs instruction with the operands 8940 return DAG.getNode(ISD::ABS, dl, V1.getValueType(),V1); 8941 } 8942 } 8943 } 8944 8945 // If this is a lowered altivec predicate compare, CompareOpc is set to the 8946 // opcode number of the comparison. 8947 int CompareOpc; 8948 bool isDot; 8949 if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget)) 8950 return SDValue(); // Don't custom lower most intrinsics. 8951 8952 // If this is a non-dot comparison, make the VCMP node and we are done. 8953 if (!isDot) { 8954 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 8955 Op.getOperand(1), Op.getOperand(2), 8956 DAG.getConstant(CompareOpc, dl, MVT::i32)); 8957 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 8958 } 8959 8960 // Create the PPCISD altivec 'dot' comparison node. 8961 SDValue Ops[] = { 8962 Op.getOperand(2), // LHS 8963 Op.getOperand(3), // RHS 8964 DAG.getConstant(CompareOpc, dl, MVT::i32) 8965 }; 8966 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue }; 8967 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 8968 8969 // Now that we have the comparison, emit a copy from the CR to a GPR. 8970 // This is flagged to the above dot comparison. 8971 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32, 8972 DAG.getRegister(PPC::CR6, MVT::i32), 8973 CompNode.getValue(1)); 8974 8975 // Unpack the result based on how the target uses it. 8976 unsigned BitNo; // Bit # of CR6. 8977 bool InvertBit; // Invert result? 8978 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 8979 default: // Can't happen, don't crash on invalid number though. 8980 case 0: // Return the value of the EQ bit of CR6. 8981 BitNo = 0; InvertBit = false; 8982 break; 8983 case 1: // Return the inverted value of the EQ bit of CR6. 8984 BitNo = 0; InvertBit = true; 8985 break; 8986 case 2: // Return the value of the LT bit of CR6. 8987 BitNo = 2; InvertBit = false; 8988 break; 8989 case 3: // Return the inverted value of the LT bit of CR6. 8990 BitNo = 2; InvertBit = true; 8991 break; 8992 } 8993 8994 // Shift the bit into the low position. 8995 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags, 8996 DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32)); 8997 // Isolate the bit. 8998 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags, 8999 DAG.getConstant(1, dl, MVT::i32)); 9000 9001 // If we are supposed to, toggle the bit. 
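  // (MFOCRF hands us the 32-bit CR with only the CR6 field defined; CR6 sits
  // in bits 7..4 counting from the least significant end, so the SRL amount
  // above, 8 - (3 - BitNo), is 5 for the EQ bit and 7 for the LT bit, leaving
  // the selected bit in bit 0 of Flags.)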
9002 if (InvertBit) 9003 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags, 9004 DAG.getConstant(1, dl, MVT::i32)); 9005 return Flags; 9006 } 9007 9008 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op, 9009 SelectionDAG &DAG) const { 9010 // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to 9011 // the beginning of the argument list. 9012 int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1; 9013 SDLoc DL(Op); 9014 switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) { 9015 case Intrinsic::ppc_cfence: { 9016 assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument."); 9017 assert(Subtarget.isPPC64() && "Only 64-bit is supported for now."); 9018 return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other, 9019 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, 9020 Op.getOperand(ArgStart + 1)), 9021 Op.getOperand(0)), 9022 0); 9023 } 9024 default: 9025 break; 9026 } 9027 return SDValue(); 9028 } 9029 9030 SDValue PPCTargetLowering::LowerREM(SDValue Op, SelectionDAG &DAG) const { 9031 // Check for a DIV with the same operands as this REM. 9032 for (auto UI : Op.getOperand(1)->uses()) { 9033 if ((Op.getOpcode() == ISD::SREM && UI->getOpcode() == ISD::SDIV) || 9034 (Op.getOpcode() == ISD::UREM && UI->getOpcode() == ISD::UDIV)) 9035 if (UI->getOperand(0) == Op.getOperand(0) && 9036 UI->getOperand(1) == Op.getOperand(1)) 9037 return SDValue(); 9038 } 9039 return Op; 9040 } 9041 9042 // Lower scalar BSWAP64 to xxbrd. 9043 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const { 9044 SDLoc dl(Op); 9045 // MTVSRDD 9046 Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0), 9047 Op.getOperand(0)); 9048 // XXBRD 9049 Op = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Op); 9050 // MFVSRD 9051 int VectorIndex = 0; 9052 if (Subtarget.isLittleEndian()) 9053 VectorIndex = 1; 9054 Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op, 9055 DAG.getTargetConstant(VectorIndex, dl, MVT::i32)); 9056 return Op; 9057 } 9058 9059 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be 9060 // compared to a value that is atomically loaded (atomic loads zero-extend). 9061 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, 9062 SelectionDAG &DAG) const { 9063 assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP && 9064 "Expecting an atomic compare-and-swap here."); 9065 SDLoc dl(Op); 9066 auto *AtomicNode = cast<AtomicSDNode>(Op.getNode()); 9067 EVT MemVT = AtomicNode->getMemoryVT(); 9068 if (MemVT.getSizeInBits() >= 32) 9069 return Op; 9070 9071 SDValue CmpOp = Op.getOperand(2); 9072 // If this is already correctly zero-extended, leave it alone. 9073 auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits()); 9074 if (DAG.MaskedValueIsZero(CmpOp, HighBits)) 9075 return Op; 9076 9077 // Clear the high bits of the compare operand. 9078 unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1; 9079 SDValue NewCmpOp = 9080 DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp, 9081 DAG.getConstant(MaskVal, dl, MVT::i32)); 9082 9083 // Replace the existing compare operand with the properly zero-extended one. 9084 SmallVector<SDValue, 4> Ops; 9085 for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++) 9086 Ops.push_back(AtomicNode->getOperand(i)); 9087 Ops[2] = NewCmpOp; 9088 MachineMemOperand *MMO = AtomicNode->getMemOperand(); 9089 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other); 9090 auto NodeTy = 9091 (MemVT == MVT::i8) ? 
PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16; 9092 return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO); 9093 } 9094 9095 SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, 9096 SelectionDAG &DAG) const { 9097 SDLoc dl(Op); 9098 // For v2i64 (VSX), we can pattern patch the v2i32 case (using fp <-> int 9099 // instructions), but for smaller types, we need to first extend up to v2i32 9100 // before doing going farther. 9101 if (Op.getValueType() == MVT::v2i64) { 9102 EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 9103 if (ExtVT != MVT::v2i32) { 9104 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)); 9105 Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op, 9106 DAG.getValueType(EVT::getVectorVT(*DAG.getContext(), 9107 ExtVT.getVectorElementType(), 4))); 9108 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op); 9109 Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op, 9110 DAG.getValueType(MVT::v2i32)); 9111 } 9112 9113 return Op; 9114 } 9115 9116 return SDValue(); 9117 } 9118 9119 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, 9120 SelectionDAG &DAG) const { 9121 SDLoc dl(Op); 9122 // Create a stack slot that is 16-byte aligned. 9123 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 9124 int FrameIdx = MFI.CreateStackObject(16, 16, false); 9125 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 9126 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 9127 9128 // Store the input value into Value#0 of the stack slot. 9129 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 9130 MachinePointerInfo()); 9131 // Load it out. 9132 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo()); 9133 } 9134 9135 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, 9136 SelectionDAG &DAG) const { 9137 assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && 9138 "Should only be called for ISD::INSERT_VECTOR_ELT"); 9139 9140 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 9141 // We have legal lowering for constant indices but not for variable ones. 9142 if (!C) 9143 return SDValue(); 9144 9145 EVT VT = Op.getValueType(); 9146 SDLoc dl(Op); 9147 SDValue V1 = Op.getOperand(0); 9148 SDValue V2 = Op.getOperand(1); 9149 // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types. 9150 if (VT == MVT::v8i16 || VT == MVT::v16i8) { 9151 SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2); 9152 unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8; 9153 unsigned InsertAtElement = C->getZExtValue(); 9154 unsigned InsertAtByte = InsertAtElement * BytesInEachElement; 9155 if (Subtarget.isLittleEndian()) { 9156 InsertAtByte = (16 - BytesInEachElement) - InsertAtByte; 9157 } 9158 return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz, 9159 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9160 } 9161 return Op; 9162 } 9163 9164 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, 9165 SelectionDAG &DAG) const { 9166 SDLoc dl(Op); 9167 SDNode *N = Op.getNode(); 9168 9169 assert(N->getOperand(0).getValueType() == MVT::v4i1 && 9170 "Unknown extract_vector_elt type"); 9171 9172 SDValue Value = N->getOperand(0); 9173 9174 // The first part of this is like the store lowering except that we don't 9175 // need to track the chain. 9176 9177 // The values are now known to be -1 (false) or 1 (true). To convert this 9178 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 
9179 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 9180 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 9181 9182 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 9183 // understand how to form the extending load. 9184 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 9185 9186 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 9187 9188 // Now convert to an integer and store. 9189 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 9190 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 9191 Value); 9192 9193 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 9194 int FrameIdx = MFI.CreateStackObject(16, 16, false); 9195 MachinePointerInfo PtrInfo = 9196 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 9197 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 9198 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 9199 9200 SDValue StoreChain = DAG.getEntryNode(); 9201 SDValue Ops[] = {StoreChain, 9202 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32), 9203 Value, FIdx}; 9204 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other); 9205 9206 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 9207 dl, VTs, Ops, MVT::v4i32, PtrInfo); 9208 9209 // Extract the value requested. 9210 unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 9211 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 9212 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 9213 9214 SDValue IntVal = 9215 DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset)); 9216 9217 if (!Subtarget.useCRBits()) 9218 return IntVal; 9219 9220 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal); 9221 } 9222 9223 /// Lowering for QPX v4i1 loads 9224 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op, 9225 SelectionDAG &DAG) const { 9226 SDLoc dl(Op); 9227 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode()); 9228 SDValue LoadChain = LN->getChain(); 9229 SDValue BasePtr = LN->getBasePtr(); 9230 9231 if (Op.getValueType() == MVT::v4f64 || 9232 Op.getValueType() == MVT::v4f32) { 9233 EVT MemVT = LN->getMemoryVT(); 9234 unsigned Alignment = LN->getAlignment(); 9235 9236 // If this load is properly aligned, then it is legal. 
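    // Otherwise, split it into four scalar (possibly extending) loads, each
    // carrying whatever alignment the original access guarantees at its
    // offset, and rebuild the vector with BUILD_VECTOR. Pre-increment
    // addressing, if present, is re-attached to the first scalar load.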
9237 if (Alignment >= MemVT.getStoreSize()) 9238 return Op; 9239 9240 EVT ScalarVT = Op.getValueType().getScalarType(), 9241 ScalarMemVT = MemVT.getScalarType(); 9242 unsigned Stride = ScalarMemVT.getStoreSize(); 9243 9244 SDValue Vals[4], LoadChains[4]; 9245 for (unsigned Idx = 0; Idx < 4; ++Idx) { 9246 SDValue Load; 9247 if (ScalarVT != ScalarMemVT) 9248 Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain, 9249 BasePtr, 9250 LN->getPointerInfo().getWithOffset(Idx * Stride), 9251 ScalarMemVT, MinAlign(Alignment, Idx * Stride), 9252 LN->getMemOperand()->getFlags(), LN->getAAInfo()); 9253 else 9254 Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr, 9255 LN->getPointerInfo().getWithOffset(Idx * Stride), 9256 MinAlign(Alignment, Idx * Stride), 9257 LN->getMemOperand()->getFlags(), LN->getAAInfo()); 9258 9259 if (Idx == 0 && LN->isIndexed()) { 9260 assert(LN->getAddressingMode() == ISD::PRE_INC && 9261 "Unknown addressing mode on vector load"); 9262 Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(), 9263 LN->getAddressingMode()); 9264 } 9265 9266 Vals[Idx] = Load; 9267 LoadChains[Idx] = Load.getValue(1); 9268 9269 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 9270 DAG.getConstant(Stride, dl, 9271 BasePtr.getValueType())); 9272 } 9273 9274 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 9275 SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals); 9276 9277 if (LN->isIndexed()) { 9278 SDValue RetOps[] = { Value, Vals[0].getValue(1), TF }; 9279 return DAG.getMergeValues(RetOps, dl); 9280 } 9281 9282 SDValue RetOps[] = { Value, TF }; 9283 return DAG.getMergeValues(RetOps, dl); 9284 } 9285 9286 assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower"); 9287 assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported"); 9288 9289 // To lower v4i1 from a byte array, we load the byte elements of the 9290 // vector and then reuse the BUILD_VECTOR logic. 9291 9292 SDValue VectElmts[4], VectElmtChains[4]; 9293 for (unsigned i = 0; i < 4; ++i) { 9294 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 9295 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 9296 9297 VectElmts[i] = DAG.getExtLoad( 9298 ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx, 9299 LN->getPointerInfo().getWithOffset(i), MVT::i8, 9300 /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo()); 9301 VectElmtChains[i] = VectElmts[i].getValue(1); 9302 } 9303 9304 LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains); 9305 SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts); 9306 9307 SDValue RVals[] = { Value, LoadChain }; 9308 return DAG.getMergeValues(RVals, dl); 9309 } 9310 9311 /// Lowering for QPX v4i1 stores 9312 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op, 9313 SelectionDAG &DAG) const { 9314 SDLoc dl(Op); 9315 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode()); 9316 SDValue StoreChain = SN->getChain(); 9317 SDValue BasePtr = SN->getBasePtr(); 9318 SDValue Value = SN->getValue(); 9319 9320 if (Value.getValueType() == MVT::v4f64 || 9321 Value.getValueType() == MVT::v4f32) { 9322 EVT MemVT = SN->getMemoryVT(); 9323 unsigned Alignment = SN->getAlignment(); 9324 9325 // If this store is properly aligned, then it is legal. 
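    // Otherwise, extract the four scalar elements and emit four (possibly
    // truncating) scalar stores, again with the per-offset alignment the
    // original access guarantees; pre-increment addressing, if present, is
    // re-attached to the first store.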
9326 if (Alignment >= MemVT.getStoreSize()) 9327 return Op; 9328 9329 EVT ScalarVT = Value.getValueType().getScalarType(), 9330 ScalarMemVT = MemVT.getScalarType(); 9331 unsigned Stride = ScalarMemVT.getStoreSize(); 9332 9333 SDValue Stores[4]; 9334 for (unsigned Idx = 0; Idx < 4; ++Idx) { 9335 SDValue Ex = DAG.getNode( 9336 ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value, 9337 DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout()))); 9338 SDValue Store; 9339 if (ScalarVT != ScalarMemVT) 9340 Store = 9341 DAG.getTruncStore(StoreChain, dl, Ex, BasePtr, 9342 SN->getPointerInfo().getWithOffset(Idx * Stride), 9343 ScalarMemVT, MinAlign(Alignment, Idx * Stride), 9344 SN->getMemOperand()->getFlags(), SN->getAAInfo()); 9345 else 9346 Store = DAG.getStore(StoreChain, dl, Ex, BasePtr, 9347 SN->getPointerInfo().getWithOffset(Idx * Stride), 9348 MinAlign(Alignment, Idx * Stride), 9349 SN->getMemOperand()->getFlags(), SN->getAAInfo()); 9350 9351 if (Idx == 0 && SN->isIndexed()) { 9352 assert(SN->getAddressingMode() == ISD::PRE_INC && 9353 "Unknown addressing mode on vector store"); 9354 Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(), 9355 SN->getAddressingMode()); 9356 } 9357 9358 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 9359 DAG.getConstant(Stride, dl, 9360 BasePtr.getValueType())); 9361 Stores[Idx] = Store; 9362 } 9363 9364 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 9365 9366 if (SN->isIndexed()) { 9367 SDValue RetOps[] = { TF, Stores[0].getValue(1) }; 9368 return DAG.getMergeValues(RetOps, dl); 9369 } 9370 9371 return TF; 9372 } 9373 9374 assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported"); 9375 assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower"); 9376 9377 // The values are now known to be -1 (false) or 1 (true). To convert this 9378 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 9379 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 9380 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 9381 9382 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 9383 // understand how to form the extending load. 9384 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 9385 9386 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 9387 9388 // Now convert to an integer and store. 9389 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 9390 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 9391 Value); 9392 9393 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 9394 int FrameIdx = MFI.CreateStackObject(16, 16, false); 9395 MachinePointerInfo PtrInfo = 9396 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 9397 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 9398 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 9399 9400 SDValue Ops[] = {StoreChain, 9401 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32), 9402 Value, FIdx}; 9403 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other); 9404 9405 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 9406 dl, VTs, Ops, MVT::v4i32, PtrInfo); 9407 9408 // Move data into the byte array. 
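  // At this point the boolean vector lives on the stack as four 32-bit words
  // (written by the qvstfiw above). Read those words back and store the low
  // byte of each to the original destination, producing the packed one byte
  // per element layout that the v4i1 load lowering above expects.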
9409 SDValue Loads[4], LoadChains[4]; 9410 for (unsigned i = 0; i < 4; ++i) { 9411 unsigned Offset = 4*i; 9412 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 9413 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 9414 9415 Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx, 9416 PtrInfo.getWithOffset(Offset)); 9417 LoadChains[i] = Loads[i].getValue(1); 9418 } 9419 9420 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 9421 9422 SDValue Stores[4]; 9423 for (unsigned i = 0; i < 4; ++i) { 9424 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 9425 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 9426 9427 Stores[i] = DAG.getTruncStore( 9428 StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i), 9429 MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(), 9430 SN->getAAInfo()); 9431 } 9432 9433 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 9434 9435 return StoreChain; 9436 } 9437 9438 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 9439 SDLoc dl(Op); 9440 if (Op.getValueType() == MVT::v4i32) { 9441 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 9442 9443 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl); 9444 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt. 9445 9446 SDValue RHSSwap = // = vrlw RHS, 16 9447 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 9448 9449 // Shrinkify inputs to v8i16. 9450 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS); 9451 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS); 9452 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap); 9453 9454 // Low parts multiplied together, generating 32-bit results (we ignore the 9455 // top parts). 9456 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 9457 LHS, RHS, DAG, dl, MVT::v4i32); 9458 9459 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 9460 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 9461 // Shift the high parts up 16 bits. 9462 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 9463 Neg16, DAG, dl); 9464 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 9465 } else if (Op.getValueType() == MVT::v8i16) { 9466 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 9467 9468 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl); 9469 9470 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 9471 LHS, RHS, Zero, DAG, dl); 9472 } else if (Op.getValueType() == MVT::v16i8) { 9473 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 9474 bool isLittleEndian = Subtarget.isLittleEndian(); 9475 9476 // Multiply the even 8-bit parts, producing 16-bit sums. 9477 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 9478 LHS, RHS, DAG, dl, MVT::v8i16); 9479 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts); 9480 9481 // Multiply the odd 8-bit parts, producing 16-bit sums. 9482 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 9483 LHS, RHS, DAG, dl, MVT::v8i16); 9484 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts); 9485 9486 // Merge the results together. Because vmuleub and vmuloub are 9487 // instructions with a big-endian bias, we must reverse the 9488 // element numbering and reverse the meaning of "odd" and "even" 9489 // when generating little endian code. 
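    // For example, on a big-endian target the mask built below comes out as
    // {1,17,3,19,...}: byte 1 of each halfword is the low-order byte of the
    // 16-bit product, and interleaving the even and odd products restores the
    // original element order.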
9490 int Ops[16]; 9491 for (unsigned i = 0; i != 8; ++i) { 9492 if (isLittleEndian) { 9493 Ops[i*2 ] = 2*i; 9494 Ops[i*2+1] = 2*i+16; 9495 } else { 9496 Ops[i*2 ] = 2*i+1; 9497 Ops[i*2+1] = 2*i+1+16; 9498 } 9499 } 9500 if (isLittleEndian) 9501 return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops); 9502 else 9503 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 9504 } else { 9505 llvm_unreachable("Unknown mul to lower!"); 9506 } 9507 } 9508 9509 /// LowerOperation - Provide custom lowering hooks for some operations. 9510 /// 9511 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 9512 switch (Op.getOpcode()) { 9513 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 9514 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 9515 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 9516 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 9517 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 9518 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 9519 case ISD::SETCC: return LowerSETCC(Op, DAG); 9520 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 9521 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 9522 9523 // Variable argument lowering. 9524 case ISD::VASTART: return LowerVASTART(Op, DAG); 9525 case ISD::VAARG: return LowerVAARG(Op, DAG); 9526 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 9527 9528 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG); 9529 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 9530 case ISD::GET_DYNAMIC_AREA_OFFSET: 9531 return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG); 9532 9533 // Exception handling lowering. 9534 case ISD::EH_DWARF_CFA: return LowerEH_DWARF_CFA(Op, DAG); 9535 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 9536 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 9537 9538 case ISD::LOAD: return LowerLOAD(Op, DAG); 9539 case ISD::STORE: return LowerSTORE(Op, DAG); 9540 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); 9541 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 9542 case ISD::FP_TO_UINT: 9543 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, SDLoc(Op)); 9544 case ISD::UINT_TO_FP: 9545 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 9546 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 9547 9548 // Lower 64-bit shifts. 9549 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 9550 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 9551 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 9552 9553 // Vector-related lowering. 9554 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 9555 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 9556 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 9557 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 9558 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG); 9559 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 9560 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 9561 case ISD::MUL: return LowerMUL(Op, DAG); 9562 9563 // For counter-based loop handling. 9564 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 9565 9566 case ISD::BITCAST: return LowerBITCAST(Op, DAG); 9567 9568 // Frame & Return address. 
9569 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 9570 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 9571 9572 case ISD::INTRINSIC_VOID: 9573 return LowerINTRINSIC_VOID(Op, DAG); 9574 case ISD::SREM: 9575 case ISD::UREM: 9576 return LowerREM(Op, DAG); 9577 case ISD::BSWAP: 9578 return LowerBSWAP(Op, DAG); 9579 case ISD::ATOMIC_CMP_SWAP: 9580 return LowerATOMIC_CMP_SWAP(Op, DAG); 9581 } 9582 } 9583 9584 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 9585 SmallVectorImpl<SDValue>&Results, 9586 SelectionDAG &DAG) const { 9587 SDLoc dl(N); 9588 switch (N->getOpcode()) { 9589 default: 9590 llvm_unreachable("Do not know how to custom type legalize this operation!"); 9591 case ISD::READCYCLECOUNTER: { 9592 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 9593 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0)); 9594 9595 Results.push_back(RTB); 9596 Results.push_back(RTB.getValue(1)); 9597 Results.push_back(RTB.getValue(2)); 9598 break; 9599 } 9600 case ISD::INTRINSIC_W_CHAIN: { 9601 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 9602 Intrinsic::ppc_is_decremented_ctr_nonzero) 9603 break; 9604 9605 assert(N->getValueType(0) == MVT::i1 && 9606 "Unexpected result type for CTR decrement intrinsic"); 9607 EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), 9608 N->getValueType(0)); 9609 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 9610 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 9611 N->getOperand(1)); 9612 9613 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt)); 9614 Results.push_back(NewInt.getValue(1)); 9615 break; 9616 } 9617 case ISD::VAARG: { 9618 if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64()) 9619 return; 9620 9621 EVT VT = N->getValueType(0); 9622 9623 if (VT == MVT::i64) { 9624 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG); 9625 9626 Results.push_back(NewNode); 9627 Results.push_back(NewNode.getValue(1)); 9628 } 9629 return; 9630 } 9631 case ISD::FP_TO_SINT: 9632 case ISD::FP_TO_UINT: 9633 // LowerFP_TO_INT() can only handle f32 and f64. 
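    // (For ppcf128 we return without pushing any replacement results;
    // presumably the common legalization code then handles the node, e.g. via
    // a library call.)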
9634 if (N->getOperand(0).getValueType() == MVT::ppcf128) 9635 return; 9636 Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl)); 9637 return; 9638 } 9639 } 9640 9641 //===----------------------------------------------------------------------===// 9642 // Other Lowering Code 9643 //===----------------------------------------------------------------------===// 9644 9645 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) { 9646 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 9647 Function *Func = Intrinsic::getDeclaration(M, Id); 9648 return Builder.CreateCall(Func, {}); 9649 } 9650 9651 // The mappings for emitLeading/TrailingFence is taken from 9652 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html 9653 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder, 9654 Instruction *Inst, 9655 AtomicOrdering Ord) const { 9656 if (Ord == AtomicOrdering::SequentiallyConsistent) 9657 return callIntrinsic(Builder, Intrinsic::ppc_sync); 9658 if (isReleaseOrStronger(Ord)) 9659 return callIntrinsic(Builder, Intrinsic::ppc_lwsync); 9660 return nullptr; 9661 } 9662 9663 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder, 9664 Instruction *Inst, 9665 AtomicOrdering Ord) const { 9666 if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) { 9667 // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and 9668 // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html 9669 // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification. 9670 if (isa<LoadInst>(Inst) && Subtarget.isPPC64()) 9671 return Builder.CreateCall( 9672 Intrinsic::getDeclaration( 9673 Builder.GetInsertBlock()->getParent()->getParent(), 9674 Intrinsic::ppc_cfence, {Inst->getType()}), 9675 {Inst}); 9676 // FIXME: Can use isync for rmw operation. 9677 return callIntrinsic(Builder, Intrinsic::ppc_lwsync); 9678 } 9679 return nullptr; 9680 } 9681 9682 MachineBasicBlock * 9683 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB, 9684 unsigned AtomicSize, 9685 unsigned BinOpcode, 9686 unsigned CmpOpcode, 9687 unsigned CmpPred) const { 9688 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 9689 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 9690 9691 auto LoadMnemonic = PPC::LDARX; 9692 auto StoreMnemonic = PPC::STDCX; 9693 switch (AtomicSize) { 9694 default: 9695 llvm_unreachable("Unexpected size of atomic entity"); 9696 case 1: 9697 LoadMnemonic = PPC::LBARX; 9698 StoreMnemonic = PPC::STBCX; 9699 assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4"); 9700 break; 9701 case 2: 9702 LoadMnemonic = PPC::LHARX; 9703 StoreMnemonic = PPC::STHCX; 9704 assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4"); 9705 break; 9706 case 4: 9707 LoadMnemonic = PPC::LWARX; 9708 StoreMnemonic = PPC::STWCX; 9709 break; 9710 case 8: 9711 LoadMnemonic = PPC::LDARX; 9712 StoreMnemonic = PPC::STDCX; 9713 break; 9714 } 9715 9716 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 9717 MachineFunction *F = BB->getParent(); 9718 MachineFunction::iterator It = ++BB->getIterator(); 9719 9720 unsigned dest = MI.getOperand(0).getReg(); 9721 unsigned ptrA = MI.getOperand(1).getReg(); 9722 unsigned ptrB = MI.getOperand(2).getReg(); 9723 unsigned incr = MI.getOperand(3).getReg(); 9724 DebugLoc dl = MI.getDebugLoc(); 9725 9726 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 9727 MachineBasicBlock *loop2MBB = 9728 CmpOpcode ? 
F->CreateMachineBasicBlock(LLVM_BB) : nullptr; 9729 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 9730 F->insert(It, loopMBB); 9731 if (CmpOpcode) 9732 F->insert(It, loop2MBB); 9733 F->insert(It, exitMBB); 9734 exitMBB->splice(exitMBB->begin(), BB, 9735 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 9736 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 9737 9738 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9739 unsigned TmpReg = (!BinOpcode) ? incr : 9740 RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass 9741 : &PPC::GPRCRegClass); 9742 9743 // thisMBB: 9744 // ... 9745 // fallthrough --> loopMBB 9746 BB->addSuccessor(loopMBB); 9747 9748 // loopMBB: 9749 // l[wd]arx dest, ptr 9750 // add r0, dest, incr 9751 // st[wd]cx. r0, ptr 9752 // bne- loopMBB 9753 // fallthrough --> exitMBB 9754 9755 // For max/min... 9756 // loopMBB: 9757 // l[wd]arx dest, ptr 9758 // cmpl?[wd] incr, dest 9759 // bgt exitMBB 9760 // loop2MBB: 9761 // st[wd]cx. dest, ptr 9762 // bne- loopMBB 9763 // fallthrough --> exitMBB 9764 9765 BB = loopMBB; 9766 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 9767 .addReg(ptrA).addReg(ptrB); 9768 if (BinOpcode) 9769 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 9770 if (CmpOpcode) { 9771 // Signed comparisons of byte or halfword values must be sign-extended. 9772 if (CmpOpcode == PPC::CMPW && AtomicSize < 4) { 9773 unsigned ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 9774 BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH), 9775 ExtReg).addReg(dest); 9776 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 9777 .addReg(incr).addReg(ExtReg); 9778 } else 9779 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 9780 .addReg(incr).addReg(dest); 9781 9782 BuildMI(BB, dl, TII->get(PPC::BCC)) 9783 .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB); 9784 BB->addSuccessor(loop2MBB); 9785 BB->addSuccessor(exitMBB); 9786 BB = loop2MBB; 9787 } 9788 BuildMI(BB, dl, TII->get(StoreMnemonic)) 9789 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 9790 BuildMI(BB, dl, TII->get(PPC::BCC)) 9791 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 9792 BB->addSuccessor(loopMBB); 9793 BB->addSuccessor(exitMBB); 9794 9795 // exitMBB: 9796 // ... 9797 BB = exitMBB; 9798 return BB; 9799 } 9800 9801 MachineBasicBlock * 9802 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr &MI, 9803 MachineBasicBlock *BB, 9804 bool is8bit, // operation 9805 unsigned BinOpcode, 9806 unsigned CmpOpcode, 9807 unsigned CmpPred) const { 9808 // If we support part-word atomic mnemonics, just use them 9809 if (Subtarget.hasPartwordAtomics()) 9810 return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, 9811 CmpOpcode, CmpPred); 9812 9813 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 9814 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 9815 // In 64 bit mode we have to use 64 bits for addresses, even though the 9816 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 9817 // registers without caring whether they're 32 or 64, but here we're 9818 // doing actual arithmetic on the addresses. 9819 bool is64bit = Subtarget.isPPC64(); 9820 bool isLittleEndian = Subtarget.isLittleEndian(); 9821 unsigned ZeroReg = is64bit ? 
PPC::ZERO8 : PPC::ZERO; 9822 9823 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 9824 MachineFunction *F = BB->getParent(); 9825 MachineFunction::iterator It = ++BB->getIterator(); 9826 9827 unsigned dest = MI.getOperand(0).getReg(); 9828 unsigned ptrA = MI.getOperand(1).getReg(); 9829 unsigned ptrB = MI.getOperand(2).getReg(); 9830 unsigned incr = MI.getOperand(3).getReg(); 9831 DebugLoc dl = MI.getDebugLoc(); 9832 9833 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 9834 MachineBasicBlock *loop2MBB = 9835 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr; 9836 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 9837 F->insert(It, loopMBB); 9838 if (CmpOpcode) 9839 F->insert(It, loop2MBB); 9840 F->insert(It, exitMBB); 9841 exitMBB->splice(exitMBB->begin(), BB, 9842 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 9843 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 9844 9845 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9846 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 9847 : &PPC::GPRCRegClass; 9848 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 9849 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 9850 unsigned ShiftReg = 9851 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC); 9852 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC); 9853 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 9854 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 9855 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 9856 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 9857 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC); 9858 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 9859 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 9860 unsigned Ptr1Reg; 9861 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC); 9862 9863 // thisMBB: 9864 // ... 9865 // fallthrough --> loopMBB 9866 BB->addSuccessor(loopMBB); 9867 9868 // The 4-byte load must be aligned, while a char or short may be 9869 // anywhere in the word. Hence all this nasty bookkeeping code. 9870 // add ptr1, ptrA, ptrB [copy if ptrA==0] 9871 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 9872 // xori shift, shift1, 24 [16] 9873 // rlwinm ptr, ptr1, 0, 0, 29 9874 // slw incr2, incr, shift 9875 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 9876 // slw mask, mask2, shift 9877 // loopMBB: 9878 // lwarx tmpDest, ptr 9879 // add tmp, tmpDest, incr2 9880 // andc tmp2, tmpDest, mask 9881 // and tmp3, tmp, mask 9882 // or tmp4, tmp3, tmp2 9883 // stwcx. tmp4, ptr 9884 // bne- loopMBB 9885 // fallthrough --> exitMBB 9886 // srw dest, tmpDest, shift 9887 if (ptrA != ZeroReg) { 9888 Ptr1Reg = RegInfo.createVirtualRegister(RC); 9889 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 9890 .addReg(ptrA).addReg(ptrB); 9891 } else { 9892 Ptr1Reg = ptrB; 9893 } 9894 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 9895 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 9896 if (!isLittleEndian) 9897 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 9898 .addReg(Shift1Reg).addImm(is8bit ? 
24 : 16); 9899 if (is64bit) 9900 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 9901 .addReg(Ptr1Reg).addImm(0).addImm(61); 9902 else 9903 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 9904 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 9905 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg) 9906 .addReg(incr).addReg(ShiftReg); 9907 if (is8bit) 9908 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 9909 else { 9910 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 9911 BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535); 9912 } 9913 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 9914 .addReg(Mask2Reg).addReg(ShiftReg); 9915 9916 BB = loopMBB; 9917 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 9918 .addReg(ZeroReg).addReg(PtrReg); 9919 if (BinOpcode) 9920 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 9921 .addReg(Incr2Reg).addReg(TmpDestReg); 9922 BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg) 9923 .addReg(TmpDestReg).addReg(MaskReg); 9924 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg) 9925 .addReg(TmpReg).addReg(MaskReg); 9926 if (CmpOpcode) { 9927 // For unsigned comparisons, we can directly compare the shifted values. 9928 // For signed comparisons we shift and sign extend. 9929 unsigned SReg = RegInfo.createVirtualRegister(RC); 9930 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), SReg) 9931 .addReg(TmpDestReg).addReg(MaskReg); 9932 unsigned ValueReg = SReg; 9933 unsigned CmpReg = Incr2Reg; 9934 if (CmpOpcode == PPC::CMPW) { 9935 ValueReg = RegInfo.createVirtualRegister(RC); 9936 BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg) 9937 .addReg(SReg).addReg(ShiftReg); 9938 unsigned ValueSReg = RegInfo.createVirtualRegister(RC); 9939 BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg) 9940 .addReg(ValueReg); 9941 ValueReg = ValueSReg; 9942 CmpReg = incr; 9943 } 9944 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 9945 .addReg(CmpReg).addReg(ValueReg); 9946 BuildMI(BB, dl, TII->get(PPC::BCC)) 9947 .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB); 9948 BB->addSuccessor(loop2MBB); 9949 BB->addSuccessor(exitMBB); 9950 BB = loop2MBB; 9951 } 9952 BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg) 9953 .addReg(Tmp3Reg).addReg(Tmp2Reg); 9954 BuildMI(BB, dl, TII->get(PPC::STWCX)) 9955 .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg); 9956 BuildMI(BB, dl, TII->get(PPC::BCC)) 9957 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 9958 BB->addSuccessor(loopMBB); 9959 BB->addSuccessor(exitMBB); 9960 9961 // exitMBB: 9962 // ... 
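  // The SRW emitted at the head of exitMBB shifts the word originally
  // returned by the lwarx (TmpDestReg) back down by the byte offset's shift
  // amount, so the old partword value lands in the low bits of dest, which is
  // what the atomic RMW must return.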
9963 BB = exitMBB; 9964 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg) 9965 .addReg(ShiftReg); 9966 return BB; 9967 } 9968 9969 llvm::MachineBasicBlock * 9970 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI, 9971 MachineBasicBlock *MBB) const { 9972 DebugLoc DL = MI.getDebugLoc(); 9973 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 9974 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 9975 9976 MachineFunction *MF = MBB->getParent(); 9977 MachineRegisterInfo &MRI = MF->getRegInfo(); 9978 9979 const BasicBlock *BB = MBB->getBasicBlock(); 9980 MachineFunction::iterator I = ++MBB->getIterator(); 9981 9982 // Memory Reference 9983 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin(); 9984 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end(); 9985 9986 unsigned DstReg = MI.getOperand(0).getReg(); 9987 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 9988 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!"); 9989 unsigned mainDstReg = MRI.createVirtualRegister(RC); 9990 unsigned restoreDstReg = MRI.createVirtualRegister(RC); 9991 9992 MVT PVT = getPointerTy(MF->getDataLayout()); 9993 assert((PVT == MVT::i64 || PVT == MVT::i32) && 9994 "Invalid Pointer Size!"); 9995 // For v = setjmp(buf), we generate 9996 // 9997 // thisMBB: 9998 // SjLjSetup mainMBB 9999 // bl mainMBB 10000 // v_restore = 1 10001 // b sinkMBB 10002 // 10003 // mainMBB: 10004 // buf[LabelOffset] = LR 10005 // v_main = 0 10006 // 10007 // sinkMBB: 10008 // v = phi(main, restore) 10009 // 10010 10011 MachineBasicBlock *thisMBB = MBB; 10012 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 10013 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 10014 MF->insert(I, mainMBB); 10015 MF->insert(I, sinkMBB); 10016 10017 MachineInstrBuilder MIB; 10018 10019 // Transfer the remainder of BB and its successor edges to sinkMBB. 10020 sinkMBB->splice(sinkMBB->begin(), MBB, 10021 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 10022 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 10023 10024 // Note that the structure of the jmp_buf used here is not compatible 10025 // with that used by libc, and is not designed to be. Specifically, it 10026 // stores only those 'reserved' registers that LLVM does not otherwise 10027 // understand how to spill. Also, by convention, by the time this 10028 // intrinsic is called, Clang has already stored the frame address in the 10029 // first slot of the buffer and stack address in the third. Following the 10030 // X86 target code, we'll store the jump address in the second slot. We also 10031 // need to save the TOC pointer (R2) to handle jumps between shared 10032 // libraries, and that will be stored in the fourth slot. The thread 10033 // identifier (R13) is not affected. 10034 10035 // thisMBB: 10036 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 10037 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 10038 const int64_t BPOffset = 4 * PVT.getStoreSize(); 10039 10040 // Prepare IP either in reg. 
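  // (LabelReg is filled in mainMBB by the MFLR below with the link-register
  // value set by BCLalways, i.e. the address of the restore path in thisMBB;
  // that address is what gets stored at LabelOffset and what longjmp later
  // branches to.)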
10041 const TargetRegisterClass *PtrRC = getRegClassFor(PVT); 10042 unsigned LabelReg = MRI.createVirtualRegister(PtrRC); 10043 unsigned BufReg = MI.getOperand(1).getReg(); 10044 10045 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) { 10046 setUsesTOCBasePtr(*MBB->getParent()); 10047 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD)) 10048 .addReg(PPC::X2) 10049 .addImm(TOCOffset) 10050 .addReg(BufReg); 10051 MIB.setMemRefs(MMOBegin, MMOEnd); 10052 } 10053 10054 // Naked functions never have a base pointer, and so we use r1. For all 10055 // other functions, this decision must be delayed until during PEI. 10056 unsigned BaseReg; 10057 if (MF->getFunction().hasFnAttribute(Attribute::Naked)) 10058 BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1; 10059 else 10060 BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP; 10061 10062 MIB = BuildMI(*thisMBB, MI, DL, 10063 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW)) 10064 .addReg(BaseReg) 10065 .addImm(BPOffset) 10066 .addReg(BufReg); 10067 MIB.setMemRefs(MMOBegin, MMOEnd); 10068 10069 // Setup 10070 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB); 10071 MIB.addRegMask(TRI->getNoPreservedMask()); 10072 10073 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1); 10074 10075 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup)) 10076 .addMBB(mainMBB); 10077 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB); 10078 10079 thisMBB->addSuccessor(mainMBB, BranchProbability::getZero()); 10080 thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne()); 10081 10082 // mainMBB: 10083 // mainDstReg = 0 10084 MIB = 10085 BuildMI(mainMBB, DL, 10086 TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg); 10087 10088 // Store IP 10089 if (Subtarget.isPPC64()) { 10090 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD)) 10091 .addReg(LabelReg) 10092 .addImm(LabelOffset) 10093 .addReg(BufReg); 10094 } else { 10095 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW)) 10096 .addReg(LabelReg) 10097 .addImm(LabelOffset) 10098 .addReg(BufReg); 10099 } 10100 10101 MIB.setMemRefs(MMOBegin, MMOEnd); 10102 10103 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0); 10104 mainMBB->addSuccessor(sinkMBB); 10105 10106 // sinkMBB: 10107 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 10108 TII->get(PPC::PHI), DstReg) 10109 .addReg(mainDstReg).addMBB(mainMBB) 10110 .addReg(restoreDstReg).addMBB(thisMBB); 10111 10112 MI.eraseFromParent(); 10113 return sinkMBB; 10114 } 10115 10116 MachineBasicBlock * 10117 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI, 10118 MachineBasicBlock *MBB) const { 10119 DebugLoc DL = MI.getDebugLoc(); 10120 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 10121 10122 MachineFunction *MF = MBB->getParent(); 10123 MachineRegisterInfo &MRI = MF->getRegInfo(); 10124 10125 // Memory Reference 10126 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin(); 10127 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end(); 10128 10129 MVT PVT = getPointerTy(MF->getDataLayout()); 10130 assert((PVT == MVT::i64 || PVT == MVT::i32) && 10131 "Invalid Pointer Size!"); 10132 10133 const TargetRegisterClass *RC = 10134 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 10135 unsigned Tmp = MRI.createVirtualRegister(RC); 10136 // Since FP is only updated here but NOT referenced, it's treated as GPR. 10137 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; 10138 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; 10139 unsigned BP = 10140 (PVT == MVT::i64) 10141 ? 
PPC::X30 10142 : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29 10143 : PPC::R30); 10144 10145 MachineInstrBuilder MIB; 10146 10147 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 10148 const int64_t SPOffset = 2 * PVT.getStoreSize(); 10149 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 10150 const int64_t BPOffset = 4 * PVT.getStoreSize(); 10151 10152 unsigned BufReg = MI.getOperand(0).getReg(); 10153 10154 // Reload FP (the jumped-to function may not have had a 10155 // frame pointer, and if so, then its r31 will be restored 10156 // as necessary). 10157 if (PVT == MVT::i64) { 10158 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP) 10159 .addImm(0) 10160 .addReg(BufReg); 10161 } else { 10162 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP) 10163 .addImm(0) 10164 .addReg(BufReg); 10165 } 10166 MIB.setMemRefs(MMOBegin, MMOEnd); 10167 10168 // Reload IP 10169 if (PVT == MVT::i64) { 10170 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp) 10171 .addImm(LabelOffset) 10172 .addReg(BufReg); 10173 } else { 10174 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp) 10175 .addImm(LabelOffset) 10176 .addReg(BufReg); 10177 } 10178 MIB.setMemRefs(MMOBegin, MMOEnd); 10179 10180 // Reload SP 10181 if (PVT == MVT::i64) { 10182 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP) 10183 .addImm(SPOffset) 10184 .addReg(BufReg); 10185 } else { 10186 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP) 10187 .addImm(SPOffset) 10188 .addReg(BufReg); 10189 } 10190 MIB.setMemRefs(MMOBegin, MMOEnd); 10191 10192 // Reload BP 10193 if (PVT == MVT::i64) { 10194 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP) 10195 .addImm(BPOffset) 10196 .addReg(BufReg); 10197 } else { 10198 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP) 10199 .addImm(BPOffset) 10200 .addReg(BufReg); 10201 } 10202 MIB.setMemRefs(MMOBegin, MMOEnd); 10203 10204 // Reload TOC 10205 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) { 10206 setUsesTOCBasePtr(*MBB->getParent()); 10207 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) 10208 .addImm(TOCOffset) 10209 .addReg(BufReg); 10210 10211 MIB.setMemRefs(MMOBegin, MMOEnd); 10212 } 10213 10214 // Jump 10215 BuildMI(*MBB, MI, DL, 10216 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp); 10217 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR)); 10218 10219 MI.eraseFromParent(); 10220 return MBB; 10221 } 10222 10223 MachineBasicBlock * 10224 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, 10225 MachineBasicBlock *BB) const { 10226 if (MI.getOpcode() == TargetOpcode::STACKMAP || 10227 MI.getOpcode() == TargetOpcode::PATCHPOINT) { 10228 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() && 10229 MI.getOpcode() == TargetOpcode::PATCHPOINT) { 10230 // Call lowering should have added an r2 operand to indicate a dependence 10231 // on the TOC base pointer value. It can't however, because there is no 10232 // way to mark the dependence as implicit there, and so the stackmap code 10233 // will confuse it with a regular operand. Instead, add the dependence 10234 // here. 
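      // The operand is added as an implicit use of X2 (isDef = false,
      // isImp = true), so the register allocator sees the TOC dependence
      // while the stackmap machinery does not treat it as one of the call's
      // explicit arguments.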
10235 setUsesTOCBasePtr(*BB->getParent()); 10236 MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true)); 10237 } 10238 10239 return emitPatchPoint(MI, BB); 10240 } 10241 10242 if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 || 10243 MI.getOpcode() == PPC::EH_SjLj_SetJmp64) { 10244 return emitEHSjLjSetJmp(MI, BB); 10245 } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 || 10246 MI.getOpcode() == PPC::EH_SjLj_LongJmp64) { 10247 return emitEHSjLjLongJmp(MI, BB); 10248 } 10249 10250 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 10251 10252 // To "insert" these instructions we actually have to insert their 10253 // control-flow patterns. 10254 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 10255 MachineFunction::iterator It = ++BB->getIterator(); 10256 10257 MachineFunction *F = BB->getParent(); 10258 10259 if (MI.getOpcode() == PPC::SELECT_CC_I4 || 10260 MI.getOpcode() == PPC::SELECT_CC_I8 || 10261 MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8) { 10262 SmallVector<MachineOperand, 2> Cond; 10263 if (MI.getOpcode() == PPC::SELECT_CC_I4 || 10264 MI.getOpcode() == PPC::SELECT_CC_I8) 10265 Cond.push_back(MI.getOperand(4)); 10266 else 10267 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET)); 10268 Cond.push_back(MI.getOperand(1)); 10269 10270 DebugLoc dl = MI.getDebugLoc(); 10271 TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond, 10272 MI.getOperand(2).getReg(), MI.getOperand(3).getReg()); 10273 } else if (MI.getOpcode() == PPC::SELECT_CC_I4 || 10274 MI.getOpcode() == PPC::SELECT_CC_I8 || 10275 MI.getOpcode() == PPC::SELECT_CC_F4 || 10276 MI.getOpcode() == PPC::SELECT_CC_F8 || 10277 MI.getOpcode() == PPC::SELECT_CC_F16 || 10278 MI.getOpcode() == PPC::SELECT_CC_QFRC || 10279 MI.getOpcode() == PPC::SELECT_CC_QSRC || 10280 MI.getOpcode() == PPC::SELECT_CC_QBRC || 10281 MI.getOpcode() == PPC::SELECT_CC_VRRC || 10282 MI.getOpcode() == PPC::SELECT_CC_VSFRC || 10283 MI.getOpcode() == PPC::SELECT_CC_VSSRC || 10284 MI.getOpcode() == PPC::SELECT_CC_VSRC || 10285 MI.getOpcode() == PPC::SELECT_CC_SPE4 || 10286 MI.getOpcode() == PPC::SELECT_CC_SPE || 10287 MI.getOpcode() == PPC::SELECT_I4 || 10288 MI.getOpcode() == PPC::SELECT_I8 || 10289 MI.getOpcode() == PPC::SELECT_F4 || 10290 MI.getOpcode() == PPC::SELECT_F8 || 10291 MI.getOpcode() == PPC::SELECT_F16 || 10292 MI.getOpcode() == PPC::SELECT_QFRC || 10293 MI.getOpcode() == PPC::SELECT_QSRC || 10294 MI.getOpcode() == PPC::SELECT_QBRC || 10295 MI.getOpcode() == PPC::SELECT_SPE || 10296 MI.getOpcode() == PPC::SELECT_SPE4 || 10297 MI.getOpcode() == PPC::SELECT_VRRC || 10298 MI.getOpcode() == PPC::SELECT_VSFRC || 10299 MI.getOpcode() == PPC::SELECT_VSSRC || 10300 MI.getOpcode() == PPC::SELECT_VSRC) { 10301 // The incoming instruction knows the destination vreg to set, the 10302 // condition code register to branch on, the true/false values to 10303 // select between, and a branch opcode to use. 10304 10305 // thisMBB: 10306 // ... 10307 // TrueVal = ... 10308 // cmpTY ccX, r1, r2 10309 // bCC copy1MBB 10310 // fallthrough --> copy0MBB 10311 MachineBasicBlock *thisMBB = BB; 10312 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 10313 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 10314 DebugLoc dl = MI.getDebugLoc(); 10315 F->insert(It, copy0MBB); 10316 F->insert(It, sinkMBB); 10317 10318 // Transfer the remainder of BB and its successor edges to sinkMBB. 
10319 sinkMBB->splice(sinkMBB->begin(), BB, 10320 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10321 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 10322 10323 // Next, add the true and fallthrough blocks as its successors. 10324 BB->addSuccessor(copy0MBB); 10325 BB->addSuccessor(sinkMBB); 10326 10327 if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 || 10328 MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 || 10329 MI.getOpcode() == PPC::SELECT_F16 || 10330 MI.getOpcode() == PPC::SELECT_SPE4 || 10331 MI.getOpcode() == PPC::SELECT_SPE || 10332 MI.getOpcode() == PPC::SELECT_QFRC || 10333 MI.getOpcode() == PPC::SELECT_QSRC || 10334 MI.getOpcode() == PPC::SELECT_QBRC || 10335 MI.getOpcode() == PPC::SELECT_VRRC || 10336 MI.getOpcode() == PPC::SELECT_VSFRC || 10337 MI.getOpcode() == PPC::SELECT_VSSRC || 10338 MI.getOpcode() == PPC::SELECT_VSRC) { 10339 BuildMI(BB, dl, TII->get(PPC::BC)) 10340 .addReg(MI.getOperand(1).getReg()) 10341 .addMBB(sinkMBB); 10342 } else { 10343 unsigned SelectPred = MI.getOperand(4).getImm(); 10344 BuildMI(BB, dl, TII->get(PPC::BCC)) 10345 .addImm(SelectPred) 10346 .addReg(MI.getOperand(1).getReg()) 10347 .addMBB(sinkMBB); 10348 } 10349 10350 // copy0MBB: 10351 // %FalseValue = ... 10352 // # fallthrough to sinkMBB 10353 BB = copy0MBB; 10354 10355 // Update machine-CFG edges 10356 BB->addSuccessor(sinkMBB); 10357 10358 // sinkMBB: 10359 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 10360 // ... 10361 BB = sinkMBB; 10362 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg()) 10363 .addReg(MI.getOperand(3).getReg()) 10364 .addMBB(copy0MBB) 10365 .addReg(MI.getOperand(2).getReg()) 10366 .addMBB(thisMBB); 10367 } else if (MI.getOpcode() == PPC::ReadTB) { 10368 // To read the 64-bit time-base register on a 32-bit target, we read the 10369 // two halves. Should the counter have wrapped while it was being read, we 10370 // need to try again. 10371 // ... 10372 // readLoop: 10373 // mfspr Rx,TBU # load from TBU 10374 // mfspr Ry,TB # load from TB 10375 // mfspr Rz,TBU # load from TBU 10376 // cmpw crX,Rx,Rz # check if 'old'='new' 10377 // bne readLoop # branch if they're not equal 10378 // ... 10379 10380 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB); 10381 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 10382 DebugLoc dl = MI.getDebugLoc(); 10383 F->insert(It, readMBB); 10384 F->insert(It, sinkMBB); 10385 10386 // Transfer the remainder of BB and its successor edges to sinkMBB. 
10387 sinkMBB->splice(sinkMBB->begin(), BB, 10388 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10389 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 10390 10391 BB->addSuccessor(readMBB); 10392 BB = readMBB; 10393 10394 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10395 unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 10396 unsigned LoReg = MI.getOperand(0).getReg(); 10397 unsigned HiReg = MI.getOperand(1).getReg(); 10398 10399 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269); 10400 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268); 10401 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269); 10402 10403 unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 10404 10405 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg) 10406 .addReg(HiReg).addReg(ReadAgainReg); 10407 BuildMI(BB, dl, TII->get(PPC::BCC)) 10408 .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB); 10409 10410 BB->addSuccessor(readMBB); 10411 BB->addSuccessor(sinkMBB); 10412 } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 10413 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 10414 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 10415 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 10416 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 10417 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4); 10418 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 10419 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8); 10420 10421 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 10422 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 10423 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 10424 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 10425 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 10426 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND); 10427 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 10428 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8); 10429 10430 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 10431 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 10432 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 10433 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 10434 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 10435 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR); 10436 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 10437 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8); 10438 10439 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 10440 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 10441 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 10442 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 10443 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 10444 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR); 10445 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 10446 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8); 10447 10448 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 10449 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND); 10450 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 10451 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND); 10452 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 10453 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND); 10454 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 10455 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8); 10456 10457 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 10458 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 10459 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 10460 BB = 
EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 10461 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32) 10462 BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF); 10463 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 10464 BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8); 10465 10466 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8) 10467 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE); 10468 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16) 10469 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE); 10470 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32) 10471 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE); 10472 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64) 10473 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE); 10474 10475 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8) 10476 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE); 10477 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16) 10478 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE); 10479 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32) 10480 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE); 10481 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64) 10482 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE); 10483 10484 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8) 10485 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE); 10486 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16) 10487 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE); 10488 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32) 10489 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE); 10490 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64) 10491 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE); 10492 10493 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8) 10494 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE); 10495 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16) 10496 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE); 10497 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32) 10498 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE); 10499 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64) 10500 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE); 10501 10502 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8) 10503 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 10504 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16) 10505 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 10506 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32) 10507 BB = EmitAtomicBinary(MI, BB, 4, 0); 10508 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64) 10509 BB = EmitAtomicBinary(MI, BB, 8, 0); 10510 else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 10511 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 || 10512 (Subtarget.hasPartwordAtomics() && 10513 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) || 10514 (Subtarget.hasPartwordAtomics() && 10515 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) { 10516 bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64; 10517 10518 auto LoadMnemonic = PPC::LDARX; 10519 auto StoreMnemonic = PPC::STDCX; 10520 switch (MI.getOpcode()) { 10521 default: 10522 llvm_unreachable("Compare and swap of unknown size"); 10523 case PPC::ATOMIC_CMP_SWAP_I8: 10524 LoadMnemonic = PPC::LBARX; 10525 StoreMnemonic = PPC::STBCX; 10526 assert(Subtarget.hasPartwordAtomics() 
&& "No support partword atomics."); 10527 break; 10528 case PPC::ATOMIC_CMP_SWAP_I16: 10529 LoadMnemonic = PPC::LHARX; 10530 StoreMnemonic = PPC::STHCX; 10531 assert(Subtarget.hasPartwordAtomics() && "No support partword atomics."); 10532 break; 10533 case PPC::ATOMIC_CMP_SWAP_I32: 10534 LoadMnemonic = PPC::LWARX; 10535 StoreMnemonic = PPC::STWCX; 10536 break; 10537 case PPC::ATOMIC_CMP_SWAP_I64: 10538 LoadMnemonic = PPC::LDARX; 10539 StoreMnemonic = PPC::STDCX; 10540 break; 10541 } 10542 unsigned dest = MI.getOperand(0).getReg(); 10543 unsigned ptrA = MI.getOperand(1).getReg(); 10544 unsigned ptrB = MI.getOperand(2).getReg(); 10545 unsigned oldval = MI.getOperand(3).getReg(); 10546 unsigned newval = MI.getOperand(4).getReg(); 10547 DebugLoc dl = MI.getDebugLoc(); 10548 10549 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 10550 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 10551 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 10552 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 10553 F->insert(It, loop1MBB); 10554 F->insert(It, loop2MBB); 10555 F->insert(It, midMBB); 10556 F->insert(It, exitMBB); 10557 exitMBB->splice(exitMBB->begin(), BB, 10558 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10559 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 10560 10561 // thisMBB: 10562 // ... 10563 // fallthrough --> loopMBB 10564 BB->addSuccessor(loop1MBB); 10565 10566 // loop1MBB: 10567 // l[bhwd]arx dest, ptr 10568 // cmp[wd] dest, oldval 10569 // bne- midMBB 10570 // loop2MBB: 10571 // st[bhwd]cx. newval, ptr 10572 // bne- loopMBB 10573 // b exitBB 10574 // midMBB: 10575 // st[bhwd]cx. dest, ptr 10576 // exitBB: 10577 BB = loop1MBB; 10578 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 10579 .addReg(ptrA).addReg(ptrB); 10580 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0) 10581 .addReg(oldval).addReg(dest); 10582 BuildMI(BB, dl, TII->get(PPC::BCC)) 10583 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 10584 BB->addSuccessor(loop2MBB); 10585 BB->addSuccessor(midMBB); 10586 10587 BB = loop2MBB; 10588 BuildMI(BB, dl, TII->get(StoreMnemonic)) 10589 .addReg(newval).addReg(ptrA).addReg(ptrB); 10590 BuildMI(BB, dl, TII->get(PPC::BCC)) 10591 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 10592 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 10593 BB->addSuccessor(loop1MBB); 10594 BB->addSuccessor(exitMBB); 10595 10596 BB = midMBB; 10597 BuildMI(BB, dl, TII->get(StoreMnemonic)) 10598 .addReg(dest).addReg(ptrA).addReg(ptrB); 10599 BB->addSuccessor(exitMBB); 10600 10601 // exitMBB: 10602 // ... 10603 BB = exitMBB; 10604 } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 10605 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 10606 // We must use 64-bit registers for addresses when targeting 64-bit, 10607 // since we're actually doing arithmetic on them. Other registers 10608 // can be 32-bit. 
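// Added worked example (a sketch, assuming the usual big-endian bit numbering
// of rlwinm): for a byte operand, "rlwinm shift1, ptr1, 3, 27, 28" below
// yields shift1 = (EA & 3) * 8, i.e. one of {0, 8, 16, 24}; on big-endian
// targets "xori shift, shift1, 24" then flips the lane so byte 0 ends up in
// the most-significant position. Halfwords use mask bit 27 only and xor with
// 16 instead.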
10609 bool is64bit = Subtarget.isPPC64(); 10610 bool isLittleEndian = Subtarget.isLittleEndian(); 10611 bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 10612 10613 unsigned dest = MI.getOperand(0).getReg(); 10614 unsigned ptrA = MI.getOperand(1).getReg(); 10615 unsigned ptrB = MI.getOperand(2).getReg(); 10616 unsigned oldval = MI.getOperand(3).getReg(); 10617 unsigned newval = MI.getOperand(4).getReg(); 10618 DebugLoc dl = MI.getDebugLoc(); 10619 10620 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 10621 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 10622 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 10623 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 10624 F->insert(It, loop1MBB); 10625 F->insert(It, loop2MBB); 10626 F->insert(It, midMBB); 10627 F->insert(It, exitMBB); 10628 exitMBB->splice(exitMBB->begin(), BB, 10629 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10630 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 10631 10632 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10633 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 10634 : &PPC::GPRCRegClass; 10635 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 10636 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 10637 unsigned ShiftReg = 10638 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC); 10639 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC); 10640 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC); 10641 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC); 10642 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC); 10643 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 10644 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 10645 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 10646 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 10647 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 10648 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 10649 unsigned Ptr1Reg; 10650 unsigned TmpReg = RegInfo.createVirtualRegister(RC); 10651 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 10652 // thisMBB: 10653 // ... 10654 // fallthrough --> loopMBB 10655 BB->addSuccessor(loop1MBB); 10656 10657 // The 4-byte load must be aligned, while a char or short may be 10658 // anywhere in the word. Hence all this nasty bookkeeping code. 10659 // add ptr1, ptrA, ptrB [copy if ptrA==0] 10660 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 10661 // xori shift, shift1, 24 [16] 10662 // rlwinm ptr, ptr1, 0, 0, 29 10663 // slw newval2, newval, shift 10664 // slw oldval2, oldval,shift 10665 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 10666 // slw mask, mask2, shift 10667 // and newval3, newval2, mask 10668 // and oldval3, oldval2, mask 10669 // loop1MBB: 10670 // lwarx tmpDest, ptr 10671 // and tmp, tmpDest, mask 10672 // cmpw tmp, oldval3 10673 // bne- midMBB 10674 // loop2MBB: 10675 // andc tmp2, tmpDest, mask 10676 // or tmp4, tmp2, newval3 10677 // stwcx. tmp4, ptr 10678 // bne- loop1MBB 10679 // b exitBB 10680 // midMBB: 10681 // stwcx. tmpDest, ptr 10682 // exitBB: 10683 // srw dest, tmpDest, shift 10684 if (ptrA != ZeroReg) { 10685 Ptr1Reg = RegInfo.createVirtualRegister(RC); 10686 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 10687 .addReg(ptrA).addReg(ptrB); 10688 } else { 10689 Ptr1Reg = ptrB; 10690 } 10691 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 10692 .addImm(3).addImm(27).addImm(is8bit ? 
28 : 27); 10693 if (!isLittleEndian) 10694 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 10695 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 10696 if (is64bit) 10697 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 10698 .addReg(Ptr1Reg).addImm(0).addImm(61); 10699 else 10700 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 10701 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 10702 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 10703 .addReg(newval).addReg(ShiftReg); 10704 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 10705 .addReg(oldval).addReg(ShiftReg); 10706 if (is8bit) 10707 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 10708 else { 10709 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 10710 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 10711 .addReg(Mask3Reg).addImm(65535); 10712 } 10713 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 10714 .addReg(Mask2Reg).addReg(ShiftReg); 10715 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 10716 .addReg(NewVal2Reg).addReg(MaskReg); 10717 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 10718 .addReg(OldVal2Reg).addReg(MaskReg); 10719 10720 BB = loop1MBB; 10721 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 10722 .addReg(ZeroReg).addReg(PtrReg); 10723 BuildMI(BB, dl, TII->get(PPC::AND),TmpReg) 10724 .addReg(TmpDestReg).addReg(MaskReg); 10725 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 10726 .addReg(TmpReg).addReg(OldVal3Reg); 10727 BuildMI(BB, dl, TII->get(PPC::BCC)) 10728 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 10729 BB->addSuccessor(loop2MBB); 10730 BB->addSuccessor(midMBB); 10731 10732 BB = loop2MBB; 10733 BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg) 10734 .addReg(TmpDestReg).addReg(MaskReg); 10735 BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg) 10736 .addReg(Tmp2Reg).addReg(NewVal3Reg); 10737 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg) 10738 .addReg(ZeroReg).addReg(PtrReg); 10739 BuildMI(BB, dl, TII->get(PPC::BCC)) 10740 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 10741 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 10742 BB->addSuccessor(loop1MBB); 10743 BB->addSuccessor(exitMBB); 10744 10745 BB = midMBB; 10746 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg) 10747 .addReg(ZeroReg).addReg(PtrReg); 10748 BB->addSuccessor(exitMBB); 10749 10750 // exitMBB: 10751 // ... 10752 BB = exitMBB; 10753 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg) 10754 .addReg(ShiftReg); 10755 } else if (MI.getOpcode() == PPC::FADDrtz) { 10756 // This pseudo performs an FADD with rounding mode temporarily forced 10757 // to round-to-zero. We emit this via custom inserter since the FPSCR 10758 // is not modeled at the SelectionDAG level. 10759 unsigned Dest = MI.getOperand(0).getReg(); 10760 unsigned Src1 = MI.getOperand(1).getReg(); 10761 unsigned Src2 = MI.getOperand(2).getReg(); 10762 DebugLoc dl = MI.getDebugLoc(); 10763 10764 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10765 unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 10766 10767 // Save FPSCR value. 10768 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg); 10769 10770 // Set rounding mode to round-to-zero. 10771 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31); 10772 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30); 10773 10774 // Perform addition. 10775 BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2); 10776 10777 // Restore FPSCR value. 
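// (Added note, hedged: the MTFSB1(31)/MTFSB0(30) pair above sets the FPSCR
// RN field to 0b01, i.e. round toward zero; the mtfsf below writes the saved
// FPSCR field back from MFFSReg, restoring whatever rounding mode was in
// effect before the FADD.)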
10778 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg); 10779 } else if (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT || 10780 MI.getOpcode() == PPC::ANDIo_1_GT_BIT || 10781 MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 10782 MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) { 10783 unsigned Opcode = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 10784 MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) 10785 ? PPC::ANDIo8 10786 : PPC::ANDIo; 10787 bool isEQ = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT || 10788 MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8); 10789 10790 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10791 unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ? 10792 &PPC::GPRCRegClass : 10793 &PPC::G8RCRegClass); 10794 10795 DebugLoc dl = MI.getDebugLoc(); 10796 BuildMI(*BB, MI, dl, TII->get(Opcode), Dest) 10797 .addReg(MI.getOperand(1).getReg()) 10798 .addImm(1); 10799 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), 10800 MI.getOperand(0).getReg()) 10801 .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT); 10802 } else if (MI.getOpcode() == PPC::TCHECK_RET) { 10803 DebugLoc Dl = MI.getDebugLoc(); 10804 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10805 unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 10806 BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg); 10807 return BB; 10808 } else { 10809 llvm_unreachable("Unexpected instr type to insert"); 10810 } 10811 10812 MI.eraseFromParent(); // The pseudo instruction is gone now. 10813 return BB; 10814 } 10815 10816 //===----------------------------------------------------------------------===// 10817 // Target Optimization Hooks 10818 //===----------------------------------------------------------------------===// 10819 10820 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) { 10821 // For the estimates, convergence is quadratic, so we essentially double the 10822 // number of digits correct after every iteration. For both FRE and FRSQRTE, 10823 // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(), 10824 // this is 2^-14. IEEE float has 23 digits and double has 52 digits. 10825 int RefinementSteps = Subtarget.hasRecipPrec() ? 
1 : 3; 10826 if (VT.getScalarType() == MVT::f64) 10827 RefinementSteps++; 10828 return RefinementSteps; 10829 } 10830 10831 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, 10832 int Enabled, int &RefinementSteps, 10833 bool &UseOneConstNR, 10834 bool Reciprocal) const { 10835 EVT VT = Operand.getValueType(); 10836 if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) || 10837 (VT == MVT::f64 && Subtarget.hasFRSQRTE()) || 10838 (VT == MVT::v4f32 && Subtarget.hasAltivec()) || 10839 (VT == MVT::v2f64 && Subtarget.hasVSX()) || 10840 (VT == MVT::v4f32 && Subtarget.hasQPX()) || 10841 (VT == MVT::v4f64 && Subtarget.hasQPX())) { 10842 if (RefinementSteps == ReciprocalEstimate::Unspecified) 10843 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget); 10844 10845 UseOneConstNR = true; 10846 return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand); 10847 } 10848 return SDValue(); 10849 } 10850 10851 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG, 10852 int Enabled, 10853 int &RefinementSteps) const { 10854 EVT VT = Operand.getValueType(); 10855 if ((VT == MVT::f32 && Subtarget.hasFRES()) || 10856 (VT == MVT::f64 && Subtarget.hasFRE()) || 10857 (VT == MVT::v4f32 && Subtarget.hasAltivec()) || 10858 (VT == MVT::v2f64 && Subtarget.hasVSX()) || 10859 (VT == MVT::v4f32 && Subtarget.hasQPX()) || 10860 (VT == MVT::v4f64 && Subtarget.hasQPX())) { 10861 if (RefinementSteps == ReciprocalEstimate::Unspecified) 10862 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget); 10863 return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand); 10864 } 10865 return SDValue(); 10866 } 10867 10868 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const { 10869 // Note: This functionality is used only when unsafe-fp-math is enabled, and 10870 // on cores with reciprocal estimates (which are used when unsafe-fp-math is 10871 // enabled for division), this functionality is redundant with the default 10872 // combiner logic (once the division -> reciprocal/multiply transformation 10873 // has taken place). As a result, this matters more for older cores than for 10874 // newer ones. 10875 10876 // Combine multiple FDIVs with the same divisor into multiple FMULs by the 10877 // reciprocal if there are two or more FDIVs (for embedded cores with only 10878 // one FP pipeline) for three or more FDIVs (for generic OOO cores). 10879 switch (Subtarget.getDarwinDirective()) { 10880 default: 10881 return 3; 10882 case PPC::DIR_440: 10883 case PPC::DIR_A2: 10884 case PPC::DIR_E500: 10885 case PPC::DIR_E500mc: 10886 case PPC::DIR_E5500: 10887 return 2; 10888 } 10889 } 10890 10891 // isConsecutiveLSLoc needs to work even if all adds have not yet been 10892 // collapsed, and so we need to look through chains of them. 10893 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base, 10894 int64_t& Offset, SelectionDAG &DAG) { 10895 if (DAG.isBaseWithConstantOffset(Loc)) { 10896 Base = Loc.getOperand(0); 10897 Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue(); 10898 10899 // The base might itself be a base plus an offset, and if so, accumulate 10900 // that as well. 
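// Added example (sketch): for Loc = (add (add %X, 16), 8) the recursion below
// terminates with Base = %X and Offset = 24.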
10901 getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG); 10902 } 10903 } 10904 10905 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, 10906 unsigned Bytes, int Dist, 10907 SelectionDAG &DAG) { 10908 if (VT.getSizeInBits() / 8 != Bytes) 10909 return false; 10910 10911 SDValue BaseLoc = Base->getBasePtr(); 10912 if (Loc.getOpcode() == ISD::FrameIndex) { 10913 if (BaseLoc.getOpcode() != ISD::FrameIndex) 10914 return false; 10915 const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 10916 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 10917 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 10918 int FS = MFI.getObjectSize(FI); 10919 int BFS = MFI.getObjectSize(BFI); 10920 if (FS != BFS || FS != (int)Bytes) return false; 10921 return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes); 10922 } 10923 10924 SDValue Base1 = Loc, Base2 = BaseLoc; 10925 int64_t Offset1 = 0, Offset2 = 0; 10926 getBaseWithConstantOffset(Loc, Base1, Offset1, DAG); 10927 getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG); 10928 if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes)) 10929 return true; 10930 10931 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 10932 const GlobalValue *GV1 = nullptr; 10933 const GlobalValue *GV2 = nullptr; 10934 Offset1 = 0; 10935 Offset2 = 0; 10936 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1); 10937 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2); 10938 if (isGA1 && isGA2 && GV1 == GV2) 10939 return Offset1 == (Offset2 + Dist*Bytes); 10940 return false; 10941 } 10942 10943 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does 10944 // not enforce equality of the chain operands. 10945 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base, 10946 unsigned Bytes, int Dist, 10947 SelectionDAG &DAG) { 10948 if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) { 10949 EVT VT = LS->getMemoryVT(); 10950 SDValue Loc = LS->getBasePtr(); 10951 return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG); 10952 } 10953 10954 if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) { 10955 EVT VT; 10956 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10957 default: return false; 10958 case Intrinsic::ppc_qpx_qvlfd: 10959 case Intrinsic::ppc_qpx_qvlfda: 10960 VT = MVT::v4f64; 10961 break; 10962 case Intrinsic::ppc_qpx_qvlfs: 10963 case Intrinsic::ppc_qpx_qvlfsa: 10964 VT = MVT::v4f32; 10965 break; 10966 case Intrinsic::ppc_qpx_qvlfcd: 10967 case Intrinsic::ppc_qpx_qvlfcda: 10968 VT = MVT::v2f64; 10969 break; 10970 case Intrinsic::ppc_qpx_qvlfcs: 10971 case Intrinsic::ppc_qpx_qvlfcsa: 10972 VT = MVT::v2f32; 10973 break; 10974 case Intrinsic::ppc_qpx_qvlfiwa: 10975 case Intrinsic::ppc_qpx_qvlfiwz: 10976 case Intrinsic::ppc_altivec_lvx: 10977 case Intrinsic::ppc_altivec_lvxl: 10978 case Intrinsic::ppc_vsx_lxvw4x: 10979 case Intrinsic::ppc_vsx_lxvw4x_be: 10980 VT = MVT::v4i32; 10981 break; 10982 case Intrinsic::ppc_vsx_lxvd2x: 10983 case Intrinsic::ppc_vsx_lxvd2x_be: 10984 VT = MVT::v2f64; 10985 break; 10986 case Intrinsic::ppc_altivec_lvebx: 10987 VT = MVT::i8; 10988 break; 10989 case Intrinsic::ppc_altivec_lvehx: 10990 VT = MVT::i16; 10991 break; 10992 case Intrinsic::ppc_altivec_lvewx: 10993 VT = MVT::i32; 10994 break; 10995 } 10996 10997 return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG); 10998 } 10999 11000 if (N->getOpcode() == ISD::INTRINSIC_VOID) { 11001 EVT VT; 11002 switch 
(cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
11003 default: return false;
11004 case Intrinsic::ppc_qpx_qvstfd:
11005 case Intrinsic::ppc_qpx_qvstfda:
11006 VT = MVT::v4f64;
11007 break;
11008 case Intrinsic::ppc_qpx_qvstfs:
11009 case Intrinsic::ppc_qpx_qvstfsa:
11010 VT = MVT::v4f32;
11011 break;
11012 case Intrinsic::ppc_qpx_qvstfcd:
11013 case Intrinsic::ppc_qpx_qvstfcda:
11014 VT = MVT::v2f64;
11015 break;
11016 case Intrinsic::ppc_qpx_qvstfcs:
11017 case Intrinsic::ppc_qpx_qvstfcsa:
11018 VT = MVT::v2f32;
11019 break;
11020 case Intrinsic::ppc_qpx_qvstfiw:
11021 case Intrinsic::ppc_qpx_qvstfiwa:
11022 case Intrinsic::ppc_altivec_stvx:
11023 case Intrinsic::ppc_altivec_stvxl:
11024 case Intrinsic::ppc_vsx_stxvw4x:
11025 VT = MVT::v4i32;
11026 break;
11027 case Intrinsic::ppc_vsx_stxvd2x:
11028 VT = MVT::v2f64;
11029 break;
11030 case Intrinsic::ppc_vsx_stxvw4x_be:
11031 VT = MVT::v4i32;
11032 break;
11033 case Intrinsic::ppc_vsx_stxvd2x_be:
11034 VT = MVT::v2f64;
11035 break;
11036 case Intrinsic::ppc_altivec_stvebx:
11037 VT = MVT::i8;
11038 break;
11039 case Intrinsic::ppc_altivec_stvehx:
11040 VT = MVT::i16;
11041 break;
11042 case Intrinsic::ppc_altivec_stvewx:
11043 VT = MVT::i32;
11044 break;
11045 }
11046
11047 return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
11048 }
11049
11050 return false;
11051 }
11052
11053 // Return true if there is a nearby consecutive load to the one provided
11054 // (regardless of alignment). We search up and down the chain, looking through
11055 // token factors and other loads (but nothing else). As a result, a true result
11056 // indicates that it is safe to create a new consecutive load adjacent to the
11057 // load provided.
11058 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
11059 SDValue Chain = LD->getChain();
11060 EVT VT = LD->getMemoryVT();
11061
11062 SmallSet<SDNode *, 16> LoadRoots;
11063 SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
11064 SmallSet<SDNode *, 16> Visited;
11065
11066 // First, search up the chain, branching to follow all token-factor operands.
11067 // If we find a consecutive load, then we're done, otherwise, record all
11068 // nodes just above the top-level loads and token factors.
11069 while (!Queue.empty()) {
11070 SDNode *ChainNext = Queue.pop_back_val();
11071 if (!Visited.insert(ChainNext).second)
11072 continue;
11073
11074 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
11075 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
11076 return true;
11077
11078 if (!Visited.count(ChainLD->getChain().getNode()))
11079 Queue.push_back(ChainLD->getChain().getNode());
11080 } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
11081 for (const SDUse &O : ChainNext->ops())
11082 if (!Visited.count(O.getNode()))
11083 Queue.push_back(O.getNode());
11084 } else
11085 LoadRoots.insert(ChainNext);
11086 }
11087
11088 // Second, search down the chain, starting from the top-level nodes recorded
11089 // in the first phase. These top-level nodes are the nodes just above all
11090 // loads and token factors. Starting with their uses, recursively look through
11091 // all loads (just the chain uses) and token factors to find a consecutive
11092 // load.
11093 Visited.clear(); 11094 Queue.clear(); 11095 11096 for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(), 11097 IE = LoadRoots.end(); I != IE; ++I) { 11098 Queue.push_back(*I); 11099 11100 while (!Queue.empty()) { 11101 SDNode *LoadRoot = Queue.pop_back_val(); 11102 if (!Visited.insert(LoadRoot).second) 11103 continue; 11104 11105 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot)) 11106 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG)) 11107 return true; 11108 11109 for (SDNode::use_iterator UI = LoadRoot->use_begin(), 11110 UE = LoadRoot->use_end(); UI != UE; ++UI) 11111 if (((isa<MemSDNode>(*UI) && 11112 cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) || 11113 UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI)) 11114 Queue.push_back(*UI); 11115 } 11116 } 11117 11118 return false; 11119 } 11120 11121 /// This function is called when we have proved that a SETCC node can be replaced 11122 /// by subtraction (and other supporting instructions) so that the result of 11123 /// comparison is kept in a GPR instead of CR. This function is purely for 11124 /// codegen purposes and has some flags to guide the codegen process. 11125 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement, 11126 bool Swap, SDLoc &DL, SelectionDAG &DAG) { 11127 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected."); 11128 11129 // Zero extend the operands to the largest legal integer. Originally, they 11130 // must be of a strictly smaller size. 11131 auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0), 11132 DAG.getConstant(Size, DL, MVT::i32)); 11133 auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1), 11134 DAG.getConstant(Size, DL, MVT::i32)); 11135 11136 // Swap if needed. Depends on the condition code. 11137 if (Swap) 11138 std::swap(Op0, Op1); 11139 11140 // Subtract extended integers. 11141 auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1); 11142 11143 // Move the sign bit to the least significant position and zero out the rest. 11144 // Now the least significant bit carries the result of original comparison. 11145 auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode, 11146 DAG.getConstant(Size - 1, DL, MVT::i32)); 11147 auto Final = Shifted; 11148 11149 // Complement the result if needed. Based on the condition code. 11150 if (Complement) 11151 Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted, 11152 DAG.getConstant(1, DL, MVT::i64)); 11153 11154 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final); 11155 } 11156 11157 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N, 11158 DAGCombinerInfo &DCI) const { 11159 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected."); 11160 11161 SelectionDAG &DAG = DCI.DAG; 11162 SDLoc DL(N); 11163 11164 // Size of integers being compared has a critical role in the following 11165 // analysis, so we prefer to do this when all types are legal. 
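// Added illustration (a sketch, assuming i64 is the largest legal integer
// type): an i32 unsigned compare such as (setcc ult %a, %b) is rewritten by
// generateEquivalentSub above into roughly
//   trunc(((zext %a to i64) - (zext %b to i64)) >> 63) to i1
// with the operands swapped and/or the result xor'ed with 1 for the other
// unsigned predicates.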
11166 if (!DCI.isAfterLegalizeDAG()) 11167 return SDValue(); 11168 11169 // If all users of SETCC extend its value to a legal integer type 11170 // then we replace SETCC with a subtraction 11171 for (SDNode::use_iterator UI = N->use_begin(), 11172 UE = N->use_end(); UI != UE; ++UI) { 11173 if (UI->getOpcode() != ISD::ZERO_EXTEND) 11174 return SDValue(); 11175 } 11176 11177 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 11178 auto OpSize = N->getOperand(0).getValueSizeInBits(); 11179 11180 unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits(); 11181 11182 if (OpSize < Size) { 11183 switch (CC) { 11184 default: break; 11185 case ISD::SETULT: 11186 return generateEquivalentSub(N, Size, false, false, DL, DAG); 11187 case ISD::SETULE: 11188 return generateEquivalentSub(N, Size, true, true, DL, DAG); 11189 case ISD::SETUGT: 11190 return generateEquivalentSub(N, Size, false, true, DL, DAG); 11191 case ISD::SETUGE: 11192 return generateEquivalentSub(N, Size, true, false, DL, DAG); 11193 } 11194 } 11195 11196 return SDValue(); 11197 } 11198 11199 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N, 11200 DAGCombinerInfo &DCI) const { 11201 SelectionDAG &DAG = DCI.DAG; 11202 SDLoc dl(N); 11203 11204 assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits"); 11205 // If we're tracking CR bits, we need to be careful that we don't have: 11206 // trunc(binary-ops(zext(x), zext(y))) 11207 // or 11208 // trunc(binary-ops(binary-ops(zext(x), zext(y)), ...) 11209 // such that we're unnecessarily moving things into GPRs when it would be 11210 // better to keep them in CR bits. 11211 11212 // Note that trunc here can be an actual i1 trunc, or can be the effective 11213 // truncation that comes from a setcc or select_cc. 11214 if (N->getOpcode() == ISD::TRUNCATE && 11215 N->getValueType(0) != MVT::i1) 11216 return SDValue(); 11217 11218 if (N->getOperand(0).getValueType() != MVT::i32 && 11219 N->getOperand(0).getValueType() != MVT::i64) 11220 return SDValue(); 11221 11222 if (N->getOpcode() == ISD::SETCC || 11223 N->getOpcode() == ISD::SELECT_CC) { 11224 // If we're looking at a comparison, then we need to make sure that the 11225 // high bits (all except for the first) don't matter the result. 11226 ISD::CondCode CC = 11227 cast<CondCodeSDNode>(N->getOperand( 11228 N->getOpcode() == ISD::SETCC ? 2 : 4))->get(); 11229 unsigned OpBits = N->getOperand(0).getValueSizeInBits(); 11230 11231 if (ISD::isSignedIntSetCC(CC)) { 11232 if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits || 11233 DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits) 11234 return SDValue(); 11235 } else if (ISD::isUnsignedIntSetCC(CC)) { 11236 if (!DAG.MaskedValueIsZero(N->getOperand(0), 11237 APInt::getHighBitsSet(OpBits, OpBits-1)) || 11238 !DAG.MaskedValueIsZero(N->getOperand(1), 11239 APInt::getHighBitsSet(OpBits, OpBits-1))) 11240 return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI) 11241 : SDValue()); 11242 } else { 11243 // This is neither a signed nor an unsigned comparison, just make sure 11244 // that the high bits are equal. 11245 KnownBits Op1Known, Op2Known; 11246 DAG.computeKnownBits(N->getOperand(0), Op1Known); 11247 DAG.computeKnownBits(N->getOperand(1), Op2Known); 11248 11249 // We don't really care about what is known about the first bit (if 11250 // anything), so clear it in all masks prior to comparing them. 
11251 Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0); 11252 Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0); 11253 11254 if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One) 11255 return SDValue(); 11256 } 11257 } 11258 11259 // We now know that the higher-order bits are irrelevant, we just need to 11260 // make sure that all of the intermediate operations are bit operations, and 11261 // all inputs are extensions. 11262 if (N->getOperand(0).getOpcode() != ISD::AND && 11263 N->getOperand(0).getOpcode() != ISD::OR && 11264 N->getOperand(0).getOpcode() != ISD::XOR && 11265 N->getOperand(0).getOpcode() != ISD::SELECT && 11266 N->getOperand(0).getOpcode() != ISD::SELECT_CC && 11267 N->getOperand(0).getOpcode() != ISD::TRUNCATE && 11268 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND && 11269 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND && 11270 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND) 11271 return SDValue(); 11272 11273 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) && 11274 N->getOperand(1).getOpcode() != ISD::AND && 11275 N->getOperand(1).getOpcode() != ISD::OR && 11276 N->getOperand(1).getOpcode() != ISD::XOR && 11277 N->getOperand(1).getOpcode() != ISD::SELECT && 11278 N->getOperand(1).getOpcode() != ISD::SELECT_CC && 11279 N->getOperand(1).getOpcode() != ISD::TRUNCATE && 11280 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND && 11281 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND && 11282 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND) 11283 return SDValue(); 11284 11285 SmallVector<SDValue, 4> Inputs; 11286 SmallVector<SDValue, 8> BinOps, PromOps; 11287 SmallPtrSet<SDNode *, 16> Visited; 11288 11289 for (unsigned i = 0; i < 2; ++i) { 11290 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 11291 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 11292 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 11293 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) || 11294 isa<ConstantSDNode>(N->getOperand(i))) 11295 Inputs.push_back(N->getOperand(i)); 11296 else 11297 BinOps.push_back(N->getOperand(i)); 11298 11299 if (N->getOpcode() == ISD::TRUNCATE) 11300 break; 11301 } 11302 11303 // Visit all inputs, collect all binary operations (and, or, xor and 11304 // select) that are all fed by extensions. 11305 while (!BinOps.empty()) { 11306 SDValue BinOp = BinOps.back(); 11307 BinOps.pop_back(); 11308 11309 if (!Visited.insert(BinOp.getNode()).second) 11310 continue; 11311 11312 PromOps.push_back(BinOp); 11313 11314 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 11315 // The condition of the select is not promoted. 
11316 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 11317 continue; 11318 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 11319 continue; 11320 11321 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 11322 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 11323 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 11324 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) || 11325 isa<ConstantSDNode>(BinOp.getOperand(i))) { 11326 Inputs.push_back(BinOp.getOperand(i)); 11327 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 11328 BinOp.getOperand(i).getOpcode() == ISD::OR || 11329 BinOp.getOperand(i).getOpcode() == ISD::XOR || 11330 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 11331 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC || 11332 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 11333 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 11334 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 11335 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) { 11336 BinOps.push_back(BinOp.getOperand(i)); 11337 } else { 11338 // We have an input that is not an extension or another binary 11339 // operation; we'll abort this transformation. 11340 return SDValue(); 11341 } 11342 } 11343 } 11344 11345 // Make sure that this is a self-contained cluster of operations (which 11346 // is not quite the same thing as saying that everything has only one 11347 // use). 11348 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11349 if (isa<ConstantSDNode>(Inputs[i])) 11350 continue; 11351 11352 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 11353 UE = Inputs[i].getNode()->use_end(); 11354 UI != UE; ++UI) { 11355 SDNode *User = *UI; 11356 if (User != N && !Visited.count(User)) 11357 return SDValue(); 11358 11359 // Make sure that we're not going to promote the non-output-value 11360 // operand(s) or SELECT or SELECT_CC. 11361 // FIXME: Although we could sometimes handle this, and it does occur in 11362 // practice that one of the condition inputs to the select is also one of 11363 // the outputs, we currently can't deal with this. 11364 if (User->getOpcode() == ISD::SELECT) { 11365 if (User->getOperand(0) == Inputs[i]) 11366 return SDValue(); 11367 } else if (User->getOpcode() == ISD::SELECT_CC) { 11368 if (User->getOperand(0) == Inputs[i] || 11369 User->getOperand(1) == Inputs[i]) 11370 return SDValue(); 11371 } 11372 } 11373 } 11374 11375 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 11376 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 11377 UE = PromOps[i].getNode()->use_end(); 11378 UI != UE; ++UI) { 11379 SDNode *User = *UI; 11380 if (User != N && !Visited.count(User)) 11381 return SDValue(); 11382 11383 // Make sure that we're not going to promote the non-output-value 11384 // operand(s) or SELECT or SELECT_CC. 11385 // FIXME: Although we could sometimes handle this, and it does occur in 11386 // practice that one of the condition inputs to the select is also one of 11387 // the outputs, we currently can't deal with this. 11388 if (User->getOpcode() == ISD::SELECT) { 11389 if (User->getOperand(0) == PromOps[i]) 11390 return SDValue(); 11391 } else if (User->getOpcode() == ISD::SELECT_CC) { 11392 if (User->getOperand(0) == PromOps[i] || 11393 User->getOperand(1) == PromOps[i]) 11394 return SDValue(); 11395 } 11396 } 11397 } 11398 11399 // Replace all inputs with the extension operand. 
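// Added example (sketch): an input such as (zero_extend i1 %c to i32) is
// replaced below by %c itself, so the surrounding cluster of bit operations
// ends up computing directly on i1 (CR-bit) values.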
11400 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11401 // Constants may have users outside the cluster of to-be-promoted nodes, 11402 // and so we need to replace those as we do the promotions. 11403 if (isa<ConstantSDNode>(Inputs[i])) 11404 continue; 11405 else 11406 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0)); 11407 } 11408 11409 std::list<HandleSDNode> PromOpHandles; 11410 for (auto &PromOp : PromOps) 11411 PromOpHandles.emplace_back(PromOp); 11412 11413 // Replace all operations (these are all the same, but have a different 11414 // (i1) return type). DAG.getNode will validate that the types of 11415 // a binary operator match, so go through the list in reverse so that 11416 // we've likely promoted both operands first. Any intermediate truncations or 11417 // extensions disappear. 11418 while (!PromOpHandles.empty()) { 11419 SDValue PromOp = PromOpHandles.back().getValue(); 11420 PromOpHandles.pop_back(); 11421 11422 if (PromOp.getOpcode() == ISD::TRUNCATE || 11423 PromOp.getOpcode() == ISD::SIGN_EXTEND || 11424 PromOp.getOpcode() == ISD::ZERO_EXTEND || 11425 PromOp.getOpcode() == ISD::ANY_EXTEND) { 11426 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) && 11427 PromOp.getOperand(0).getValueType() != MVT::i1) { 11428 // The operand is not yet ready (see comment below). 11429 PromOpHandles.emplace_front(PromOp); 11430 continue; 11431 } 11432 11433 SDValue RepValue = PromOp.getOperand(0); 11434 if (isa<ConstantSDNode>(RepValue)) 11435 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue); 11436 11437 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue); 11438 continue; 11439 } 11440 11441 unsigned C; 11442 switch (PromOp.getOpcode()) { 11443 default: C = 0; break; 11444 case ISD::SELECT: C = 1; break; 11445 case ISD::SELECT_CC: C = 2; break; 11446 } 11447 11448 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 11449 PromOp.getOperand(C).getValueType() != MVT::i1) || 11450 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 11451 PromOp.getOperand(C+1).getValueType() != MVT::i1)) { 11452 // The to-be-promoted operands of this node have not yet been 11453 // promoted (this should be rare because we're going through the 11454 // list backward, but if one of the operands has several users in 11455 // this cluster of to-be-promoted nodes, it is possible). 11456 PromOpHandles.emplace_front(PromOp); 11457 continue; 11458 } 11459 11460 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 11461 PromOp.getNode()->op_end()); 11462 11463 // If there are any constant inputs, make sure they're replaced now. 11464 for (unsigned i = 0; i < 2; ++i) 11465 if (isa<ConstantSDNode>(Ops[C+i])) 11466 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]); 11467 11468 DAG.ReplaceAllUsesOfValueWith(PromOp, 11469 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops)); 11470 } 11471 11472 // Now we're left with the initial truncation itself. 11473 if (N->getOpcode() == ISD::TRUNCATE) 11474 return N->getOperand(0); 11475 11476 // Otherwise, this is a comparison. The operands to be compared have just 11477 // changed type (to i1), but everything else is the same. 11478 return SDValue(N, 0); 11479 } 11480 11481 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N, 11482 DAGCombinerInfo &DCI) const { 11483 SelectionDAG &DAG = DCI.DAG; 11484 SDLoc dl(N); 11485 11486 // If we're tracking CR bits, we need to be careful that we don't have: 11487 // zext(binary-ops(trunc(x), trunc(y))) 11488 // or 11489 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...) 
11490 // such that we're unnecessarily moving things into CR bits that can more 11491 // efficiently stay in GPRs. Note that if we're not certain that the high 11492 // bits are set as required by the final extension, we still may need to do 11493 // some masking to get the proper behavior. 11494 11495 // This same functionality is important on PPC64 when dealing with 11496 // 32-to-64-bit extensions; these occur often when 32-bit values are used as 11497 // the return values of functions. Because it is so similar, it is handled 11498 // here as well. 11499 11500 if (N->getValueType(0) != MVT::i32 && 11501 N->getValueType(0) != MVT::i64) 11502 return SDValue(); 11503 11504 if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) || 11505 (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64()))) 11506 return SDValue(); 11507 11508 if (N->getOperand(0).getOpcode() != ISD::AND && 11509 N->getOperand(0).getOpcode() != ISD::OR && 11510 N->getOperand(0).getOpcode() != ISD::XOR && 11511 N->getOperand(0).getOpcode() != ISD::SELECT && 11512 N->getOperand(0).getOpcode() != ISD::SELECT_CC) 11513 return SDValue(); 11514 11515 SmallVector<SDValue, 4> Inputs; 11516 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps; 11517 SmallPtrSet<SDNode *, 16> Visited; 11518 11519 // Visit all inputs, collect all binary operations (and, or, xor and 11520 // select) that are all fed by truncations. 11521 while (!BinOps.empty()) { 11522 SDValue BinOp = BinOps.back(); 11523 BinOps.pop_back(); 11524 11525 if (!Visited.insert(BinOp.getNode()).second) 11526 continue; 11527 11528 PromOps.push_back(BinOp); 11529 11530 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 11531 // The condition of the select is not promoted. 11532 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 11533 continue; 11534 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 11535 continue; 11536 11537 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 11538 isa<ConstantSDNode>(BinOp.getOperand(i))) { 11539 Inputs.push_back(BinOp.getOperand(i)); 11540 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 11541 BinOp.getOperand(i).getOpcode() == ISD::OR || 11542 BinOp.getOperand(i).getOpcode() == ISD::XOR || 11543 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 11544 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) { 11545 BinOps.push_back(BinOp.getOperand(i)); 11546 } else { 11547 // We have an input that is not a truncation or another binary 11548 // operation; we'll abort this transformation. 11549 return SDValue(); 11550 } 11551 } 11552 } 11553 11554 // The operands of a select that must be truncated when the select is 11555 // promoted because the operand is actually part of the to-be-promoted set. 11556 DenseMap<SDNode *, EVT> SelectTruncOp[2]; 11557 11558 // Make sure that this is a self-contained cluster of operations (which 11559 // is not quite the same thing as saying that everything has only one 11560 // use). 11561 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11562 if (isa<ConstantSDNode>(Inputs[i])) 11563 continue; 11564 11565 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 11566 UE = Inputs[i].getNode()->use_end(); 11567 UI != UE; ++UI) { 11568 SDNode *User = *UI; 11569 if (User != N && !Visited.count(User)) 11570 return SDValue(); 11571 11572 // If we're going to promote the non-output-value operand(s) or SELECT or 11573 // SELECT_CC, record them for truncation. 
11574 if (User->getOpcode() == ISD::SELECT) { 11575 if (User->getOperand(0) == Inputs[i]) 11576 SelectTruncOp[0].insert(std::make_pair(User, 11577 User->getOperand(0).getValueType())); 11578 } else if (User->getOpcode() == ISD::SELECT_CC) { 11579 if (User->getOperand(0) == Inputs[i]) 11580 SelectTruncOp[0].insert(std::make_pair(User, 11581 User->getOperand(0).getValueType())); 11582 if (User->getOperand(1) == Inputs[i]) 11583 SelectTruncOp[1].insert(std::make_pair(User, 11584 User->getOperand(1).getValueType())); 11585 } 11586 } 11587 } 11588 11589 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 11590 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 11591 UE = PromOps[i].getNode()->use_end(); 11592 UI != UE; ++UI) { 11593 SDNode *User = *UI; 11594 if (User != N && !Visited.count(User)) 11595 return SDValue(); 11596 11597 // If we're going to promote the non-output-value operand(s) or SELECT or 11598 // SELECT_CC, record them for truncation. 11599 if (User->getOpcode() == ISD::SELECT) { 11600 if (User->getOperand(0) == PromOps[i]) 11601 SelectTruncOp[0].insert(std::make_pair(User, 11602 User->getOperand(0).getValueType())); 11603 } else if (User->getOpcode() == ISD::SELECT_CC) { 11604 if (User->getOperand(0) == PromOps[i]) 11605 SelectTruncOp[0].insert(std::make_pair(User, 11606 User->getOperand(0).getValueType())); 11607 if (User->getOperand(1) == PromOps[i]) 11608 SelectTruncOp[1].insert(std::make_pair(User, 11609 User->getOperand(1).getValueType())); 11610 } 11611 } 11612 } 11613 11614 unsigned PromBits = N->getOperand(0).getValueSizeInBits(); 11615 bool ReallyNeedsExt = false; 11616 if (N->getOpcode() != ISD::ANY_EXTEND) { 11617 // If all of the inputs are not already sign/zero extended, then 11618 // we'll still need to do that at the end. 11619 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11620 if (isa<ConstantSDNode>(Inputs[i])) 11621 continue; 11622 11623 unsigned OpBits = 11624 Inputs[i].getOperand(0).getValueSizeInBits(); 11625 assert(PromBits < OpBits && "Truncation not to a smaller bit count?"); 11626 11627 if ((N->getOpcode() == ISD::ZERO_EXTEND && 11628 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0), 11629 APInt::getHighBitsSet(OpBits, 11630 OpBits-PromBits))) || 11631 (N->getOpcode() == ISD::SIGN_EXTEND && 11632 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) < 11633 (OpBits-(PromBits-1)))) { 11634 ReallyNeedsExt = true; 11635 break; 11636 } 11637 } 11638 } 11639 11640 // Replace all inputs, either with the truncation operand, or a 11641 // truncation or extension to the final output type. 11642 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11643 // Constant inputs need to be replaced with the to-be-promoted nodes that 11644 // use them because they might have users outside of the cluster of 11645 // promoted nodes. 
11646 if (isa<ConstantSDNode>(Inputs[i])) 11647 continue; 11648 11649 SDValue InSrc = Inputs[i].getOperand(0); 11650 if (Inputs[i].getValueType() == N->getValueType(0)) 11651 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc); 11652 else if (N->getOpcode() == ISD::SIGN_EXTEND) 11653 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 11654 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0))); 11655 else if (N->getOpcode() == ISD::ZERO_EXTEND) 11656 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 11657 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0))); 11658 else 11659 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 11660 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0))); 11661 } 11662 11663 std::list<HandleSDNode> PromOpHandles; 11664 for (auto &PromOp : PromOps) 11665 PromOpHandles.emplace_back(PromOp); 11666 11667 // Replace all operations (these are all the same, but have a different 11668 // (promoted) return type). DAG.getNode will validate that the types of 11669 // a binary operator match, so go through the list in reverse so that 11670 // we've likely promoted both operands first. 11671 while (!PromOpHandles.empty()) { 11672 SDValue PromOp = PromOpHandles.back().getValue(); 11673 PromOpHandles.pop_back(); 11674 11675 unsigned C; 11676 switch (PromOp.getOpcode()) { 11677 default: C = 0; break; 11678 case ISD::SELECT: C = 1; break; 11679 case ISD::SELECT_CC: C = 2; break; 11680 } 11681 11682 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 11683 PromOp.getOperand(C).getValueType() != N->getValueType(0)) || 11684 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 11685 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) { 11686 // The to-be-promoted operands of this node have not yet been 11687 // promoted (this should be rare because we're going through the 11688 // list backward, but if one of the operands has several users in 11689 // this cluster of to-be-promoted nodes, it is possible). 11690 PromOpHandles.emplace_front(PromOp); 11691 continue; 11692 } 11693 11694 // For SELECT and SELECT_CC nodes, we do a similar check for any 11695 // to-be-promoted comparison inputs. 11696 if (PromOp.getOpcode() == ISD::SELECT || 11697 PromOp.getOpcode() == ISD::SELECT_CC) { 11698 if ((SelectTruncOp[0].count(PromOp.getNode()) && 11699 PromOp.getOperand(0).getValueType() != N->getValueType(0)) || 11700 (SelectTruncOp[1].count(PromOp.getNode()) && 11701 PromOp.getOperand(1).getValueType() != N->getValueType(0))) { 11702 PromOpHandles.emplace_front(PromOp); 11703 continue; 11704 } 11705 } 11706 11707 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 11708 PromOp.getNode()->op_end()); 11709 11710 // If this node has constant inputs, then they'll need to be promoted here. 11711 for (unsigned i = 0; i < 2; ++i) { 11712 if (!isa<ConstantSDNode>(Ops[C+i])) 11713 continue; 11714 if (Ops[C+i].getValueType() == N->getValueType(0)) 11715 continue; 11716 11717 if (N->getOpcode() == ISD::SIGN_EXTEND) 11718 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 11719 else if (N->getOpcode() == ISD::ZERO_EXTEND) 11720 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 11721 else 11722 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 11723 } 11724 11725 // If we've promoted the comparison inputs of a SELECT or SELECT_CC, 11726 // truncate them again to the original value type. 
11727 if (PromOp.getOpcode() == ISD::SELECT ||
11728 PromOp.getOpcode() == ISD::SELECT_CC) {
11729 auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
11730 if (SI0 != SelectTruncOp[0].end())
11731 Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
11732 auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
11733 if (SI1 != SelectTruncOp[1].end())
11734 Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
11735 }
11736
11737 DAG.ReplaceAllUsesOfValueWith(PromOp,
11738 DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
11739 }
11740
11741 // Now we're left with the initial extension itself.
11742 if (!ReallyNeedsExt)
11743 return N->getOperand(0);
11744
11745 // To zero extend, just mask off everything except for the first bit (in the
11746 // i1 case).
11747 if (N->getOpcode() == ISD::ZERO_EXTEND)
11748 return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
11749 DAG.getConstant(APInt::getLowBitsSet(
11750 N->getValueSizeInBits(0), PromBits),
11751 dl, N->getValueType(0)));
11752
11753 assert(N->getOpcode() == ISD::SIGN_EXTEND &&
11754 "Invalid extension type");
11755 EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
11756 SDValue ShiftCst =
11757 DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
11758 return DAG.getNode(
11759 ISD::SRA, dl, N->getValueType(0),
11760 DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
11761 ShiftCst);
11762 }
11763
11764 // Is this an extending load from an f32 to an f64?
11765 static bool isFPExtLoad(SDValue Op) {
11766 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
11767 return LD->getExtensionType() == ISD::EXTLOAD &&
11768 Op.getValueType() == MVT::f64;
11769 return false;
11770 }
11771
11772 /// Reduces the number of fp-to-int conversions when building a vector.
11773 ///
11774 /// If this vector is built out of floating-point to integer conversions,
11775 /// transform it to a vector built out of floating-point values followed by a
11776 /// single floating-point to integer conversion of the vector.
11777 /// Namely (build_vector (fptosi $A), (fptosi $B), ...)
11778 /// becomes (fptosi (build_vector ($A, $B, ...)))
11779 SDValue PPCTargetLowering::
11780 combineElementTruncationToVectorTruncation(SDNode *N,
11781 DAGCombinerInfo &DCI) const {
11782 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
11783 "Should be called with a BUILD_VECTOR node");
11784
11785 SelectionDAG &DAG = DCI.DAG;
11786 SDLoc dl(N);
11787
11788 SDValue FirstInput = N->getOperand(0);
11789 assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
11790 "The input operand must be an fp-to-int conversion.");
11791
11792 // This combine happens after legalization so the fp_to_[su]i nodes are
11793 // already converted to PPCISD nodes.
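// Illustrative shape of what is matched below for the 32-bit case (FCTIWZ
// stands for any of the four PPCISD conversion opcodes checked here, and A,
// B are f32 values widened to f64 by extending loads):
//   (build_vector (MFVSR (FCTIWZ A)), (MFVSR (FCTIWZ B)), ...)
// is rewritten into a single vector conversion:
//   (fp_to_sint (build_vector (fp_round A), (fp_round B), ...))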
11794 unsigned FirstConversion = FirstInput.getOperand(0).getOpcode(); 11795 if (FirstConversion == PPCISD::FCTIDZ || 11796 FirstConversion == PPCISD::FCTIDUZ || 11797 FirstConversion == PPCISD::FCTIWZ || 11798 FirstConversion == PPCISD::FCTIWUZ) { 11799 bool IsSplat = true; 11800 bool Is32Bit = FirstConversion == PPCISD::FCTIWZ || 11801 FirstConversion == PPCISD::FCTIWUZ; 11802 EVT SrcVT = FirstInput.getOperand(0).getValueType(); 11803 SmallVector<SDValue, 4> Ops; 11804 EVT TargetVT = N->getValueType(0); 11805 for (int i = 0, e = N->getNumOperands(); i < e; ++i) { 11806 SDValue NextOp = N->getOperand(i); 11807 if (NextOp.getOpcode() != PPCISD::MFVSR) 11808 return SDValue(); 11809 unsigned NextConversion = NextOp.getOperand(0).getOpcode(); 11810 if (NextConversion != FirstConversion) 11811 return SDValue(); 11812 // If we are converting to 32-bit integers, we need to add an FP_ROUND. 11813 // This is not valid if the input was originally double precision. It is 11814 // also not profitable to do unless this is an extending load in which 11815 // case doing this combine will allow us to combine consecutive loads. 11816 if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0))) 11817 return SDValue(); 11818 if (N->getOperand(i) != FirstInput) 11819 IsSplat = false; 11820 } 11821 11822 // If this is a splat, we leave it as-is since there will be only a single 11823 // fp-to-int conversion followed by a splat of the integer. This is better 11824 // for 32-bit and smaller ints and neutral for 64-bit ints. 11825 if (IsSplat) 11826 return SDValue(); 11827 11828 // Now that we know we have the right type of node, get its operands 11829 for (int i = 0, e = N->getNumOperands(); i < e; ++i) { 11830 SDValue In = N->getOperand(i).getOperand(0); 11831 if (Is32Bit) { 11832 // For 32-bit values, we need to add an FP_ROUND node (if we made it 11833 // here, we know that all inputs are extending loads so this is safe). 11834 if (In.isUndef()) 11835 Ops.push_back(DAG.getUNDEF(SrcVT)); 11836 else { 11837 SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl, 11838 MVT::f32, In.getOperand(0), 11839 DAG.getIntPtrConstant(1, dl)); 11840 Ops.push_back(Trunc); 11841 } 11842 } else 11843 Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0)); 11844 } 11845 11846 unsigned Opcode; 11847 if (FirstConversion == PPCISD::FCTIDZ || 11848 FirstConversion == PPCISD::FCTIWZ) 11849 Opcode = ISD::FP_TO_SINT; 11850 else 11851 Opcode = ISD::FP_TO_UINT; 11852 11853 EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32; 11854 SDValue BV = DAG.getBuildVector(NewVT, dl, Ops); 11855 return DAG.getNode(Opcode, dl, TargetVT, BV); 11856 } 11857 return SDValue(); 11858 } 11859 11860 /// Reduce the number of loads when building a vector. 11861 /// 11862 /// Building a vector out of multiple loads can be converted to a load 11863 /// of the vector type if the loads are consecutive. If the loads are 11864 /// consecutive but in descending order, a shuffle is added at the end 11865 /// to reorder the vector. 
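/// For example (illustrative), a v4i32 build_vector of i32 loads from
/// addresses A, A+4, A+8, A+12 becomes a single v4i32 load from A, while
/// loads taken in descending address order become a v4i32 load of the
/// lowest address followed by a vector_shuffle that reverses the elements.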
11866 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) { 11867 assert(N->getOpcode() == ISD::BUILD_VECTOR && 11868 "Should be called with a BUILD_VECTOR node"); 11869 11870 SDLoc dl(N); 11871 bool InputsAreConsecutiveLoads = true; 11872 bool InputsAreReverseConsecutive = true; 11873 unsigned ElemSize = N->getValueType(0).getScalarSizeInBits() / 8; 11874 SDValue FirstInput = N->getOperand(0); 11875 bool IsRoundOfExtLoad = false; 11876 11877 if (FirstInput.getOpcode() == ISD::FP_ROUND && 11878 FirstInput.getOperand(0).getOpcode() == ISD::LOAD) { 11879 LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0)); 11880 IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD; 11881 } 11882 // Not a build vector of (possibly fp_rounded) loads. 11883 if (!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) 11884 return SDValue(); 11885 11886 for (int i = 1, e = N->getNumOperands(); i < e; ++i) { 11887 // If any inputs are fp_round(extload), they all must be. 11888 if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND) 11889 return SDValue(); 11890 11891 SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) : 11892 N->getOperand(i); 11893 if (NextInput.getOpcode() != ISD::LOAD) 11894 return SDValue(); 11895 11896 SDValue PreviousInput = 11897 IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1); 11898 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput); 11899 LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput); 11900 11901 // If any inputs are fp_round(extload), they all must be. 11902 if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD) 11903 return SDValue(); 11904 11905 if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG)) 11906 InputsAreConsecutiveLoads = false; 11907 if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG)) 11908 InputsAreReverseConsecutive = false; 11909 11910 // Exit early if the loads are neither consecutive nor reverse consecutive. 11911 if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive) 11912 return SDValue(); 11913 } 11914 11915 assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) && 11916 "The loads cannot be both consecutive and reverse consecutive."); 11917 11918 SDValue FirstLoadOp = 11919 IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput; 11920 SDValue LastLoadOp = 11921 IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) : 11922 N->getOperand(N->getNumOperands()-1); 11923 11924 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp); 11925 LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp); 11926 if (InputsAreConsecutiveLoads) { 11927 assert(LD1 && "Input needs to be a LoadSDNode."); 11928 return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(), 11929 LD1->getBasePtr(), LD1->getPointerInfo(), 11930 LD1->getAlignment()); 11931 } 11932 if (InputsAreReverseConsecutive) { 11933 assert(LDL && "Input needs to be a LoadSDNode."); 11934 SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(), 11935 LDL->getBasePtr(), LDL->getPointerInfo(), 11936 LDL->getAlignment()); 11937 SmallVector<int, 16> Ops; 11938 for (int i = N->getNumOperands() - 1; i >= 0; i--) 11939 Ops.push_back(i); 11940 11941 return DAG.getVectorShuffle(N->getValueType(0), dl, Load, 11942 DAG.getUNDEF(N->getValueType(0)), Ops); 11943 } 11944 return SDValue(); 11945 } 11946 11947 // This function adds the required vector_shuffle needed to get 11948 // the elements of the vector extract in the correct position 11949 // as specified by the CorrectElems encoding. 
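// Both Elems and CorrectElems use one byte per build_vector operand: the
// low nibble of each byte holds the little-endian element index and the
// high nibble holds the big-endian element index. The loop below consumes
// one byte per operand, starting with the least-significant byte.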
11950 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
11951 SDValue Input, uint64_t Elems,
11952 uint64_t CorrectElems) {
11953 SDLoc dl(N);
11954
11955 unsigned NumElems = Input.getValueType().getVectorNumElements();
11956 SmallVector<int, 16> ShuffleMask(NumElems, -1);
11957
11958 // Knowing the element indices being extracted from the original
11959 // vector and the order in which they're being inserted, just put
11960 // them at element indices required for the instruction.
11961 for (unsigned i = 0; i < N->getNumOperands(); i++) {
11962 if (DAG.getDataLayout().isLittleEndian())
11963 ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
11964 else
11965 ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
11966 CorrectElems = CorrectElems >> 8;
11967 Elems = Elems >> 8;
11968 }
11969
11970 SDValue Shuffle =
11971 DAG.getVectorShuffle(Input.getValueType(), dl, Input,
11972 DAG.getUNDEF(Input.getValueType()), ShuffleMask);
11973
11974 EVT Ty = N->getValueType(0);
11975 SDValue BV = DAG.getNode(PPCISD::SExtVElems, dl, Ty, Shuffle);
11976 return BV;
11977 }
11978
11979 // Look for build vector patterns where input operands come from sign
11980 // extended vector_extract elements of specific indices. If the correct indices
11981 // aren't used, add a vector shuffle to fix up the indices and create a new
11982 // PPCISD::SExtVElems node which selects the vector sign extend instructions
11983 // during instruction selection.
11984 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
11985 // This array encodes the indices that the vector sign extend instructions
11986 // extract from when extending from one type to another for both BE and LE.
11987 // The right nibble of each byte corresponds to the LE indices,
11988 // and the left nibble of each byte corresponds to the BE indices.
11989 // For example: 0x3074B8FC byte->word
11990 // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
11991 // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
11992 // For example: 0x000070F8 byte->double word
11993 // For LE: the allowed indices are: 0x0,0x8
11994 // For BE: the allowed indices are: 0x7,0xF
11995 uint64_t TargetElems[] = {
11996 0x3074B8FC, // b->w
11997 0x000070F8, // b->d
11998 0x10325476, // h->w
11999 0x00003074, // h->d
12000 0x00001032, // w->d
12001 };
12002
12003 uint64_t Elems = 0;
12004 int Index;
12005 SDValue Input;
12006
12007 auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
12008 if (!Op)
12009 return false;
12010 if (Op.getOpcode() != ISD::SIGN_EXTEND)
12011 return false;
12012
12013 SDValue Extract = Op.getOperand(0);
12014 if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
12015 return false;
12016
12017 ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
12018 if (!ExtOp)
12019 return false;
12020
12021 Index = ExtOp->getZExtValue();
12022 if (Input && Input != Extract.getOperand(0))
12023 return false;
12024
12025 if (!Input)
12026 Input = Extract.getOperand(0);
12027
12028 Elems = Elems << 8;
12029 Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
12030 Elems |= Index;
12031
12032 return true;
12033 };
12034
12035 // If the build vector operands aren't sign-extended vector extracts
12036 // of the same input vector, then return.
12037 for (unsigned i = 0; i < N->getNumOperands(); i++) {
12038 if (!isSExtOfVecExtract(N->getOperand(i))) {
12039 return SDValue();
12040 }
12041 }
12042
12043 // If the vector extract indices are not correct, add the appropriate
12044 // vector_shuffle.
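// The sum of the input and output scalar sizes selects the row of
// TargetElems above: 8+32=40 (b->w), 8+64=72 (b->d), 16+32=48 (h->w),
// 16+64=80 (h->d), 32+64=96 (w->d).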
12045 int TgtElemArrayIdx; 12046 int InputSize = Input.getValueType().getScalarSizeInBits(); 12047 int OutputSize = N->getValueType(0).getScalarSizeInBits(); 12048 if (InputSize + OutputSize == 40) 12049 TgtElemArrayIdx = 0; 12050 else if (InputSize + OutputSize == 72) 12051 TgtElemArrayIdx = 1; 12052 else if (InputSize + OutputSize == 48) 12053 TgtElemArrayIdx = 2; 12054 else if (InputSize + OutputSize == 80) 12055 TgtElemArrayIdx = 3; 12056 else if (InputSize + OutputSize == 96) 12057 TgtElemArrayIdx = 4; 12058 else 12059 return SDValue(); 12060 12061 uint64_t CorrectElems = TargetElems[TgtElemArrayIdx]; 12062 CorrectElems = DAG.getDataLayout().isLittleEndian() 12063 ? CorrectElems & 0x0F0F0F0F0F0F0F0F 12064 : CorrectElems & 0xF0F0F0F0F0F0F0F0; 12065 if (Elems != CorrectElems) { 12066 return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems); 12067 } 12068 12069 // Regular lowering will catch cases where a shuffle is not needed. 12070 return SDValue(); 12071 } 12072 12073 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N, 12074 DAGCombinerInfo &DCI) const { 12075 assert(N->getOpcode() == ISD::BUILD_VECTOR && 12076 "Should be called with a BUILD_VECTOR node"); 12077 12078 SelectionDAG &DAG = DCI.DAG; 12079 SDLoc dl(N); 12080 12081 if (!Subtarget.hasVSX()) 12082 return SDValue(); 12083 12084 // The target independent DAG combiner will leave a build_vector of 12085 // float-to-int conversions intact. We can generate MUCH better code for 12086 // a float-to-int conversion of a vector of floats. 12087 SDValue FirstInput = N->getOperand(0); 12088 if (FirstInput.getOpcode() == PPCISD::MFVSR) { 12089 SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI); 12090 if (Reduced) 12091 return Reduced; 12092 } 12093 12094 // If we're building a vector out of consecutive loads, just load that 12095 // vector type. 12096 SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG); 12097 if (Reduced) 12098 return Reduced; 12099 12100 // If we're building a vector out of extended elements from another vector 12101 // we have P9 vector integer extend instructions. 
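// combineBVOfVecSExt turns such a build_vector into a PPCISD::SExtVElems
// node, inserting a vector_shuffle first if the extract indices do not
// already match the lane layout the extend instructions expect.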
12102 if (Subtarget.hasP9Altivec()) { 12103 Reduced = combineBVOfVecSExt(N, DAG); 12104 if (Reduced) 12105 return Reduced; 12106 } 12107 12108 12109 if (N->getValueType(0) != MVT::v2f64) 12110 return SDValue(); 12111 12112 // Looking for: 12113 // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1)) 12114 if (FirstInput.getOpcode() != ISD::SINT_TO_FP && 12115 FirstInput.getOpcode() != ISD::UINT_TO_FP) 12116 return SDValue(); 12117 if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP && 12118 N->getOperand(1).getOpcode() != ISD::UINT_TO_FP) 12119 return SDValue(); 12120 if (FirstInput.getOpcode() != N->getOperand(1).getOpcode()) 12121 return SDValue(); 12122 12123 SDValue Ext1 = FirstInput.getOperand(0); 12124 SDValue Ext2 = N->getOperand(1).getOperand(0); 12125 if(Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || 12126 Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT) 12127 return SDValue(); 12128 12129 ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1)); 12130 ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1)); 12131 if (!Ext1Op || !Ext2Op) 12132 return SDValue(); 12133 if (Ext1.getValueType() != MVT::i32 || 12134 Ext2.getValueType() != MVT::i32) 12135 if (Ext1.getOperand(0) != Ext2.getOperand(0)) 12136 return SDValue(); 12137 12138 int FirstElem = Ext1Op->getZExtValue(); 12139 int SecondElem = Ext2Op->getZExtValue(); 12140 int SubvecIdx; 12141 if (FirstElem == 0 && SecondElem == 1) 12142 SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0; 12143 else if (FirstElem == 2 && SecondElem == 3) 12144 SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1; 12145 else 12146 return SDValue(); 12147 12148 SDValue SrcVec = Ext1.getOperand(0); 12149 auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ? 12150 PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP; 12151 return DAG.getNode(NodeType, dl, MVT::v2f64, 12152 SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl)); 12153 } 12154 12155 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N, 12156 DAGCombinerInfo &DCI) const { 12157 assert((N->getOpcode() == ISD::SINT_TO_FP || 12158 N->getOpcode() == ISD::UINT_TO_FP) && 12159 "Need an int -> FP conversion node here"); 12160 12161 if (useSoftFloat() || !Subtarget.has64BitSupport()) 12162 return SDValue(); 12163 12164 SelectionDAG &DAG = DCI.DAG; 12165 SDLoc dl(N); 12166 SDValue Op(N, 0); 12167 12168 // Don't handle ppc_fp128 here or conversions that are out-of-range capable 12169 // from the hardware. 12170 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 12171 return SDValue(); 12172 if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) || 12173 Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64)) 12174 return SDValue(); 12175 12176 SDValue FirstOperand(Op.getOperand(0)); 12177 bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD && 12178 (FirstOperand.getValueType() == MVT::i8 || 12179 FirstOperand.getValueType() == MVT::i16); 12180 if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) { 12181 bool Signed = N->getOpcode() == ISD::SINT_TO_FP; 12182 bool DstDouble = Op.getValueType() == MVT::f64; 12183 unsigned ConvOp = Signed ? 12184 (DstDouble ? PPCISD::FCFID : PPCISD::FCFIDS) : 12185 (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS); 12186 SDValue WidthConst = 12187 DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 
1 : 2,
12188 dl, false);
12189 LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
12190 SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
12191 SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
12192 DAG.getVTList(MVT::f64, MVT::Other),
12193 Ops, MVT::i8, LDN->getMemOperand());
12194
12195 // For signed conversion, we need to sign-extend the value in the VSR.
12196 if (Signed) {
12197 SDValue ExtOps[] = { Ld, WidthConst };
12198 SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
12199 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
12200 } else
12201 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
12202 }
12203
12204
12205 // For i32 intermediate values, unfortunately, the conversion functions
12206 // leave the upper 32 bits of the value undefined. Within the set of
12207 // scalar instructions, we have no method for zero- or sign-extending the
12208 // value. Thus, we cannot handle i32 intermediate values here.
12209 if (Op.getOperand(0).getValueType() == MVT::i32)
12210 return SDValue();
12211
12212 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
12213 "UINT_TO_FP is supported only with FPCVT");
12214
12215 // If we have FCFIDS, then use it when converting to single-precision.
12216 // Otherwise, convert to double-precision and then round.
12217 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
12218 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
12219 : PPCISD::FCFIDS)
12220 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
12221 : PPCISD::FCFID);
12222 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
12223 ? MVT::f32
12224 : MVT::f64;
12225
12226 // If we're converting from a float to an int and back to a float again,
12227 // then we don't need the store/load pair at all.
12228 if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
12229 Subtarget.hasFPCVT()) ||
12230 (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
12231 SDValue Src = Op.getOperand(0).getOperand(0);
12232 if (Src.getValueType() == MVT::f32) {
12233 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
12234 DCI.AddToWorklist(Src.getNode());
12235 } else if (Src.getValueType() != MVT::f64) {
12236 // Make sure that we don't pick up a ppc_fp128 source value.
12237 return SDValue();
12238 }
12239
12240 unsigned FCTOp =
12241 Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
12242 PPCISD::FCTIDUZ;
12243
12244 SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
12245 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
12246
12247 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
12248 FP = DAG.getNode(ISD::FP_ROUND, dl,
12249 MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
12250 DCI.AddToWorklist(FP.getNode());
12251 }
12252
12253 return FP;
12254 }
12255
12256 return SDValue();
12257 }
12258
12259 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
12260 // builtins) into loads with swaps.
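// On little-endian subtargets lxvd2x loads the two doublewords in
// big-endian element order, so the raw PPCISD::LXVD2X load is followed by
// a PPCISD::XXSWAPD (plus a bitcast when the requested type is not v2f64).
// Illustrative shape: (v4i32 (load x)) -> (bitcast (XXSWAPD (LXVD2X x))).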
12261 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N, 12262 DAGCombinerInfo &DCI) const { 12263 SelectionDAG &DAG = DCI.DAG; 12264 SDLoc dl(N); 12265 SDValue Chain; 12266 SDValue Base; 12267 MachineMemOperand *MMO; 12268 12269 switch (N->getOpcode()) { 12270 default: 12271 llvm_unreachable("Unexpected opcode for little endian VSX load"); 12272 case ISD::LOAD: { 12273 LoadSDNode *LD = cast<LoadSDNode>(N); 12274 Chain = LD->getChain(); 12275 Base = LD->getBasePtr(); 12276 MMO = LD->getMemOperand(); 12277 // If the MMO suggests this isn't a load of a full vector, leave 12278 // things alone. For a built-in, we have to make the change for 12279 // correctness, so if there is a size problem that will be a bug. 12280 if (MMO->getSize() < 16) 12281 return SDValue(); 12282 break; 12283 } 12284 case ISD::INTRINSIC_W_CHAIN: { 12285 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 12286 Chain = Intrin->getChain(); 12287 // Similarly to the store case below, Intrin->getBasePtr() doesn't get 12288 // us what we want. Get operand 2 instead. 12289 Base = Intrin->getOperand(2); 12290 MMO = Intrin->getMemOperand(); 12291 break; 12292 } 12293 } 12294 12295 MVT VecTy = N->getValueType(0).getSimpleVT(); 12296 12297 // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is 12298 // aligned and the type is a vector with elements up to 4 bytes 12299 if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16) 12300 && VecTy.getScalarSizeInBits() <= 32 ) { 12301 return SDValue(); 12302 } 12303 12304 SDValue LoadOps[] = { Chain, Base }; 12305 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl, 12306 DAG.getVTList(MVT::v2f64, MVT::Other), 12307 LoadOps, MVT::v2f64, MMO); 12308 12309 DCI.AddToWorklist(Load.getNode()); 12310 Chain = Load.getValue(1); 12311 SDValue Swap = DAG.getNode( 12312 PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load); 12313 DCI.AddToWorklist(Swap.getNode()); 12314 12315 // Add a bitcast if the resulting load type doesn't match v2f64. 12316 if (VecTy != MVT::v2f64) { 12317 SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap); 12318 DCI.AddToWorklist(N.getNode()); 12319 // Package {bitcast value, swap's chain} to match Load's shape. 12320 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other), 12321 N, Swap.getValue(1)); 12322 } 12323 12324 return Swap; 12325 } 12326 12327 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for 12328 // builtins) into stores with swaps. 12329 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N, 12330 DAGCombinerInfo &DCI) const { 12331 SelectionDAG &DAG = DCI.DAG; 12332 SDLoc dl(N); 12333 SDValue Chain; 12334 SDValue Base; 12335 unsigned SrcOpnd; 12336 MachineMemOperand *MMO; 12337 12338 switch (N->getOpcode()) { 12339 default: 12340 llvm_unreachable("Unexpected opcode for little endian VSX store"); 12341 case ISD::STORE: { 12342 StoreSDNode *ST = cast<StoreSDNode>(N); 12343 Chain = ST->getChain(); 12344 Base = ST->getBasePtr(); 12345 MMO = ST->getMemOperand(); 12346 SrcOpnd = 1; 12347 // If the MMO suggests this isn't a store of a full vector, leave 12348 // things alone. For a built-in, we have to make the change for 12349 // correctness, so if there is a size problem that will be a bug. 12350 if (MMO->getSize() < 16) 12351 return SDValue(); 12352 break; 12353 } 12354 case ISD::INTRINSIC_VOID: { 12355 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 12356 Chain = Intrin->getChain(); 12357 // Intrin->getBasePtr() oddly does not get what we want. 
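// For these store intrinsics the value being stored is operand 2 and the
// pointer is operand 3, so take the base address from operand 3 directly.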
12358 Base = Intrin->getOperand(3);
12359 MMO = Intrin->getMemOperand();
12360 SrcOpnd = 2;
12361 break;
12362 }
12363 }
12364
12365 SDValue Src = N->getOperand(SrcOpnd);
12366 MVT VecTy = Src.getValueType().getSimpleVT();
12367
12368 // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
12369 // aligned and the type is a vector with elements up to 4 bytes.
12370 if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16)
12371 && VecTy.getScalarSizeInBits() <= 32 ) {
12372 return SDValue();
12373 }
12374
12375 // All stores are done as v2f64, with a bitcast added first if necessary.
12376 if (VecTy != MVT::v2f64) {
12377 Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
12378 DCI.AddToWorklist(Src.getNode());
12379 }
12380
12381 SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
12382 DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
12383 DCI.AddToWorklist(Swap.getNode());
12384 Chain = Swap.getValue(1);
12385 SDValue StoreOps[] = { Chain, Swap, Base };
12386 SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
12387 DAG.getVTList(MVT::Other),
12388 StoreOps, VecTy, MMO);
12389 DCI.AddToWorklist(Store.getNode());
12390 return Store;
12391 }
12392
12393 // Handle DAG combine for STORE (FP_TO_INT F).
12394 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
12395 DAGCombinerInfo &DCI) const {
12396
12397 SelectionDAG &DAG = DCI.DAG;
12398 SDLoc dl(N);
12399 unsigned Opcode = N->getOperand(1).getOpcode();
12400
12401 assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
12402 && "Not a FP_TO_INT Instruction!");
12403
12404 SDValue Val = N->getOperand(1).getOperand(0);
12405 EVT Op1VT = N->getOperand(1).getValueType();
12406 EVT ResVT = Val.getValueType();
12407
12408 // Floating point types smaller than 32 bits are not legal on Power.
12409 if (ResVT.getScalarSizeInBits() < 32)
12410 return SDValue();
12411
12412 // Only perform combine for conversion to i64/i32 or power9 i16/i8.
12413 bool ValidTypeForStoreFltAsInt =
12414 (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
12415 (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
12416
12417 if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Altivec() ||
12418 cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
12419 return SDValue();
12420
12421 // Extend f32 values to f64.
12422 if (ResVT.getScalarSizeInBits() == 32) {
12423 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
12424 DCI.AddToWorklist(Val.getNode());
12425 }
12426
12427 // Set signed or unsigned conversion opcode.
12428 unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
12429 PPCISD::FP_TO_SINT_IN_VSR :
12430 PPCISD::FP_TO_UINT_IN_VSR;
12431
12432 Val = DAG.getNode(ConvOpcode,
12433 dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
12434 DCI.AddToWorklist(Val.getNode());
12435
12436 // Set number of bytes being converted.
12437 unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8; 12438 SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2), 12439 DAG.getIntPtrConstant(ByteSize, dl, false), 12440 DAG.getValueType(Op1VT) }; 12441 12442 Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl, 12443 DAG.getVTList(MVT::Other), Ops, 12444 cast<StoreSDNode>(N)->getMemoryVT(), 12445 cast<StoreSDNode>(N)->getMemOperand()); 12446 12447 DCI.AddToWorklist(Val.getNode()); 12448 return Val; 12449 } 12450 12451 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, 12452 DAGCombinerInfo &DCI) const { 12453 SelectionDAG &DAG = DCI.DAG; 12454 SDLoc dl(N); 12455 switch (N->getOpcode()) { 12456 default: break; 12457 case ISD::SHL: 12458 return combineSHL(N, DCI); 12459 case ISD::SRA: 12460 return combineSRA(N, DCI); 12461 case ISD::SRL: 12462 return combineSRL(N, DCI); 12463 case PPCISD::SHL: 12464 if (isNullConstant(N->getOperand(0))) // 0 << V -> 0. 12465 return N->getOperand(0); 12466 break; 12467 case PPCISD::SRL: 12468 if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0. 12469 return N->getOperand(0); 12470 break; 12471 case PPCISD::SRA: 12472 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 12473 if (C->isNullValue() || // 0 >>s V -> 0. 12474 C->isAllOnesValue()) // -1 >>s V -> -1. 12475 return N->getOperand(0); 12476 } 12477 break; 12478 case ISD::SIGN_EXTEND: 12479 case ISD::ZERO_EXTEND: 12480 case ISD::ANY_EXTEND: 12481 return DAGCombineExtBoolTrunc(N, DCI); 12482 case ISD::TRUNCATE: 12483 case ISD::SETCC: 12484 case ISD::SELECT_CC: 12485 return DAGCombineTruncBoolExt(N, DCI); 12486 case ISD::SINT_TO_FP: 12487 case ISD::UINT_TO_FP: 12488 return combineFPToIntToFP(N, DCI); 12489 case ISD::STORE: { 12490 12491 EVT Op1VT = N->getOperand(1).getValueType(); 12492 unsigned Opcode = N->getOperand(1).getOpcode(); 12493 12494 if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) { 12495 SDValue Val= combineStoreFPToInt(N, DCI); 12496 if (Val) 12497 return Val; 12498 } 12499 12500 // Turn STORE (BSWAP) -> sthbrx/stwbrx. 12501 if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP && 12502 N->getOperand(1).getNode()->hasOneUse() && 12503 (Op1VT == MVT::i32 || Op1VT == MVT::i16 || 12504 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) { 12505 12506 // STBRX can only handle simple types. 12507 EVT mVT = cast<StoreSDNode>(N)->getMemoryVT(); 12508 if (mVT.isExtended()) 12509 break; 12510 12511 SDValue BSwapOp = N->getOperand(1).getOperand(0); 12512 // Do an any-extend to 32-bits if this is a half-word input. 12513 if (BSwapOp.getValueType() == MVT::i16) 12514 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp); 12515 12516 // If the type of BSWAP operand is wider than stored memory width 12517 // it need to be shifted to the right side before STBRX. 12518 if (Op1VT.bitsGT(mVT)) { 12519 int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits(); 12520 BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp, 12521 DAG.getConstant(Shift, dl, MVT::i32)); 12522 // Need to truncate if this is a bswap of i64 stored as i32/i16. 
12523 if (Op1VT == MVT::i64)
12524 BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
12525 }
12526
12527 SDValue Ops[] = {
12528 N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
12529 };
12530 return
12531 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
12532 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
12533 cast<StoreSDNode>(N)->getMemOperand());
12534 }
12535
12536 // STORE Constant:i32<0> -> STORE<trunc to i32> Constant:i64<0>
12537 // This increases the chance of CSE'ing the constant construction.
12538 if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
12539 isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
12540 // Need to sign-extend to 64 bits to handle negative values.
12541 EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
12542 uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
12543 MemVT.getSizeInBits());
12544 SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
12545
12546 // DAG.getTruncStore() can't be used here because it doesn't accept
12547 // the general (base + offset) addressing mode.
12548 // So we use UpdateNodeOperands and setTruncatingStore instead.
12549 DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
12550 N->getOperand(3));
12551 cast<StoreSDNode>(N)->setTruncatingStore(true);
12552 return SDValue(N, 0);
12553 }
12554
12555 // For little endian, VSX stores require generating xxswapd/stxvd2x.
12556 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
12557 if (Op1VT.isSimple()) {
12558 MVT StoreVT = Op1VT.getSimpleVT();
12559 if (Subtarget.needsSwapsForVSXMemOps() &&
12560 (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
12561 StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
12562 return expandVSXStoreForLE(N, DCI);
12563 }
12564 break;
12565 }
12566 case ISD::LOAD: {
12567 LoadSDNode *LD = cast<LoadSDNode>(N);
12568 EVT VT = LD->getValueType(0);
12569
12570 // For little endian, VSX loads require generating lxvd2x/xxswapd.
12571 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
12572 if (VT.isSimple()) {
12573 MVT LoadVT = VT.getSimpleVT();
12574 if (Subtarget.needsSwapsForVSXMemOps() &&
12575 (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
12576 LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
12577 return expandVSXLoadForLE(N, DCI);
12578 }
12579
12580 // We sometimes end up with a 64-bit integer load, from which we extract
12581 // two single-precision floating-point numbers. This happens with
12582 // std::complex<float>, and other similar structures, because of the way we
12583 // canonicalize structure copies. However, if we lack direct moves,
12584 // then the final bitcasts from the extracted integer values to the
12585 // floating-point numbers turn into store/load pairs. Even with direct moves,
12586 // just loading the two floating-point numbers is likely better.
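// The lambda below matches an i64 load whose only users are a truncate and
// a (srl 32)+truncate, each feeding a single f32 bitcast, and replaces the
// pair with two f32 loads at offsets 0 and 4 from the original address.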
12587 auto ReplaceTwoFloatLoad = [&]() { 12588 if (VT != MVT::i64) 12589 return false; 12590 12591 if (LD->getExtensionType() != ISD::NON_EXTLOAD || 12592 LD->isVolatile()) 12593 return false; 12594 12595 // We're looking for a sequence like this: 12596 // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64 12597 // t16: i64 = srl t13, Constant:i32<32> 12598 // t17: i32 = truncate t16 12599 // t18: f32 = bitcast t17 12600 // t19: i32 = truncate t13 12601 // t20: f32 = bitcast t19 12602 12603 if (!LD->hasNUsesOfValue(2, 0)) 12604 return false; 12605 12606 auto UI = LD->use_begin(); 12607 while (UI.getUse().getResNo() != 0) ++UI; 12608 SDNode *Trunc = *UI++; 12609 while (UI.getUse().getResNo() != 0) ++UI; 12610 SDNode *RightShift = *UI; 12611 if (Trunc->getOpcode() != ISD::TRUNCATE) 12612 std::swap(Trunc, RightShift); 12613 12614 if (Trunc->getOpcode() != ISD::TRUNCATE || 12615 Trunc->getValueType(0) != MVT::i32 || 12616 !Trunc->hasOneUse()) 12617 return false; 12618 if (RightShift->getOpcode() != ISD::SRL || 12619 !isa<ConstantSDNode>(RightShift->getOperand(1)) || 12620 RightShift->getConstantOperandVal(1) != 32 || 12621 !RightShift->hasOneUse()) 12622 return false; 12623 12624 SDNode *Trunc2 = *RightShift->use_begin(); 12625 if (Trunc2->getOpcode() != ISD::TRUNCATE || 12626 Trunc2->getValueType(0) != MVT::i32 || 12627 !Trunc2->hasOneUse()) 12628 return false; 12629 12630 SDNode *Bitcast = *Trunc->use_begin(); 12631 SDNode *Bitcast2 = *Trunc2->use_begin(); 12632 12633 if (Bitcast->getOpcode() != ISD::BITCAST || 12634 Bitcast->getValueType(0) != MVT::f32) 12635 return false; 12636 if (Bitcast2->getOpcode() != ISD::BITCAST || 12637 Bitcast2->getValueType(0) != MVT::f32) 12638 return false; 12639 12640 if (Subtarget.isLittleEndian()) 12641 std::swap(Bitcast, Bitcast2); 12642 12643 // Bitcast has the second float (in memory-layout order) and Bitcast2 12644 // has the first one. 12645 12646 SDValue BasePtr = LD->getBasePtr(); 12647 if (LD->isIndexed()) { 12648 assert(LD->getAddressingMode() == ISD::PRE_INC && 12649 "Non-pre-inc AM on PPC?"); 12650 BasePtr = 12651 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 12652 LD->getOffset()); 12653 } 12654 12655 auto MMOFlags = 12656 LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile; 12657 SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr, 12658 LD->getPointerInfo(), LD->getAlignment(), 12659 MMOFlags, LD->getAAInfo()); 12660 SDValue AddPtr = 12661 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), 12662 BasePtr, DAG.getIntPtrConstant(4, dl)); 12663 SDValue FloatLoad2 = DAG.getLoad( 12664 MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr, 12665 LD->getPointerInfo().getWithOffset(4), 12666 MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo()); 12667 12668 if (LD->isIndexed()) { 12669 // Note that DAGCombine should re-form any pre-increment load(s) from 12670 // what is produced here if that makes sense. 12671 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr); 12672 } 12673 12674 DCI.CombineTo(Bitcast2, FloatLoad); 12675 DCI.CombineTo(Bitcast, FloatLoad2); 12676 12677 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 
2 : 1), 12678 SDValue(FloatLoad2.getNode(), 1)); 12679 return true; 12680 }; 12681 12682 if (ReplaceTwoFloatLoad()) 12683 return SDValue(N, 0); 12684 12685 EVT MemVT = LD->getMemoryVT(); 12686 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext()); 12687 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty); 12688 Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext()); 12689 unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy); 12690 if (LD->isUnindexed() && VT.isVector() && 12691 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) && 12692 // P8 and later hardware should just use LOAD. 12693 !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 || 12694 VT == MVT::v4i32 || VT == MVT::v4f32)) || 12695 (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) && 12696 LD->getAlignment() >= ScalarABIAlignment)) && 12697 LD->getAlignment() < ABIAlignment) { 12698 // This is a type-legal unaligned Altivec or QPX load. 12699 SDValue Chain = LD->getChain(); 12700 SDValue Ptr = LD->getBasePtr(); 12701 bool isLittleEndian = Subtarget.isLittleEndian(); 12702 12703 // This implements the loading of unaligned vectors as described in 12704 // the venerable Apple Velocity Engine overview. Specifically: 12705 // https://developer.apple.com/hardwaredrivers/ve/alignment.html 12706 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html 12707 // 12708 // The general idea is to expand a sequence of one or more unaligned 12709 // loads into an alignment-based permutation-control instruction (lvsl 12710 // or lvsr), a series of regular vector loads (which always truncate 12711 // their input address to an aligned address), and a series of 12712 // permutations. The results of these permutations are the requested 12713 // loaded values. The trick is that the last "extra" load is not taken 12714 // from the address you might suspect (sizeof(vector) bytes after the 12715 // last requested load), but rather sizeof(vector) - 1 bytes after the 12716 // last requested vector. The point of this is to avoid a page fault if 12717 // the base address happened to be aligned. This works because if the 12718 // base address is aligned, then adding less than a full vector length 12719 // will cause the last vector in the sequence to be (re)loaded. 12720 // Otherwise, the next vector will be fetched as you might suspect was 12721 // necessary. 12722 12723 // We might be able to reuse the permutation generation from 12724 // a different base address offset from this one by an aligned amount. 12725 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this 12726 // optimization later. 12727 Intrinsic::ID Intr, IntrLD, IntrPerm; 12728 MVT PermCntlTy, PermTy, LDTy; 12729 if (Subtarget.hasAltivec()) { 12730 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr : 12731 Intrinsic::ppc_altivec_lvsl; 12732 IntrLD = Intrinsic::ppc_altivec_lvx; 12733 IntrPerm = Intrinsic::ppc_altivec_vperm; 12734 PermCntlTy = MVT::v16i8; 12735 PermTy = MVT::v4i32; 12736 LDTy = MVT::v4i32; 12737 } else { 12738 Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld : 12739 Intrinsic::ppc_qpx_qvlpcls; 12740 IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd : 12741 Intrinsic::ppc_qpx_qvlfs; 12742 IntrPerm = Intrinsic::ppc_qpx_qvfperm; 12743 PermCntlTy = MVT::v4f64; 12744 PermTy = MVT::v4f64; 12745 LDTy = MemVT.getSimpleVT(); 12746 } 12747 12748 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy); 12749 12750 // Create the new MMO for the new base load. 
It is like the original MMO, 12751 // but represents an area in memory almost twice the vector size centered 12752 // on the original address. If the address is unaligned, we might start 12753 // reading up to (sizeof(vector)-1) bytes below the address of the 12754 // original unaligned load. 12755 MachineFunction &MF = DAG.getMachineFunction(); 12756 MachineMemOperand *BaseMMO = 12757 MF.getMachineMemOperand(LD->getMemOperand(), 12758 -(long)MemVT.getStoreSize()+1, 12759 2*MemVT.getStoreSize()-1); 12760 12761 // Create the new base load. 12762 SDValue LDXIntID = 12763 DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout())); 12764 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr }; 12765 SDValue BaseLoad = 12766 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 12767 DAG.getVTList(PermTy, MVT::Other), 12768 BaseLoadOps, LDTy, BaseMMO); 12769 12770 // Note that the value of IncOffset (which is provided to the next 12771 // load's pointer info offset value, and thus used to calculate the 12772 // alignment), and the value of IncValue (which is actually used to 12773 // increment the pointer value) are different! This is because we 12774 // require the next load to appear to be aligned, even though it 12775 // is actually offset from the base pointer by a lesser amount. 12776 int IncOffset = VT.getSizeInBits() / 8; 12777 int IncValue = IncOffset; 12778 12779 // Walk (both up and down) the chain looking for another load at the real 12780 // (aligned) offset (the alignment of the other load does not matter in 12781 // this case). If found, then do not use the offset reduction trick, as 12782 // that will prevent the loads from being later combined (as they would 12783 // otherwise be duplicates). 12784 if (!findConsecutiveLoad(LD, DAG)) 12785 --IncValue; 12786 12787 SDValue Increment = 12788 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout())); 12789 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 12790 12791 MachineMemOperand *ExtraMMO = 12792 MF.getMachineMemOperand(LD->getMemOperand(), 12793 1, 2*MemVT.getStoreSize()-1); 12794 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr }; 12795 SDValue ExtraLoad = 12796 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 12797 DAG.getVTList(PermTy, MVT::Other), 12798 ExtraLoadOps, LDTy, ExtraMMO); 12799 12800 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 12801 BaseLoad.getValue(1), ExtraLoad.getValue(1)); 12802 12803 // Because vperm has a big-endian bias, we must reverse the order 12804 // of the input vectors and complement the permute control vector 12805 // when generating little endian code. We have already handled the 12806 // latter by using lvsr instead of lvsl, so just reverse BaseLoad 12807 // and ExtraLoad here. 12808 SDValue Perm; 12809 if (isLittleEndian) 12810 Perm = BuildIntrinsicOp(IntrPerm, 12811 ExtraLoad, BaseLoad, PermCntl, DAG, dl); 12812 else 12813 Perm = BuildIntrinsicOp(IntrPerm, 12814 BaseLoad, ExtraLoad, PermCntl, DAG, dl); 12815 12816 if (VT != PermTy) 12817 Perm = Subtarget.hasAltivec() ? 12818 DAG.getNode(ISD::BITCAST, dl, VT, Perm) : 12819 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX 12820 DAG.getTargetConstant(1, dl, MVT::i64)); 12821 // second argument is 1 because this rounding 12822 // is always exact. 12823 12824 // The output of the permutation is our loaded result, the TokenFactor is 12825 // our new chain. 
12826 DCI.CombineTo(N, Perm, TF); 12827 return SDValue(N, 0); 12828 } 12829 } 12830 break; 12831 case ISD::INTRINSIC_WO_CHAIN: { 12832 bool isLittleEndian = Subtarget.isLittleEndian(); 12833 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 12834 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr 12835 : Intrinsic::ppc_altivec_lvsl); 12836 if ((IID == Intr || 12837 IID == Intrinsic::ppc_qpx_qvlpcld || 12838 IID == Intrinsic::ppc_qpx_qvlpcls) && 12839 N->getOperand(1)->getOpcode() == ISD::ADD) { 12840 SDValue Add = N->getOperand(1); 12841 12842 int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ? 12843 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */; 12844 12845 if (DAG.MaskedValueIsZero(Add->getOperand(1), 12846 APInt::getAllOnesValue(Bits /* alignment */) 12847 .zext(Add.getScalarValueSizeInBits()))) { 12848 SDNode *BasePtr = Add->getOperand(0).getNode(); 12849 for (SDNode::use_iterator UI = BasePtr->use_begin(), 12850 UE = BasePtr->use_end(); 12851 UI != UE; ++UI) { 12852 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 12853 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) { 12854 // We've found another LVSL/LVSR, and this address is an aligned 12855 // multiple of that one. The results will be the same, so use the 12856 // one we've just found instead. 12857 12858 return SDValue(*UI, 0); 12859 } 12860 } 12861 } 12862 12863 if (isa<ConstantSDNode>(Add->getOperand(1))) { 12864 SDNode *BasePtr = Add->getOperand(0).getNode(); 12865 for (SDNode::use_iterator UI = BasePtr->use_begin(), 12866 UE = BasePtr->use_end(); UI != UE; ++UI) { 12867 if (UI->getOpcode() == ISD::ADD && 12868 isa<ConstantSDNode>(UI->getOperand(1)) && 12869 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() - 12870 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) % 12871 (1ULL << Bits) == 0) { 12872 SDNode *OtherAdd = *UI; 12873 for (SDNode::use_iterator VI = OtherAdd->use_begin(), 12874 VE = OtherAdd->use_end(); VI != VE; ++VI) { 12875 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 12876 cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) { 12877 return SDValue(*VI, 0); 12878 } 12879 } 12880 } 12881 } 12882 } 12883 } 12884 } 12885 12886 break; 12887 case ISD::INTRINSIC_W_CHAIN: 12888 // For little endian, VSX loads require generating lxvd2x/xxswapd. 12889 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load. 12890 if (Subtarget.needsSwapsForVSXMemOps()) { 12891 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 12892 default: 12893 break; 12894 case Intrinsic::ppc_vsx_lxvw4x: 12895 case Intrinsic::ppc_vsx_lxvd2x: 12896 return expandVSXLoadForLE(N, DCI); 12897 } 12898 } 12899 break; 12900 case ISD::INTRINSIC_VOID: 12901 // For little endian, VSX stores require generating xxswapd/stxvd2x. 12902 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store. 12903 if (Subtarget.needsSwapsForVSXMemOps()) { 12904 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 12905 default: 12906 break; 12907 case Intrinsic::ppc_vsx_stxvw4x: 12908 case Intrinsic::ppc_vsx_stxvd2x: 12909 return expandVSXStoreForLE(N, DCI); 12910 } 12911 } 12912 break; 12913 case ISD::BSWAP: 12914 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 
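// e.g. (i32 (bswap (i32 load x))) becomes a byte-swapping PPCISD::LBRX
// load of x; the now-dead bswap and original load are combined away below.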
12915 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && 12916 N->getOperand(0).hasOneUse() && 12917 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 || 12918 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && 12919 N->getValueType(0) == MVT::i64))) { 12920 SDValue Load = N->getOperand(0); 12921 LoadSDNode *LD = cast<LoadSDNode>(Load); 12922 // Create the byte-swapping load. 12923 SDValue Ops[] = { 12924 LD->getChain(), // Chain 12925 LD->getBasePtr(), // Ptr 12926 DAG.getValueType(N->getValueType(0)) // VT 12927 }; 12928 SDValue BSLoad = 12929 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl, 12930 DAG.getVTList(N->getValueType(0) == MVT::i64 ? 12931 MVT::i64 : MVT::i32, MVT::Other), 12932 Ops, LD->getMemoryVT(), LD->getMemOperand()); 12933 12934 // If this is an i16 load, insert the truncate. 12935 SDValue ResVal = BSLoad; 12936 if (N->getValueType(0) == MVT::i16) 12937 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad); 12938 12939 // First, combine the bswap away. This makes the value produced by the 12940 // load dead. 12941 DCI.CombineTo(N, ResVal); 12942 12943 // Next, combine the load away, we give it a bogus result value but a real 12944 // chain result. The result value is dead because the bswap is dead. 12945 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); 12946 12947 // Return N so it doesn't get rechecked! 12948 return SDValue(N, 0); 12949 } 12950 break; 12951 case PPCISD::VCMP: 12952 // If a VCMPo node already exists with exactly the same operands as this 12953 // node, use its result instead of this node (VCMPo computes both a CR6 and 12954 // a normal output). 12955 // 12956 if (!N->getOperand(0).hasOneUse() && 12957 !N->getOperand(1).hasOneUse() && 12958 !N->getOperand(2).hasOneUse()) { 12959 12960 // Scan all of the users of the LHS, looking for VCMPo's that match. 12961 SDNode *VCMPoNode = nullptr; 12962 12963 SDNode *LHSN = N->getOperand(0).getNode(); 12964 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 12965 UI != E; ++UI) 12966 if (UI->getOpcode() == PPCISD::VCMPo && 12967 UI->getOperand(1) == N->getOperand(1) && 12968 UI->getOperand(2) == N->getOperand(2) && 12969 UI->getOperand(0) == N->getOperand(0)) { 12970 VCMPoNode = *UI; 12971 break; 12972 } 12973 12974 // If there is no VCMPo node, or if the flag value has a single use, don't 12975 // transform this. 12976 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 12977 break; 12978 12979 // Look at the (necessarily single) use of the flag value. If it has a 12980 // chain, this transformation is more complex. Note that multiple things 12981 // could use the value result, which we should ignore. 12982 SDNode *FlagUser = nullptr; 12983 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 12984 FlagUser == nullptr; ++UI) { 12985 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 12986 SDNode *User = *UI; 12987 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 12988 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) { 12989 FlagUser = User; 12990 break; 12991 } 12992 } 12993 } 12994 12995 // If the user is a MFOCRF instruction, we know this is safe. 12996 // Otherwise we give up for right now. 
12997 if (FlagUser->getOpcode() == PPCISD::MFOCRF) 12998 return SDValue(VCMPoNode, 0); 12999 } 13000 break; 13001 case ISD::BRCOND: { 13002 SDValue Cond = N->getOperand(1); 13003 SDValue Target = N->getOperand(2); 13004 13005 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN && 13006 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() == 13007 Intrinsic::ppc_is_decremented_ctr_nonzero) { 13008 13009 // We now need to make the intrinsic dead (it cannot be instruction 13010 // selected). 13011 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0)); 13012 assert(Cond.getNode()->hasOneUse() && 13013 "Counter decrement has more than one use"); 13014 13015 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other, 13016 N->getOperand(0), Target); 13017 } 13018 } 13019 break; 13020 case ISD::BR_CC: { 13021 // If this is a branch on an altivec predicate comparison, lower this so 13022 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This 13023 // lowering is done pre-legalize, because the legalizer lowers the predicate 13024 // compare down to code that is difficult to reassemble. 13025 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 13026 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 13027 13028 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero 13029 // value. If so, pass-through the AND to get to the intrinsic. 13030 if (LHS.getOpcode() == ISD::AND && 13031 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN && 13032 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() == 13033 Intrinsic::ppc_is_decremented_ctr_nonzero && 13034 isa<ConstantSDNode>(LHS.getOperand(1)) && 13035 !isNullConstant(LHS.getOperand(1))) 13036 LHS = LHS.getOperand(0); 13037 13038 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN && 13039 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 13040 Intrinsic::ppc_is_decremented_ctr_nonzero && 13041 isa<ConstantSDNode>(RHS)) { 13042 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && 13043 "Counter decrement comparison is not EQ or NE"); 13044 13045 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 13046 bool isBDNZ = (CC == ISD::SETEQ && Val) || 13047 (CC == ISD::SETNE && !Val); 13048 13049 // We now need to make the intrinsic dead (it cannot be instruction 13050 // selected). 13051 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0)); 13052 assert(LHS.getNode()->hasOneUse() && 13053 "Counter decrement has more than one use"); 13054 13055 return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other, 13056 N->getOperand(0), N->getOperand(4)); 13057 } 13058 13059 int CompareOpc; 13060 bool isDot; 13061 13062 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 13063 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 13064 getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) { 13065 assert(isDot && "Can't compare against a vector result!"); 13066 13067 // If this is a comparison against something other than 0/1, then we know 13068 // that the condition is never/always true. 13069 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 13070 if (Val != 0 && Val != 1) { 13071 if (CC == ISD::SETEQ) // Cond never true, remove branch. 13072 return N->getOperand(0); 13073 // Always !=, turn it into an unconditional branch. 
13074 return DAG.getNode(ISD::BR, dl, MVT::Other, 13075 N->getOperand(0), N->getOperand(4)); 13076 } 13077 13078 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 13079 13080 // Create the PPCISD altivec 'dot' comparison node. 13081 SDValue Ops[] = { 13082 LHS.getOperand(2), // LHS of compare 13083 LHS.getOperand(3), // RHS of compare 13084 DAG.getConstant(CompareOpc, dl, MVT::i32) 13085 }; 13086 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue }; 13087 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 13088 13089 // Unpack the result based on how the target uses it. 13090 PPC::Predicate CompOpc; 13091 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 13092 default: // Can't happen, don't crash on invalid number though. 13093 case 0: // Branch on the value of the EQ bit of CR6. 13094 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 13095 break; 13096 case 1: // Branch on the inverted value of the EQ bit of CR6. 13097 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 13098 break; 13099 case 2: // Branch on the value of the LT bit of CR6. 13100 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 13101 break; 13102 case 3: // Branch on the inverted value of the LT bit of CR6. 13103 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT; 13104 break; 13105 } 13106 13107 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 13108 DAG.getConstant(CompOpc, dl, MVT::i32), 13109 DAG.getRegister(PPC::CR6, MVT::i32), 13110 N->getOperand(4), CompNode.getValue(1)); 13111 } 13112 break; 13113 } 13114 case ISD::BUILD_VECTOR: 13115 return DAGCombineBuildVector(N, DCI); 13116 } 13117 13118 return SDValue(); 13119 } 13120 13121 SDValue 13122 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 13123 SelectionDAG &DAG, 13124 SmallVectorImpl<SDNode *> &Created) const { 13125 // fold (sdiv X, pow2) 13126 EVT VT = N->getValueType(0); 13127 if (VT == MVT::i64 && !Subtarget.isPPC64()) 13128 return SDValue(); 13129 if ((VT != MVT::i32 && VT != MVT::i64) || 13130 !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2())) 13131 return SDValue(); 13132 13133 SDLoc DL(N); 13134 SDValue N0 = N->getOperand(0); 13135 13136 bool IsNegPow2 = (-Divisor).isPowerOf2(); 13137 unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros(); 13138 SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT); 13139 13140 SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt); 13141 Created.push_back(Op.getNode()); 13142 13143 if (IsNegPow2) { 13144 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op); 13145 Created.push_back(Op.getNode()); 13146 } 13147 13148 return Op; 13149 } 13150 13151 //===----------------------------------------------------------------------===// 13152 // Inline Assembly Support 13153 //===----------------------------------------------------------------------===// 13154 13155 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 13156 KnownBits &Known, 13157 const APInt &DemandedElts, 13158 const SelectionDAG &DAG, 13159 unsigned Depth) const { 13160 Known.resetAll(); 13161 switch (Op.getOpcode()) { 13162 default: break; 13163 case PPCISD::LBRX: { 13164 // lhbrx is known to have the top bits cleared out. 
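// An i16 byte-swapping load (lhbrx) clears the top 16 bits of the result,
// so bits 16-31 are reported as known zero.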
13165 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16) 13166 Known.Zero = 0xFFFF0000; 13167 break; 13168 } 13169 case ISD::INTRINSIC_WO_CHAIN: { 13170 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) { 13171 default: break; 13172 case Intrinsic::ppc_altivec_vcmpbfp_p: 13173 case Intrinsic::ppc_altivec_vcmpeqfp_p: 13174 case Intrinsic::ppc_altivec_vcmpequb_p: 13175 case Intrinsic::ppc_altivec_vcmpequh_p: 13176 case Intrinsic::ppc_altivec_vcmpequw_p: 13177 case Intrinsic::ppc_altivec_vcmpequd_p: 13178 case Intrinsic::ppc_altivec_vcmpgefp_p: 13179 case Intrinsic::ppc_altivec_vcmpgtfp_p: 13180 case Intrinsic::ppc_altivec_vcmpgtsb_p: 13181 case Intrinsic::ppc_altivec_vcmpgtsh_p: 13182 case Intrinsic::ppc_altivec_vcmpgtsw_p: 13183 case Intrinsic::ppc_altivec_vcmpgtsd_p: 13184 case Intrinsic::ppc_altivec_vcmpgtub_p: 13185 case Intrinsic::ppc_altivec_vcmpgtuh_p: 13186 case Intrinsic::ppc_altivec_vcmpgtuw_p: 13187 case Intrinsic::ppc_altivec_vcmpgtud_p: 13188 Known.Zero = ~1U; // All bits but the low one are known to be zero. 13189 break; 13190 } 13191 } 13192 } 13193 } 13194 13195 unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { 13196 switch (Subtarget.getDarwinDirective()) { 13197 default: break; 13198 case PPC::DIR_970: 13199 case PPC::DIR_PWR4: 13200 case PPC::DIR_PWR5: 13201 case PPC::DIR_PWR5X: 13202 case PPC::DIR_PWR6: 13203 case PPC::DIR_PWR6X: 13204 case PPC::DIR_PWR7: 13205 case PPC::DIR_PWR8: 13206 case PPC::DIR_PWR9: { 13207 if (!ML) 13208 break; 13209 13210 const PPCInstrInfo *TII = Subtarget.getInstrInfo(); 13211 13212 // For small loops (between 5 and 8 instructions), align to a 32-byte 13213 // boundary so that the entire loop fits in one instruction-cache line. 13214 uint64_t LoopSize = 0; 13215 for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I) 13216 for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) { 13217 LoopSize += TII->getInstSizeInBytes(*J); 13218 if (LoopSize > 32) 13219 break; 13220 } 13221 13222 if (LoopSize > 16 && LoopSize <= 32) 13223 return 5; 13224 13225 break; 13226 } 13227 } 13228 13229 return TargetLowering::getPrefLoopAlignment(ML); 13230 } 13231 13232 /// getConstraintType - Given a constraint, return the type of 13233 /// constraint it is for this target. 13234 PPCTargetLowering::ConstraintType 13235 PPCTargetLowering::getConstraintType(StringRef Constraint) const { 13236 if (Constraint.size() == 1) { 13237 switch (Constraint[0]) { 13238 default: break; 13239 case 'b': 13240 case 'r': 13241 case 'f': 13242 case 'd': 13243 case 'v': 13244 case 'y': 13245 return C_RegisterClass; 13246 case 'Z': 13247 // FIXME: While Z does indicate a memory constraint, it specifically 13248 // indicates an r+r address (used in conjunction with the 'y' modifier 13249 // in the replacement string). Currently, we're forcing the base 13250 // register to be r0 in the asm printer (which is interpreted as zero) 13251 // and forming the complete address in the second register. This is 13252 // suboptimal. 13253 return C_Memory; 13254 } 13255 } else if (Constraint == "wc") { // individual CR bits. 13256 return C_RegisterClass; 13257 } else if (Constraint == "wa" || Constraint == "wd" || 13258 Constraint == "wf" || Constraint == "ws") { 13259 return C_RegisterClass; // VSX registers. 13260 } 13261 return TargetLowering::getConstraintType(Constraint); 13262 } 13263 13264 /// Examine constraint type and operand type and determine a weight value. 
13265 /// This object must already have been set up with the operand type 13266 /// and the current alternative constraint selected. 13267 TargetLowering::ConstraintWeight 13268 PPCTargetLowering::getSingleConstraintMatchWeight( 13269 AsmOperandInfo &info, const char *constraint) const { 13270 ConstraintWeight weight = CW_Invalid; 13271 Value *CallOperandVal = info.CallOperandVal; 13272 // If we don't have a value, we can't do a match, 13273 // but allow it at the lowest weight. 13274 if (!CallOperandVal) 13275 return CW_Default; 13276 Type *type = CallOperandVal->getType(); 13277 13278 // Look at the constraint type. 13279 if (StringRef(constraint) == "wc" && type->isIntegerTy(1)) 13280 return CW_Register; // an individual CR bit. 13281 else if ((StringRef(constraint) == "wa" || 13282 StringRef(constraint) == "wd" || 13283 StringRef(constraint) == "wf") && 13284 type->isVectorTy()) 13285 return CW_Register; 13286 else if (StringRef(constraint) == "ws" && type->isDoubleTy()) 13287 return CW_Register; 13288 13289 switch (*constraint) { 13290 default: 13291 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 13292 break; 13293 case 'b': 13294 if (type->isIntegerTy()) 13295 weight = CW_Register; 13296 break; 13297 case 'f': 13298 if (type->isFloatTy()) 13299 weight = CW_Register; 13300 break; 13301 case 'd': 13302 if (type->isDoubleTy()) 13303 weight = CW_Register; 13304 break; 13305 case 'v': 13306 if (type->isVectorTy()) 13307 weight = CW_Register; 13308 break; 13309 case 'y': 13310 weight = CW_Register; 13311 break; 13312 case 'Z': 13313 weight = CW_Memory; 13314 break; 13315 } 13316 return weight; 13317 } 13318 13319 std::pair<unsigned, const TargetRegisterClass *> 13320 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 13321 StringRef Constraint, 13322 MVT VT) const { 13323 if (Constraint.size() == 1) { 13324 // GCC RS6000 Constraint Letters 13325 switch (Constraint[0]) { 13326 case 'b': // R1-R31 13327 if (VT == MVT::i64 && Subtarget.isPPC64()) 13328 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass); 13329 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass); 13330 case 'r': // R0-R31 13331 if (VT == MVT::i64 && Subtarget.isPPC64()) 13332 return std::make_pair(0U, &PPC::G8RCRegClass); 13333 return std::make_pair(0U, &PPC::GPRCRegClass); 13334 // 'd' and 'f' constraints are both defined to be "the floating point 13335 // registers", where one is for 32-bit and the other for 64-bit. We don't 13336 // really care overly much here so just give them all the same reg classes. 
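// (Illustrative note only: an "=f" or "=d" output of type double is given
// F8RC below while a float gets F4RC; the SPE and QPX branches follow the
// same pattern for their own register files.)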
13337 case 'd': 13338 case 'f': 13339 if (Subtarget.hasSPE()) { 13340 if (VT == MVT::f32 || VT == MVT::i32) 13341 return std::make_pair(0U, &PPC::SPE4RCRegClass); 13342 if (VT == MVT::f64 || VT == MVT::i64) 13343 return std::make_pair(0U, &PPC::SPERCRegClass); 13344 } else { 13345 if (VT == MVT::f32 || VT == MVT::i32) 13346 return std::make_pair(0U, &PPC::F4RCRegClass); 13347 if (VT == MVT::f64 || VT == MVT::i64) 13348 return std::make_pair(0U, &PPC::F8RCRegClass); 13349 if (VT == MVT::v4f64 && Subtarget.hasQPX()) 13350 return std::make_pair(0U, &PPC::QFRCRegClass); 13351 if (VT == MVT::v4f32 && Subtarget.hasQPX()) 13352 return std::make_pair(0U, &PPC::QSRCRegClass); 13353 } 13354 break; 13355 case 'v': 13356 if (VT == MVT::v4f64 && Subtarget.hasQPX()) 13357 return std::make_pair(0U, &PPC::QFRCRegClass); 13358 if (VT == MVT::v4f32 && Subtarget.hasQPX()) 13359 return std::make_pair(0U, &PPC::QSRCRegClass); 13360 if (Subtarget.hasAltivec()) 13361 return std::make_pair(0U, &PPC::VRRCRegClass); 13362 break; 13363 case 'y': // crrc 13364 return std::make_pair(0U, &PPC::CRRCRegClass); 13365 } 13366 } else if (Constraint == "wc" && Subtarget.useCRBits()) { 13367 // An individual CR bit. 13368 return std::make_pair(0U, &PPC::CRBITRCRegClass); 13369 } else if ((Constraint == "wa" || Constraint == "wd" || 13370 Constraint == "wf") && Subtarget.hasVSX()) { 13371 return std::make_pair(0U, &PPC::VSRCRegClass); 13372 } else if (Constraint == "ws" && Subtarget.hasVSX()) { 13373 if (VT == MVT::f32 && Subtarget.hasP8Vector()) 13374 return std::make_pair(0U, &PPC::VSSRCRegClass); 13375 else 13376 return std::make_pair(0U, &PPC::VSFRCRegClass); 13377 } 13378 13379 std::pair<unsigned, const TargetRegisterClass *> R = 13380 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 13381 13382 // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers 13383 // (which we call X[0-9]+). If a 64-bit value has been requested, and a 13384 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent 13385 // register. 13386 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use 13387 // the AsmName field from *RegisterInfo.td, then this would not be necessary. 13388 if (R.first && VT == MVT::i64 && Subtarget.isPPC64() && 13389 PPC::GPRCRegClass.contains(R.first)) 13390 return std::make_pair(TRI->getMatchingSuperReg(R.first, 13391 PPC::sub_32, &PPC::G8RCRegClass), 13392 &PPC::G8RCRegClass); 13393 13394 // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same. 13395 if (!R.second && StringRef("{cc}").equals_lower(Constraint)) { 13396 R.first = PPC::CR0; 13397 R.second = &PPC::CRRCRegClass; 13398 } 13399 13400 return R; 13401 } 13402 13403 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 13404 /// vector. If it is invalid, don't add anything to Ops. 13405 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 13406 std::string &Constraint, 13407 std::vector<SDValue>&Ops, 13408 SelectionDAG &DAG) const { 13409 SDValue Result; 13410 13411 // Only support length 1 constraints. 13412 if (Constraint.length() > 1) return; 13413 13414 char Letter = Constraint[0]; 13415 switch (Letter) { 13416 default: break; 13417 case 'I': 13418 case 'J': 13419 case 'K': 13420 case 'L': 13421 case 'M': 13422 case 'N': 13423 case 'O': 13424 case 'P': { 13425 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op); 13426 if (!CST) return; // Must be an immediate to match. 
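// (Illustrative use, not from the original source: for
//   asm("addi %0,%1,%2" : "=r"(x) : "r"(y), "I"(42));
// the 'I' case below accepts 42 because it fits in a signed 16-bit field,
// whereas 0x12345 does not fit and nothing is added for it.)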
13427     SDLoc dl(Op);
13428     int64_t Value = CST->getSExtValue();
13429     EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
13430                          // numbers are printed as such.
13431     switch (Letter) {
13432     default: llvm_unreachable("Unknown constraint letter!");
13433     case 'I': // "I" is a signed 16-bit constant.
13434       if (isInt<16>(Value))
13435         Result = DAG.getTargetConstant(Value, dl, TCVT);
13436       break;
13437     case 'J': // "J" is a constant with only the high-order 16 bits nonzero.
13438       if (isShiftedUInt<16, 16>(Value))
13439         Result = DAG.getTargetConstant(Value, dl, TCVT);
13440       break;
13441     case 'L': // "L" is a signed 16-bit constant shifted left 16 bits.
13442       if (isShiftedInt<16, 16>(Value))
13443         Result = DAG.getTargetConstant(Value, dl, TCVT);
13444       break;
13445     case 'K': // "K" is a constant with only the low-order 16 bits nonzero.
13446       if (isUInt<16>(Value))
13447         Result = DAG.getTargetConstant(Value, dl, TCVT);
13448       break;
13449     case 'M': // "M" is a constant that is greater than 31.
13450       if (Value > 31)
13451         Result = DAG.getTargetConstant(Value, dl, TCVT);
13452       break;
13453     case 'N': // "N" is a positive constant that is an exact power of two.
13454       if (Value > 0 && isPowerOf2_64(Value))
13455         Result = DAG.getTargetConstant(Value, dl, TCVT);
13456       break;
13457     case 'O': // "O" is the constant zero.
13458       if (Value == 0)
13459         Result = DAG.getTargetConstant(Value, dl, TCVT);
13460       break;
13461     case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
13462       if (isInt<16>(-Value))
13463         Result = DAG.getTargetConstant(Value, dl, TCVT);
13464       break;
13465     }
13466     break;
13467   }
13468   }
13469 
13470   if (Result.getNode()) {
13471     Ops.push_back(Result);
13472     return;
13473   }
13474 
13475   // Handle standard constraint letters.
13476   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
13477 }
13478 
13479 // isLegalAddressingMode - Return true if the addressing mode represented
13480 // by AM is legal for this target, for a load/store of the specified type.
13481 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
13482                                               const AddrMode &AM, Type *Ty,
13483                                               unsigned AS, Instruction *I) const {
13484   // PPC does not allow r+i addressing modes for vectors!
13485   if (Ty->isVectorTy() && AM.BaseOffs != 0)
13486     return false;
13487 
13488   // PPC allows a sign-extended 16-bit immediate field.
13489   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
13490     return false;
13491 
13492   // No global is ever allowed as a base.
13493   if (AM.BaseGV)
13494     return false;
13495 
13496   // PPC only supports r+r and r+i addressing; reject any other scaled forms.
13497   switch (AM.Scale) {
13498   case 0: // "r+i" or just "i", depending on HasBaseReg.
13499     break;
13500   case 1:
13501     if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
13502       return false;
13503     // Otherwise we have r+r or r+i.
13504     break;
13505   case 2:
13506     if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
13507       return false;
13508     // Allow 2*r as r+r.
13509     break;
13510   default:
13511     // No other scales are supported.
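    // (e.g. a loop-strength-reduced mode such as 4*r or r+4*r has no PPC
    // encoding, so it is rejected here instead of forcing an extra multiply.)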
13512 return false; 13513 } 13514 13515 return true; 13516 } 13517 13518 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op, 13519 SelectionDAG &DAG) const { 13520 MachineFunction &MF = DAG.getMachineFunction(); 13521 MachineFrameInfo &MFI = MF.getFrameInfo(); 13522 MFI.setReturnAddressIsTaken(true); 13523 13524 if (verifyReturnAddressArgumentIsConstant(Op, DAG)) 13525 return SDValue(); 13526 13527 SDLoc dl(Op); 13528 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 13529 13530 // Make sure the function does not optimize away the store of the RA to 13531 // the stack. 13532 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 13533 FuncInfo->setLRStoreRequired(); 13534 bool isPPC64 = Subtarget.isPPC64(); 13535 auto PtrVT = getPointerTy(MF.getDataLayout()); 13536 13537 if (Depth > 0) { 13538 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 13539 SDValue Offset = 13540 DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl, 13541 isPPC64 ? MVT::i64 : MVT::i32); 13542 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 13543 DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset), 13544 MachinePointerInfo()); 13545 } 13546 13547 // Just load the return address off the stack. 13548 SDValue RetAddrFI = getReturnAddrFrameIndex(DAG); 13549 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI, 13550 MachinePointerInfo()); 13551 } 13552 13553 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, 13554 SelectionDAG &DAG) const { 13555 SDLoc dl(Op); 13556 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 13557 13558 MachineFunction &MF = DAG.getMachineFunction(); 13559 MachineFrameInfo &MFI = MF.getFrameInfo(); 13560 MFI.setFrameAddressIsTaken(true); 13561 13562 EVT PtrVT = getPointerTy(MF.getDataLayout()); 13563 bool isPPC64 = PtrVT == MVT::i64; 13564 13565 // Naked functions never have a frame pointer, and so we use r1. For all 13566 // other functions, this decision must be delayed until during PEI. 13567 unsigned FrameReg; 13568 if (MF.getFunction().hasFnAttribute(Attribute::Naked)) 13569 FrameReg = isPPC64 ? PPC::X1 : PPC::R1; 13570 else 13571 FrameReg = isPPC64 ? PPC::FP8 : PPC::FP; 13572 13573 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, 13574 PtrVT); 13575 while (Depth--) 13576 FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(), 13577 FrameAddr, MachinePointerInfo()); 13578 return FrameAddr; 13579 } 13580 13581 // FIXME? Maybe this could be a TableGen attribute on some registers and 13582 // this table could be generated automatically from RegInfo. 13583 unsigned PPCTargetLowering::getRegisterByName(const char* RegName, EVT VT, 13584 SelectionDAG &DAG) const { 13585 bool isPPC64 = Subtarget.isPPC64(); 13586 bool isDarwinABI = Subtarget.isDarwinABI(); 13587 13588 if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) || 13589 (!isPPC64 && VT != MVT::i32)) 13590 report_fatal_error("Invalid register global variable type"); 13591 13592 bool is64Bit = isPPC64 && VT == MVT::i64; 13593 unsigned Reg = StringSwitch<unsigned>(RegName) 13594 .Case("r1", is64Bit ? PPC::X1 : PPC::R1) 13595 .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2) 13596 .Case("r13", (!isPPC64 && isDarwinABI) ? 0 : 13597 (is64Bit ? 
PPC::X13 : PPC::R13)) 13598 .Default(0); 13599 13600 if (Reg) 13601 return Reg; 13602 report_fatal_error("Invalid register name global variable"); 13603 } 13604 13605 bool 13606 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 13607 // The PowerPC target isn't yet aware of offsets. 13608 return false; 13609 } 13610 13611 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 13612 const CallInst &I, 13613 MachineFunction &MF, 13614 unsigned Intrinsic) const { 13615 switch (Intrinsic) { 13616 case Intrinsic::ppc_qpx_qvlfd: 13617 case Intrinsic::ppc_qpx_qvlfs: 13618 case Intrinsic::ppc_qpx_qvlfcd: 13619 case Intrinsic::ppc_qpx_qvlfcs: 13620 case Intrinsic::ppc_qpx_qvlfiwa: 13621 case Intrinsic::ppc_qpx_qvlfiwz: 13622 case Intrinsic::ppc_altivec_lvx: 13623 case Intrinsic::ppc_altivec_lvxl: 13624 case Intrinsic::ppc_altivec_lvebx: 13625 case Intrinsic::ppc_altivec_lvehx: 13626 case Intrinsic::ppc_altivec_lvewx: 13627 case Intrinsic::ppc_vsx_lxvd2x: 13628 case Intrinsic::ppc_vsx_lxvw4x: { 13629 EVT VT; 13630 switch (Intrinsic) { 13631 case Intrinsic::ppc_altivec_lvebx: 13632 VT = MVT::i8; 13633 break; 13634 case Intrinsic::ppc_altivec_lvehx: 13635 VT = MVT::i16; 13636 break; 13637 case Intrinsic::ppc_altivec_lvewx: 13638 VT = MVT::i32; 13639 break; 13640 case Intrinsic::ppc_vsx_lxvd2x: 13641 VT = MVT::v2f64; 13642 break; 13643 case Intrinsic::ppc_qpx_qvlfd: 13644 VT = MVT::v4f64; 13645 break; 13646 case Intrinsic::ppc_qpx_qvlfs: 13647 VT = MVT::v4f32; 13648 break; 13649 case Intrinsic::ppc_qpx_qvlfcd: 13650 VT = MVT::v2f64; 13651 break; 13652 case Intrinsic::ppc_qpx_qvlfcs: 13653 VT = MVT::v2f32; 13654 break; 13655 default: 13656 VT = MVT::v4i32; 13657 break; 13658 } 13659 13660 Info.opc = ISD::INTRINSIC_W_CHAIN; 13661 Info.memVT = VT; 13662 Info.ptrVal = I.getArgOperand(0); 13663 Info.offset = -VT.getStoreSize()+1; 13664 Info.size = 2*VT.getStoreSize()-1; 13665 Info.align = 1; 13666 Info.flags = MachineMemOperand::MOLoad; 13667 return true; 13668 } 13669 case Intrinsic::ppc_qpx_qvlfda: 13670 case Intrinsic::ppc_qpx_qvlfsa: 13671 case Intrinsic::ppc_qpx_qvlfcda: 13672 case Intrinsic::ppc_qpx_qvlfcsa: 13673 case Intrinsic::ppc_qpx_qvlfiwaa: 13674 case Intrinsic::ppc_qpx_qvlfiwza: { 13675 EVT VT; 13676 switch (Intrinsic) { 13677 case Intrinsic::ppc_qpx_qvlfda: 13678 VT = MVT::v4f64; 13679 break; 13680 case Intrinsic::ppc_qpx_qvlfsa: 13681 VT = MVT::v4f32; 13682 break; 13683 case Intrinsic::ppc_qpx_qvlfcda: 13684 VT = MVT::v2f64; 13685 break; 13686 case Intrinsic::ppc_qpx_qvlfcsa: 13687 VT = MVT::v2f32; 13688 break; 13689 default: 13690 VT = MVT::v4i32; 13691 break; 13692 } 13693 13694 Info.opc = ISD::INTRINSIC_W_CHAIN; 13695 Info.memVT = VT; 13696 Info.ptrVal = I.getArgOperand(0); 13697 Info.offset = 0; 13698 Info.size = VT.getStoreSize(); 13699 Info.align = 1; 13700 Info.flags = MachineMemOperand::MOLoad; 13701 return true; 13702 } 13703 case Intrinsic::ppc_qpx_qvstfd: 13704 case Intrinsic::ppc_qpx_qvstfs: 13705 case Intrinsic::ppc_qpx_qvstfcd: 13706 case Intrinsic::ppc_qpx_qvstfcs: 13707 case Intrinsic::ppc_qpx_qvstfiw: 13708 case Intrinsic::ppc_altivec_stvx: 13709 case Intrinsic::ppc_altivec_stvxl: 13710 case Intrinsic::ppc_altivec_stvebx: 13711 case Intrinsic::ppc_altivec_stvehx: 13712 case Intrinsic::ppc_altivec_stvewx: 13713 case Intrinsic::ppc_vsx_stxvd2x: 13714 case Intrinsic::ppc_vsx_stxvw4x: { 13715 EVT VT; 13716 switch (Intrinsic) { 13717 case Intrinsic::ppc_altivec_stvebx: 13718 VT = MVT::i8; 13719 break; 13720 case Intrinsic::ppc_altivec_stvehx: 
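// stvehx stores a single halfword element, so report i16 here, mirroring the
// lvehx case in the load handling above.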
13721     VT = MVT::i16;
13722     break;
13723   case Intrinsic::ppc_altivec_stvewx:
13724     VT = MVT::i32;
13725     break;
13726   case Intrinsic::ppc_vsx_stxvd2x:
13727     VT = MVT::v2f64;
13728     break;
13729   case Intrinsic::ppc_qpx_qvstfd:
13730     VT = MVT::v4f64;
13731     break;
13732   case Intrinsic::ppc_qpx_qvstfs:
13733     VT = MVT::v4f32;
13734     break;
13735   case Intrinsic::ppc_qpx_qvstfcd:
13736     VT = MVT::v2f64;
13737     break;
13738   case Intrinsic::ppc_qpx_qvstfcs:
13739     VT = MVT::v2f32;
13740     break;
13741   default:
13742     VT = MVT::v4i32;
13743     break;
13744   }
13745 
13746   Info.opc = ISD::INTRINSIC_VOID;
13747   Info.memVT = VT;
13748   Info.ptrVal = I.getArgOperand(1);
13749   Info.offset = -VT.getStoreSize()+1;
13750   Info.size = 2*VT.getStoreSize()-1;
13751   Info.align = 1;
13752   Info.flags = MachineMemOperand::MOStore;
13753   return true;
13754 }
13755 case Intrinsic::ppc_qpx_qvstfda:
13756 case Intrinsic::ppc_qpx_qvstfsa:
13757 case Intrinsic::ppc_qpx_qvstfcda:
13758 case Intrinsic::ppc_qpx_qvstfcsa:
13759 case Intrinsic::ppc_qpx_qvstfiwa: {
13760   EVT VT;
13761   switch (Intrinsic) {
13762   case Intrinsic::ppc_qpx_qvstfda:
13763     VT = MVT::v4f64;
13764     break;
13765   case Intrinsic::ppc_qpx_qvstfsa:
13766     VT = MVT::v4f32;
13767     break;
13768   case Intrinsic::ppc_qpx_qvstfcda:
13769     VT = MVT::v2f64;
13770     break;
13771   case Intrinsic::ppc_qpx_qvstfcsa:
13772     VT = MVT::v2f32;
13773     break;
13774   default:
13775     VT = MVT::v4i32;
13776     break;
13777   }
13778 
13779   Info.opc = ISD::INTRINSIC_VOID;
13780   Info.memVT = VT;
13781   Info.ptrVal = I.getArgOperand(1);
13782   Info.offset = 0;
13783   Info.size = VT.getStoreSize();
13784   Info.align = 1;
13785   Info.flags = MachineMemOperand::MOStore;
13786   return true;
13787 }
13788 default:
13789   break;
13790 }
13791 
13792 return false;
13793 }
13794 
13795 /// getOptimalMemOpType - Returns the target-specific optimal type for load
13796 /// and store operations as a result of memset, memcpy, and memmove
13797 /// lowering. If DstAlign is zero, the destination alignment can satisfy
13798 /// any constraint. Similarly, if SrcAlign is zero there is no need to check
13799 /// it against an alignment requirement, probably because the source does
13800 /// not need to be loaded. If 'IsMemset' is true, this is expanding a
13801 /// memset. If 'ZeroMemset' is true, it is a memset of zero. 'MemcpyStrSrc'
13802 /// indicates whether the memcpy source is constant so it does not need to
13803 /// be loaded.
13804 /// It returns EVT::Other if the type should be determined using generic
13805 /// target-independent logic.
13806 EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
13807                                            unsigned DstAlign, unsigned SrcAlign,
13808                                            bool IsMemset, bool ZeroMemset,
13809                                            bool MemcpyStrSrc,
13810                                            MachineFunction &MF) const {
13811   if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
13812     const Function &F = MF.getFunction();
13813     // When expanding a memset, require at least two QPX instructions to cover
13814     // the cost of loading the value to be stored from the constant pool.
13815     if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
13816         (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
13817         !F.hasFnAttribute(Attribute::NoImplicitFloat)) {
13818       return MVT::v4f64;
13819     }
13820 
13821     // We should use Altivec/VSX loads and stores when available. For unaligned
13822     // addresses, unaligned VSX loads are only fast starting with the P8.
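    // (Illustrative sketch of the effect: returning v4i32 below lets the
    // generic memcpy expansion copy a 32-byte, 16-byte-aligned block with two
    // vector load/store pairs rather than four i64 pairs.)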
13823 if (Subtarget.hasAltivec() && Size >= 16 && 13824 (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) || 13825 ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector()))) 13826 return MVT::v4i32; 13827 } 13828 13829 if (Subtarget.isPPC64()) { 13830 return MVT::i64; 13831 } 13832 13833 return MVT::i32; 13834 } 13835 13836 /// Returns true if it is beneficial to convert a load of a constant 13837 /// to just the constant itself. 13838 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 13839 Type *Ty) const { 13840 assert(Ty->isIntegerTy()); 13841 13842 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 13843 return !(BitSize == 0 || BitSize > 64); 13844 } 13845 13846 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { 13847 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 13848 return false; 13849 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 13850 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 13851 return NumBits1 == 64 && NumBits2 == 32; 13852 } 13853 13854 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 13855 if (!VT1.isInteger() || !VT2.isInteger()) 13856 return false; 13857 unsigned NumBits1 = VT1.getSizeInBits(); 13858 unsigned NumBits2 = VT2.getSizeInBits(); 13859 return NumBits1 == 64 && NumBits2 == 32; 13860 } 13861 13862 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { 13863 // Generally speaking, zexts are not free, but they are free when they can be 13864 // folded with other operations. 13865 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) { 13866 EVT MemVT = LD->getMemoryVT(); 13867 if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 || 13868 (Subtarget.isPPC64() && MemVT == MVT::i32)) && 13869 (LD->getExtensionType() == ISD::NON_EXTLOAD || 13870 LD->getExtensionType() == ISD::ZEXTLOAD)) 13871 return true; 13872 } 13873 13874 // FIXME: Add other cases... 13875 // - 32-bit shifts with a zext to i64 13876 // - zext after ctlz, bswap, etc. 13877 // - zext after and by a constant mask 13878 13879 return TargetLowering::isZExtFree(Val, VT2); 13880 } 13881 13882 bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const { 13883 assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() && 13884 "invalid fpext types"); 13885 // Extending to float128 is not free. 13886 if (DestVT == MVT::f128) 13887 return false; 13888 return true; 13889 } 13890 13891 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 13892 return isInt<16>(Imm) || isUInt<16>(Imm); 13893 } 13894 13895 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const { 13896 return isInt<16>(Imm) || isUInt<16>(Imm); 13897 } 13898 13899 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, 13900 unsigned, 13901 unsigned, 13902 bool *Fast) const { 13903 if (DisablePPCUnaligned) 13904 return false; 13905 13906 // PowerPC supports unaligned memory access for simple non-vector types. 13907 // Although accessing unaligned addresses is not as efficient as accessing 13908 // aligned addresses, it is generally more efficient than manual expansion, 13909 // and generally only traps for software emulation when crossing page 13910 // boundaries. 
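// (For example, an unaligned i32 or f64 access is reported as fast here,
// while an unaligned v4i32 access is only accepted when VSX is available --
// see the vector checks below.)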
13911 13912 if (!VT.isSimple()) 13913 return false; 13914 13915 if (VT.getSimpleVT().isVector()) { 13916 if (Subtarget.hasVSX()) { 13917 if (VT != MVT::v2f64 && VT != MVT::v2i64 && 13918 VT != MVT::v4f32 && VT != MVT::v4i32) 13919 return false; 13920 } else { 13921 return false; 13922 } 13923 } 13924 13925 if (VT == MVT::ppcf128) 13926 return false; 13927 13928 if (Fast) 13929 *Fast = true; 13930 13931 return true; 13932 } 13933 13934 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { 13935 VT = VT.getScalarType(); 13936 13937 if (!VT.isSimple()) 13938 return false; 13939 13940 switch (VT.getSimpleVT().SimpleTy) { 13941 case MVT::f32: 13942 case MVT::f64: 13943 return true; 13944 case MVT::f128: 13945 return (EnableQuadPrecision && Subtarget.hasP9Vector()); 13946 default: 13947 break; 13948 } 13949 13950 return false; 13951 } 13952 13953 const MCPhysReg * 13954 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const { 13955 // LR is a callee-save register, but we must treat it as clobbered by any call 13956 // site. Hence we include LR in the scratch registers, which are in turn added 13957 // as implicit-defs for stackmaps and patchpoints. The same reasoning applies 13958 // to CTR, which is used by any indirect call. 13959 static const MCPhysReg ScratchRegs[] = { 13960 PPC::X12, PPC::LR8, PPC::CTR8, 0 13961 }; 13962 13963 return ScratchRegs; 13964 } 13965 13966 unsigned PPCTargetLowering::getExceptionPointerRegister( 13967 const Constant *PersonalityFn) const { 13968 return Subtarget.isPPC64() ? PPC::X3 : PPC::R3; 13969 } 13970 13971 unsigned PPCTargetLowering::getExceptionSelectorRegister( 13972 const Constant *PersonalityFn) const { 13973 return Subtarget.isPPC64() ? PPC::X4 : PPC::R4; 13974 } 13975 13976 bool 13977 PPCTargetLowering::shouldExpandBuildVectorWithShuffles( 13978 EVT VT , unsigned DefinedValues) const { 13979 if (VT == MVT::v2i64) 13980 return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves 13981 13982 if (Subtarget.hasVSX() || Subtarget.hasQPX()) 13983 return true; 13984 13985 return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues); 13986 } 13987 13988 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const { 13989 if (DisableILPPref || Subtarget.enableMachineScheduler()) 13990 return TargetLowering::getSchedulingPreference(N); 13991 13992 return Sched::ILP; 13993 } 13994 13995 // Create a fast isel object. 
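// (The target-specific implementation lives in PPCFastISel.cpp; fast-isel is
// mainly used at -O0, where full SelectionDAG overhead is not worth paying.)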
13996 FastISel * 13997 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo, 13998 const TargetLibraryInfo *LibInfo) const { 13999 return PPC::createFastISel(FuncInfo, LibInfo); 14000 } 14001 14002 void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { 14003 if (Subtarget.isDarwinABI()) return; 14004 if (!Subtarget.isPPC64()) return; 14005 14006 // Update IsSplitCSR in PPCFunctionInfo 14007 PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>(); 14008 PFI->setIsSplitCSR(true); 14009 } 14010 14011 void PPCTargetLowering::insertCopiesSplitCSR( 14012 MachineBasicBlock *Entry, 14013 const SmallVectorImpl<MachineBasicBlock *> &Exits) const { 14014 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 14015 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); 14016 if (!IStart) 14017 return; 14018 14019 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 14020 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); 14021 MachineBasicBlock::iterator MBBI = Entry->begin(); 14022 for (const MCPhysReg *I = IStart; *I; ++I) { 14023 const TargetRegisterClass *RC = nullptr; 14024 if (PPC::G8RCRegClass.contains(*I)) 14025 RC = &PPC::G8RCRegClass; 14026 else if (PPC::F8RCRegClass.contains(*I)) 14027 RC = &PPC::F8RCRegClass; 14028 else if (PPC::CRRCRegClass.contains(*I)) 14029 RC = &PPC::CRRCRegClass; 14030 else if (PPC::VRRCRegClass.contains(*I)) 14031 RC = &PPC::VRRCRegClass; 14032 else 14033 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 14034 14035 unsigned NewVR = MRI->createVirtualRegister(RC); 14036 // Create copy from CSR to a virtual register. 14037 // FIXME: this currently does not emit CFI pseudo-instructions, it works 14038 // fine for CXX_FAST_TLS since the C++-style TLS access functions should be 14039 // nounwind. If we want to generalize this later, we may need to emit 14040 // CFI pseudo-instructions. 14041 assert(Entry->getParent()->getFunction().hasFnAttribute( 14042 Attribute::NoUnwind) && 14043 "Function should be nounwind in insertCopiesSplitCSR!"); 14044 Entry->addLiveIn(*I); 14045 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) 14046 .addReg(*I); 14047 14048 // Insert the copy-back instructions right before the terminator 14049 for (auto *Exit : Exits) 14050 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), 14051 TII->get(TargetOpcode::COPY), *I) 14052 .addReg(NewVR); 14053 } 14054 } 14055 14056 // Override to enable LOAD_STACK_GUARD lowering on Linux. 14057 bool PPCTargetLowering::useLoadStackGuardNode() const { 14058 if (!Subtarget.isTargetLinux()) 14059 return TargetLowering::useLoadStackGuardNode(); 14060 return true; 14061 } 14062 14063 // Override to disable global variable loading on Linux. 14064 void PPCTargetLowering::insertSSPDeclarations(Module &M) const { 14065 if (!Subtarget.isTargetLinux()) 14066 return TargetLowering::insertSSPDeclarations(M); 14067 } 14068 14069 bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 14070 if (!VT.isSimple() || !Subtarget.hasVSX()) 14071 return false; 14072 14073 switch(VT.getSimpleVT().SimpleTy) { 14074 default: 14075 // For FP types that are currently not supported by PPC backend, return 14076 // false. Examples: f16, f80. 
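    // (Presumably only +0.0 is cheap because it can be materialized with a
    // vector XOR-style zeroing idiom; any other constant would come from the
    // constant pool, hence the isPosZero() checks below.)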
14077 return false; 14078 case MVT::f32: 14079 case MVT::f64: 14080 case MVT::ppcf128: 14081 return Imm.isPosZero(); 14082 } 14083 } 14084 14085 // For vector shift operation op, fold 14086 // (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y) 14087 static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N, 14088 SelectionDAG &DAG) { 14089 SDValue N0 = N->getOperand(0); 14090 SDValue N1 = N->getOperand(1); 14091 EVT VT = N0.getValueType(); 14092 unsigned OpSizeInBits = VT.getScalarSizeInBits(); 14093 unsigned Opcode = N->getOpcode(); 14094 unsigned TargetOpcode; 14095 14096 switch (Opcode) { 14097 default: 14098 llvm_unreachable("Unexpected shift operation"); 14099 case ISD::SHL: 14100 TargetOpcode = PPCISD::SHL; 14101 break; 14102 case ISD::SRL: 14103 TargetOpcode = PPCISD::SRL; 14104 break; 14105 case ISD::SRA: 14106 TargetOpcode = PPCISD::SRA; 14107 break; 14108 } 14109 14110 if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) && 14111 N1->getOpcode() == ISD::AND) 14112 if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1))) 14113 if (Mask->getZExtValue() == OpSizeInBits - 1) 14114 return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0)); 14115 14116 return SDValue(); 14117 } 14118 14119 SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const { 14120 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG)) 14121 return Value; 14122 14123 return SDValue(); 14124 } 14125 14126 SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const { 14127 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG)) 14128 return Value; 14129 14130 return SDValue(); 14131 } 14132 14133 SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const { 14134 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG)) 14135 return Value; 14136 14137 return SDValue(); 14138 } 14139 14140 bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { 14141 // Only duplicate to increase tail-calls for the 64bit SysV ABIs. 14142 if (!Subtarget.isSVR4ABI() || !Subtarget.isPPC64()) 14143 return false; 14144 14145 // If not a tail call then no need to proceed. 14146 if (!CI->isTailCall()) 14147 return false; 14148 14149 // If tail calls are disabled for the caller then we are done. 14150 const Function *Caller = CI->getParent()->getParent(); 14151 auto Attr = Caller->getFnAttribute("disable-tail-calls"); 14152 if (Attr.getValueAsString() == "true") 14153 return false; 14154 14155 // If sibling calls have been disabled and tail-calls aren't guaranteed 14156 // there is no reason to duplicate. 14157 auto &TM = getTargetMachine(); 14158 if (!TM.Options.GuaranteedTailCallOpt && DisableSCO) 14159 return false; 14160 14161 // Can't tail call a function called indirectly, or if it has variadic args. 14162 const Function *Callee = CI->getCalledFunction(); 14163 if (!Callee || Callee->isVarArg()) 14164 return false; 14165 14166 // Make sure the callee and caller calling conventions are eligible for tco. 14167 if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(), 14168 CI->getCallingConv())) 14169 return false; 14170 14171 // If the function is local then we have a good chance at tail-calling it 14172 return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee); 14173 } 14174 14175 bool PPCTargetLowering:: 14176 isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const { 14177 const Value *Mask = AndI.getOperand(1); 14178 // If the mask is suitable for andi. or andis. 
we should sink the and. 14179 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) { 14180 // Can't handle constants wider than 64-bits. 14181 if (CI->getBitWidth() > 64) 14182 return false; 14183 int64_t ConstVal = CI->getZExtValue(); 14184 return isUInt<16>(ConstVal) || 14185 (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF)); 14186 } 14187 14188 // For non-constant masks, we can always use the record-form and. 14189 return true; 14190 } 14191
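// (Worked example for the constant-mask test above: 0x0000FFFF fits andi.,
// 0x00FF0000 fits andis. since its low 16 bits are clear, while 0x00FFFF00
// fits neither, so the constant path returns false for it.)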