//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

static cl::opt<bool> EnableQuadPrecision("enable-ppc-quad-precision",
cl::desc("enable quad precision float support on ppc"), cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");

static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!useSoftFloat()) {
    if (hasSPE()) {
      addRegisterClass(MVT::f32, &PPC::SPE4RCRegClass);
      addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
    } else {
      addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
      addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
    }
  }

  // Match BITREVERSE to customized fast code sequence in the td file.
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);

  // Sub-word ATOMIC_CMP_SWAP needs to ensure that the input is zero-extended.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  if (!Subtarget.hasSPE()) {
    setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
  }

  // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
                         isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9.
  // On P9 we may use a hardware instruction to compute the remainder.
  // The instructions are not legalized directly because in the cases where the
  // result of both the remainder and the division is required, it is more
  // efficient to compute the remainder from the result of the division rather
  // than use the remainder instruction.
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
    setOperationAction(ISD::SREM, MVT::i64, Custom);
    setOperationAction(ISD::UREM, MVT::i64, Custom);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }

  if (Subtarget.hasP9Vector()) {
    setOperationAction(ISD::ABS, MVT::v4i32, Legal);
    setOperationAction(ISD::ABS, MVT::v8i16, Legal);
    setOperationAction(ISD::ABS, MVT::v16i8, Legal);
  }

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);
  if (Subtarget.hasSPE()) {
    setOperationAction(ISD::FMA , MVT::f64, Expand);
    setOperationAction(ISD::FMA , MVT::f32, Expand);
  } else {
    setOperationAction(ISD::FMA , MVT::f64, Legal);
    setOperationAction(ISD::FMA , MVT::f32, Legal);
  }

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, but we can use the vector BSWAP instruction
  // xxbrd to speed up scalar BSWAP64.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32 , Expand);
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::BSWAP, MVT::i64 , Custom);
    setOperationAction(ISD::CTTZ , MVT::i32 , Legal);
    setOperationAction(ISD::CTTZ , MVT::i64 , Legal);
  } else {
    setOperationAction(ISD::BSWAP, MVT::i64 , Expand);
    setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
    setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32 , Legal);
    setOperationAction(ISD::CTPOP, MVT::i64 , Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32 , Expand);
    setOperationAction(ISD::CTPOP, MVT::i64 , Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32 , Expand);
  setOperationAction(ISD::ROTR, MVT::i64 , Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have Select.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  if (Subtarget.hasSPE()) {
    // SPE has built-in conversions.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
  } else {
    // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

    // PowerPC does not have [U|S]INT_TO_FP.
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  }

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuation, user-level threading, etc. As a result, no
  // other SjLj exception interfaces are implemented; please don't build
  // your own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
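  // (In LLVM IR these nodes are produced for the @llvm.eh.sjlj.setjmp and
  // @llvm.eh.sjlj.longjmp intrinsics.)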
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART , MVT::Other, Custom);

  if (Subtarget.isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  if (Subtarget.isSVR4ABI() && !isPPC64)
    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VACOPY , MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY , MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  if (Subtarget.hasSPE()) {
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
  }
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    if (Subtarget.hasSPE())
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    else
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // Vector instructions introduced in P8
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      }
      else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND , VT, Promote);
      AddPromotedToType (ISD::AND , VT, MVT::v4i32);
      setOperationAction(ISD::OR , VT, Promote);
      AddPromotedToType (ISD::OR , VT, MVT::v4i32);
      setOperationAction(ISD::XOR , VT, Promote);
      AddPromotedToType (ISD::XOR , VT, MVT::v4i32);
      setOperationAction(ISD::LOAD , VT, Promote);
      AddPromotedToType (ISD::LOAD , VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL , VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND , MVT::v4i32, Legal);
    setOperationAction(ISD::OR , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
      setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128 bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However, due to direct move costs, it's not
        // worth doing.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      }
      else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128 bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);

      if (EnableQuadPrecision) {
        addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
        setOperationAction(ISD::FADD, MVT::f128, Legal);
        setOperationAction(ISD::FSUB, MVT::f128, Legal);
        setOperationAction(ISD::FDIV, MVT::f128, Legal);
        setOperationAction(ISD::FMUL, MVT::f128, Legal);
        setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
        // No extending loads to f128 on PPC.
        for (MVT FPT : MVT::fp_valuetypes())
          setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
        setOperationAction(ISD::FMA, MVT::f128, Legal);
        setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
        setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
        setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
        setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
        setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
        setCondCodeAction(ISD::SETONE, MVT::f128, Expand);

        setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
        setOperationAction(ISD::FRINT, MVT::f128, Legal);
        setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
        setOperationAction(ISD::FCEIL, MVT::f128, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
        setOperationAction(ISD::FROUND, MVT::f128, Legal);

        setOperationAction(ISD::SELECT, MVT::f128, Expand);
        setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
        setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
        setTruncStoreAction(MVT::f128, MVT::f64, Expand);
        setTruncStoreAction(MVT::f128, MVT::f32, Expand);
        setOperationAction(ISD::BITCAST, MVT::i128, Custom);
        // No implementation for these ops for PowerPC.
        setOperationAction(ISD::FSIN , MVT::f128, Expand);
        setOperationAction(ISD::FCOS , MVT::f128, Expand);
        setOperationAction(ISD::FPOW, MVT::f128, Expand);
        setOperationAction(ISD::FPOWI, MVT::f128, Expand);
        setOperationAction(ISD::FREM, MVT::f128, Expand);
      }

    }

    if (Subtarget.hasP9Altivec()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD , MVT::v4f64, Custom);
    setOperationAction(ISD::STORE , MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT , MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT , MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND , MVT::v4f32, Legal);
    setOperationAction(ISD::FP_ROUND_INREG , MVT::v4f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG , MVT::v4f64, Legal);
    setOperationAction(ISD::FABS , MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN , MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS , MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2 , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10 , MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP , MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2 , MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD , MVT::v4f32, Custom);
    setOperationAction(ISD::STORE , MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT , MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT , MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG , MVT::v4f32, Legal);
    setOperationAction(ISD::FABS , MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN , MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS , MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2 , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10 , MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP , MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2 , MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND , MVT::v4i1, Legal);
    setOperationAction(ISD::OR , MVT::v4i1, Legal);
    setOperationAction(ISD::XOR , MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD , MVT::v4i1, Custom);
    setOperationAction(ISD::STORE , MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  if (EnableQuadPrecision) {
    setLibcallName(RTLIB::LOG_F128, "logf128");
    setLibcallName(RTLIB::LOG2_F128, "log2f128");
    setLibcallName(RTLIB::LOG10_F128, "log10f128");
    setLibcallName(RTLIB::EXP_F128, "expf128");
    setLibcallName(RTLIB::EXP2_F128, "exp2f128");
    setLibcallName(RTLIB::SIN_F128, "sinf128");
    setLibcallName(RTLIB::COS_F128, "cosf128");
    setLibcallName(RTLIB::POW_F128, "powf128");
    setLibcallName(RTLIB::FMIN_F128, "fminf128");
    setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
    setLibcallName(RTLIB::POWI_F128, "__powikf2");
    setLibcallName(RTLIB::REM_F128, "fmodf128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(2);
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(4);

  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
    setPrefFunctionAlignment(4);
    setPrefLoopAlignment(4);
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of a function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
    MaxLoadsPerMemcmp = 128;
  } else {
    MaxLoadsPerMemcmp = 8;
    MaxLoadsPerMemcmpOptSize = 4;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
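/// For example (derived from the checks in the body below): with Altivec,
/// where MaxMaxAlign is 16, a struct containing a 128-bit vector member
/// yields an alignment of 16; with QPX, where MaxMaxAlign is 32, a 256-bit
/// vector member raises the alignment to 32.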
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary.
  // The rest use an 8-byte boundary on PPC64 and a 4-byte boundary on PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
  return Align;
}

unsigned PPCTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                          CallingConv::ID CC,
                                                          EVT VT) const {
  if (Subtarget.hasSPE() && VT == MVT::f64)
    return 2;
  return PPCTargetLowering::getNumRegisters(Context, VT);
}

MVT PPCTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                     CallingConv::ID CC,
                                                     EVT VT) const {
  if (Subtarget.hasSPE() && VT == MVT::f64)
    return MVT::i32;
  return PPCTargetLowering::getRegisterType(Context, VT);
}

bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

bool PPCTargetLowering::hasSPE() const {
  return Subtarget.hasSPE();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER: break;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
  case PPCISD::FP_TO_UINT_IN_VSR:
    return "PPCISD::FP_TO_UINT_IN_VSR";
  case PPCISD::FP_TO_SINT_IN_VSR:
    return "PPCISD::FP_TO_SINT_IN_VSR";
  case PPCISD::FRE: return "PPCISD::FRE";
  case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::XXSPLT: return "PPCISD::XXSPLT";
  case PPCISD::VECINSERT: return "PPCISD::VECINSERT";
  case PPCISD::XXREVERSE: return "PPCISD::XXREVERSE";
  case PPCISD::XXPERMDI: return "PPCISD::XXPERMDI";
"PPCISD::XXPERMDI"; 1270 case PPCISD::VECSHL: return "PPCISD::VECSHL"; 1271 case PPCISD::CMPB: return "PPCISD::CMPB"; 1272 case PPCISD::Hi: return "PPCISD::Hi"; 1273 case PPCISD::Lo: return "PPCISD::Lo"; 1274 case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY"; 1275 case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8"; 1276 case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16"; 1277 case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC"; 1278 case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET"; 1279 case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg"; 1280 case PPCISD::SRL: return "PPCISD::SRL"; 1281 case PPCISD::SRA: return "PPCISD::SRA"; 1282 case PPCISD::SHL: return "PPCISD::SHL"; 1283 case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE"; 1284 case PPCISD::CALL: return "PPCISD::CALL"; 1285 case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP"; 1286 case PPCISD::MTCTR: return "PPCISD::MTCTR"; 1287 case PPCISD::BCTRL: return "PPCISD::BCTRL"; 1288 case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC"; 1289 case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG"; 1290 case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE"; 1291 case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP"; 1292 case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP"; 1293 case PPCISD::MFOCRF: return "PPCISD::MFOCRF"; 1294 case PPCISD::MFVSR: return "PPCISD::MFVSR"; 1295 case PPCISD::MTVSRA: return "PPCISD::MTVSRA"; 1296 case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ"; 1297 case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP"; 1298 case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP"; 1299 case PPCISD::ANDIo_1_EQ_BIT: return "PPCISD::ANDIo_1_EQ_BIT"; 1300 case PPCISD::ANDIo_1_GT_BIT: return "PPCISD::ANDIo_1_GT_BIT"; 1301 case PPCISD::VCMP: return "PPCISD::VCMP"; 1302 case PPCISD::VCMPo: return "PPCISD::VCMPo"; 1303 case PPCISD::LBRX: return "PPCISD::LBRX"; 1304 case PPCISD::STBRX: return "PPCISD::STBRX"; 1305 case PPCISD::LFIWAX: return "PPCISD::LFIWAX"; 1306 case PPCISD::LFIWZX: return "PPCISD::LFIWZX"; 1307 case PPCISD::LXSIZX: return "PPCISD::LXSIZX"; 1308 case PPCISD::STXSIX: return "PPCISD::STXSIX"; 1309 case PPCISD::VEXTS: return "PPCISD::VEXTS"; 1310 case PPCISD::SExtVElems: return "PPCISD::SExtVElems"; 1311 case PPCISD::LXVD2X: return "PPCISD::LXVD2X"; 1312 case PPCISD::STXVD2X: return "PPCISD::STXVD2X"; 1313 case PPCISD::ST_VSR_SCAL_INT: 1314 return "PPCISD::ST_VSR_SCAL_INT"; 1315 case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH"; 1316 case PPCISD::BDNZ: return "PPCISD::BDNZ"; 1317 case PPCISD::BDZ: return "PPCISD::BDZ"; 1318 case PPCISD::MFFS: return "PPCISD::MFFS"; 1319 case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ"; 1320 case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN"; 1321 case PPCISD::CR6SET: return "PPCISD::CR6SET"; 1322 case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET"; 1323 case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT"; 1324 case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT"; 1325 case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA"; 1326 case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L"; 1327 case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS"; 1328 case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA"; 1329 case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L"; 1330 case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR"; 1331 case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR"; 1332 case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA"; 1333 case 
  case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT";
  case PPCISD::SC: return "PPCISD::SC";
  case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB";
  case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE";
  case PPCISD::RFEBB: return "PPCISD::RFEBB";
  case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD";
  case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN";
  case PPCISD::QVFPERM: return "PPCISD::QVFPERM";
  case PPCISD::QVGPCI: return "PPCISD::QVGPCI";
  case PPCISD::QVALIGNI: return "PPCISD::QVALIGNI";
  case PPCISD::QVESPLATI: return "PPCISD::QVESPLATI";
  case PPCISD::QBFLT: return "PPCISD::QBFLT";
  case PPCISD::QVLFSb: return "PPCISD::QVLFSb";
  case PPCISD::BUILD_FP128: return "PPCISD::BUILD_FP128";
  case PPCISD::EXTSWSLI: return "PPCISD::EXTSWSLI";
  }
  return nullptr;
}

EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
                                          EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

  if (Subtarget.hasQPX())
    return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());

  return VT.changeVectorElementTypeToInteger();
}

bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
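/// For example (reading off the index checks in the body below), the
/// big-endian two-input form (ShuffleKind 0) matches the mask
/// <1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31> (the odd bytes, i.e. the
/// low byte of each halfword), while the little-endian two-input form
/// (ShuffleKind 2) matches the corresponding even-byte mask.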
/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
        return false;
  }
  return true;
}
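// Worked example (illustrative): on a big-endian target, vpkuhum keeps the
// odd-numbered (low-order) byte of each halfword of its two inputs, so the
// matching ShuffleKind-0 v16i8 mask is
//   <1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31>
// i.e. element i must equal 2*i+1, which is exactly what the loop above
// checks. vpkuwum keeps byte pairs instead: <2,3,6,7,10,11,...,30,31>.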
/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
/// current subtarget.
///
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  const PPCSubtarget& Subtarget =
      static_cast<const PPCSubtarget&>(DAG.getSubtarget());
  if (!Subtarget.hasP8Vector())
    return false;

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i),    i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}
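// Worked example (illustrative): with UnitSize == 4 and the big-endian
// "normal" parameters used below (LHSStart == 8, RHSStart == 24), isVMerge
// accepts the mask
//   <8,9,10,11, 24,25,26,27, 12,13,14,15, 28,29,30,31>
// which interleaves the low-order words of the two inputs -- the vmrglw
// pattern.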
/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}
/**
 * Common function used to match vmrgew and vmrgow shuffles
 *
 * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target
 * machine.
 *   - Little Endian:
 *     - Use offset of 0 to check for odd elements
 *     - Use offset of 4 to check for even elements
 *   - Big Endian:
 *     - Use offset of 0 to check for even elements
 *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 * Targeting your applications - what little endian and big endian IBM XL C/C++
 * compiler differences mean to you
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements
 * are numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, thus each vector contains 16 elements of
 * 8 bits each. More information on the shufflevector instruction can be found
 * in the LLVM Language Reference:
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 *   - If the instruction uses the same vector for both inputs, the range of
 *     the indices will be 0 to 15. In this case, the RHSStart value passed
 *     should be 0.
 *   - If the instruction has two different vectors then the range of the
 *     indices will be 0 to 31. In this case, the RHSStart value passed should
 *     be 16 (indices 0-15 specify elements in the first vector while indices
 *     16 to 31 specify elements in the second vector).
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] IndexOffset Specifies whether to look for even or odd elements
 * \param[in] RHSStartValue Specifies the starting index for the righthand
 *            input vector to the shuffle_vector instruction
 * \return true iff this shuffle vector represents an even or odd word merge
 */
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
      if (!isConstantOrUndef(N->getMaskElt(i*4+j),
                             i*RHSStartValue+j+IndexOffset) ||
          !isConstantOrUndef(N->getMaskElt(i*4+j+8),
                             i*RHSStartValue+j+IndexOffset+8))
        return false;
  return true;
}

/**
 * Determine if the specified shuffle mask is suitable for the vmrgew or
 * vmrgow instructions.
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
 * \param[in] ShuffleKind Identify the type of merge:
 *   - 0 = big-endian merge with two different inputs;
 *   - 1 = either-endian merge with two identical inputs;
 *   - 2 = little-endian merge with two different inputs (inputs are swapped
 *     for little-endian merges).
 * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask is suitable for vmrgew or vmrgow
 */
bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                              unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    unsigned indexOffset = CheckEven ? 4 : 0;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  } else {
    unsigned indexOffset = CheckEven ? 0 : 4;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 0) // Normal
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  }
}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
/// The ShuffleKind distinguishes between big-endian operations with two
/// different inputs (0), either-endian operations with two identical inputs
/// (1), and little-endian operations with two different inputs (2). For the
/// latter, the input operands are swapped (see PPCInstrAltivec.td).
int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                             SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::v16i8)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1; // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;

  ShiftAmt -= i;
  bool isLE = DAG.getDataLayout().isLittleEndian();

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else if (ShuffleKind == 1) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  } else
    return -1;

  if (isLE)
    ShiftAmt = 16 - ShiftAmt;

  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // The consecutive indices need to specify an element, not part of two
  // different elements. So abandon ship early if this isn't the case.
  if (N->getMaskElt(0) % EltSize != 0)
    return false;

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte
  // element splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}
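// Worked example (illustrative): for EltSize == 4 the mask
//   <4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7>
// is accepted by the checks above: element 0 (= 4) is word-aligned, indices
// 0..3 are consecutive, and every subsequent word repeats them, so this is a
// splat of word 1 of the first input (a vspltw candidate).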
/// Check that the mask is shuffling N byte elements. Within each N byte
/// element of the mask, the indices could be either in increasing or
/// decreasing order as long as they are consecutive.
/// \param[in] N the shuffle vector SD Node to analyze
/// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
/// Word/DoubleWord/QuadWord).
/// \param[in] StepLen the delta between adjacent indices within each element:
/// 1 if the indices are in increasing order, -1 if they are in decreasing
/// order.
/// \return true iff the mask is shuffling N byte elements.
static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
                                   int StepLen) {
  assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
         "Unexpected element width.");
  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");

  unsigned NumOfElem = 16 / Width;
  unsigned MaskVal[16]; //  Width is never greater than 16
  for (unsigned i = 0; i < NumOfElem; ++i) {
    MaskVal[0] = N->getMaskElt(i * Width);
    if ((StepLen == 1) && (MaskVal[0] % Width)) {
      return false;
    } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
      return false;
    }

    for (unsigned int j = 1; j < Width; ++j) {
      MaskVal[j] = N->getMaskElt(i * Width + j);
      if (MaskVal[j] != MaskVal[j-1] + StepLen) {
        return false;
      }
    }
  }

  return true;
}

bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                          unsigned &InsertAtByte, bool &Swap, bool IsLE) {
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

  // Now we look at mask elements 0,4,8,12.
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;
  unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
  unsigned BigEndianShifts[] = { 3, 0, 1, 2 };

  // Below, let H and L be arbitrary elements of the shuffle mask
  // where H is in the range [4,7] and L is in the range [0,3].
  // H, 1, 2, 3 or L, 5, 6, 7
  if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
      (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
    InsertAtByte = IsLE ? 12 : 0;
    Swap = M0 < 4;
    return true;
  }
  // 0, H, 2, 3 or 4, L, 6, 7
  if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
      (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
    InsertAtByte = IsLE ? 8 : 4;
    Swap = M1 < 4;
    return true;
  }
  // 0, 1, H, 3 or 4, 5, L, 7
  if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
      (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
    InsertAtByte = IsLE ? 4 : 8;
    Swap = M2 < 4;
    return true;
  }
  // 0, 1, 2, H or 4, 5, 6, L
  if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
      (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
    ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
    InsertAtByte = IsLE ? 0 : 12;
    Swap = M3 < 4;
    return true;
  }

  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  if (N->getOperand(1).isUndef()) {
    ShiftElts = 0;
    Swap = true;
    unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
    if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 12 : 0;
      return true;
    }
    if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 8 : 4;
      return true;
    }
    if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
      InsertAtByte = IsLE ? 4 : 8;
      return true;
    }
    if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
      InsertAtByte = IsLE ? 0 : 12;
      return true;
    }
  }

  return false;
}
bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                               bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
  // Ensure each byte index of the word is consecutive.
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

  // Now we look at mask elements 0,4,8,12, which are the beginning of words.
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;

  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  if (N->getOperand(1).isUndef()) {
    assert(M0 < 4 && "Indexing into an undef vector?");
    if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
      return false;

    ShiftElts = IsLE ? (4 - M0) % 4 : M0;
    Swap = false;
    return true;
  }

  // Ensure each word index of the ShuffleVector Mask is consecutive.
  if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
    return false;

  if (IsLE) {
    if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
      // Input vectors don't need to be swapped if the leading element
      // of the result is one of the 3 left elements of the second vector
      // (or if there is no shift to be done at all).
      Swap = false;
      ShiftElts = (8 - M0) % 8;
    } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
      // Input vectors need to be swapped if the leading element
      // of the result is one of the 3 left elements of the first vector
      // (or if we're shifting by 4 - thereby simply swapping the vectors).
      Swap = true;
      ShiftElts = (4 - M0) % 4;
    }

    return true;
  } else { // BE
    if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
      // Input vectors don't need to be swapped if the leading element
      // of the result is one of the 4 elements of the first vector.
      Swap = false;
      ShiftElts = M0;
    } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
      // Input vectors need to be swapped if the leading element
      // of the result is one of the 4 elements of the right vector.
      Swap = true;
      ShiftElts = M0 - 4;
    }

    return true;
  }
}

static bool isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  if (!isNByteElemShuffleMask(N, Width, -1))
    return false;

  for (int i = 0; i < 16; i += Width)
    if (N->getMaskElt(i) != i + Width - 1)
      return false;

  return true;
}

bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 2);
}

bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 4);
}

bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 8);
}

bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 16);
}
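// Worked example (illustrative): isXXBRWShuffleMask (Width == 4) accepts
//   <3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12>
// Within each word the indices decrease consecutively (StepLen == -1), and
// the first index of word i is i + Width - 1, i.e. every word is
// byte-reversed, which is what XXBRW performs.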
/// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
/// if the inputs to the instruction should be swapped and set \p DM to the
/// value for the immediate.
/// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
/// AND element 0 of the result comes from the first input (LE) or second input
/// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
/// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
/// mask.
bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
                                bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  // Ensure each byte index of the double word is consecutive.
  if (!isNByteElemShuffleMask(N, 8, 1))
    return false;

  unsigned M0 = N->getMaskElt(0) / 8;
  unsigned M1 = N->getMaskElt(8) / 8;
  assert(((M0 | M1) < 4) && "A mask element out of bounds?");

  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  if (N->getOperand(1).isUndef()) {
    if ((M0 | M1) < 2) {
      DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
      Swap = false;
      return true;
    } else
      return false;
  }

  if (IsLE) {
    if (M0 > 1 && M1 < 2) {
      Swap = false;
    } else if (M0 < 2 && M1 > 1) {
      M0 = (M0 + 2) % 4;
      M1 = (M1 + 2) % 4;
      Swap = true;
    } else
      return false;

    // Note: if control flow comes here that means Swap is already set above
    DM = (((~M1) & 1) << 1) + ((~M0) & 1);
    return true;
  } else { // BE
    if (M0 < 2 && M1 > 1) {
      Swap = false;
    } else if (M0 > 1 && M1 < 2) {
      M0 = (M0 + 2) % 4;
      M1 = (M1 + 2) % 4;
      Swap = true;
    } else
      return false;

    // Note: if control flow comes here that means Swap is already set above
    DM = (M0 << 1) + (M1 & 1);
    return true;
  }
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
                                SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  if (DAG.getDataLayout().isLittleEndian())
    return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  else
    return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted. The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(nullptr, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).isUndef()) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk. See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1. If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.

      LeadingZero &= isNullConstant(UniquedVals[i]);
      LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16)                                  // 0,0,0,4 -> vspltisw(4)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }
    if (LeadingOnes) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its
  // elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).isUndef()) continue;
    if (!OpVal.getNode())
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat. The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, check if it consists
  // of a repeated bit pattern of size ByteSize.
  if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
    return SDValue();

  // Properly sign extend the value.
  int MaskVal = SignExtend32(Value, ByteSize * 8);

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5 bit sext field, return it.
  if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
  return SDValue();
}

/// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the
/// shift amount, otherwise return -1.
int PPC::isQVALIGNIShuffleMask(SDNode *N) {
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 4) return -1; // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  // Check the rest of the elements to see if they are consecutive.
  for (++i; i != 4; ++i)
    if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
      return -1;

  return ShiftAmt;
}

//===----------------------------------------------------------------------===//
// Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value. If so, this returns true and the
/// immediate.
bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}

bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}
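// Worked example (illustrative): for an i64 constant -32768
// (0xFFFFFFFFFFFF8000), truncating to int16_t gives -32768, and comparing it
// against the sign-extended original succeeds, so these helpers return true.
// For 32768 (0x8000) the truncation yields -32768 != 32768, so they return
// false and the value cannot serve as a signed 16-bit displacement.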
/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation. Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index,
                                            SelectionDAG &DAG) const {
  int16_t imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i can fold it if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
    KnownBits LHSKnown, RHSKnown;
    DAG.computeKnownBits(N.getOperand(0), LHSKnown);

    if (LHSKnown.Zero.getBoolValue()) {
      DAG.computeKnownBits(N.getOperand(1), RHSKnown);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

// If we happen to be doing an i64 load or store into a stack slot that has
// less than a 4-byte alignment, then the frame-index elimination may need to
// use an indexed load or store instruction (because the offset may not be a
// multiple of 4). The extra register needed to hold the offset comes from the
// register scavenger, and it is possible that the scavenger will need to use
// an emergency spill slot. As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
  // FIXME: This does not handle the LWA case.
  if (VT != MVT::i64)
    return;

  // NOTE: We'll exclude negative FIs here, which come from argument
  // lowering, because there are no known test cases triggering this problem
  // using packed structures (or similar). We can remove this exclusion if
  // we find such a test case. The reason why this is so test-case driven is
  // because this entire 'fixup' is only to prevent crashes (from the
  // register scavenger) on not-really-valid inputs. For example, if we have:
  //   %a = alloca i1
  //   %b = bitcast i1* %a to i64*
  //   store i64 0, i64* %b
  // then the store should really be marked as 'align 1', but is not. If it
  // were marked as 'align 1' then the indexed form would have been
  // instruction-selected initially, and the problem this 'fixup' is preventing
  // won't happen regardless.
  if (FrameIdx < 0)
    return;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  unsigned Align = MFI.getObjectAlignment(FrameIdx);
  if (Align >= 4)
    return;

  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setHasNonRISpills();
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg. If \p Alignment is non-zero, only accept
/// displacements that are multiples of that value.
bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
                                            SDValue &Base,
                                            SelectionDAG &DAG,
                                            unsigned Alignment) const {
  // FIXME dl should come from parent load or store, not from address
  SDLoc dl(N);
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    int16_t imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!Alignment || (imm % Alignment) == 0)) {
      Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
        fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    int16_t imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!Alignment || (imm % Alignment) == 0)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      KnownBits LHSKnown;
      DAG.computeKnownBits(N.getOperand(0), LHSKnown);

      if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        if (FrameIndexSDNode *FI =
              dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
          Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
          fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
        } else {
          Base = N.getOperand(0);
        }
        Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0".
    int16_t Imm;
    if (isIntS16Immediate(CN, Imm) && (!Alignment || (Imm % Alignment) == 0)) {
      Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
      Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                             CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if ((CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
        (!Alignment || (CN->getZExtValue() % Alignment) == 0)) {
      int Addr = (int)CN->getZExtValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
                                   MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
    fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
  } else
    Base = N;
  return true;      // [r+0]
}
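// Worked example (illustrative): an address of the form (add %X, 24) yields
// Disp = 24 and Base = %X above, which selects a D-form access such as
//   lwz 4, 24(3)
// whereas (add %X, %Y) fails the immediate test and is left for
// SelectAddressRegReg to select the X-form lwzx.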
/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address. This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the address is the result of an add, we will utilize the fact that the
  // address calculation includes an implicit add. However, we can reduce
  // register pressure if we do not materialize a constant just for use as the
  // index register. We only get rid of the add if it is not an add of a
  // value and a 16-bit signed constant where both have a single use.
  int16_t imm = 0;
  if (N.getOpcode() == ISD::ADD &&
      (!isIntS16Immediate(N.getOperand(1), imm) ||
       !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Index = N;
  return true;
}

/// Returns true if we should use a direct load into vector instruction
/// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
static bool usePartialVectorLoads(SDNode *N) {
  if (!N->hasOneUse())
    return false;

  // If there are any other uses other than scalar to vector, then we should
  // keep it as a scalar load -> direct move pattern to prevent multiple
  // loads. Currently, only check for i64 since we have lxsd/lfd to do this
  // efficiently, but no update equivalent.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    EVT MemVT = LD->getMemoryVT();
    if (MemVT.isSimple() && MemVT.getSimpleVT().SimpleTy == MVT::i64) {
      SDNode *User = *(LD->use_begin());
      if (User->getOpcode() == ISD::SCALAR_TO_VECTOR)
        return true;
    }
  }

  return false;
}
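// Worked example (illustrative): in IR such as
//   %v = load i64, i64* %p
//   %vec = insertelement <2 x i64> undef, i64 %v, i32 0
// the load's only use becomes a scalar_to_vector node, so the function above
// returns true and the load is better selected as lxsd/lfd than as an
// integer load followed by a direct move.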
/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  unsigned Alignment;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlignment();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlignment();
    isLoad = false;
  } else
    return false;

  // Do not generate pre-inc forms for specific loads that feed
  // scalar_to_vector instructions, because we can fold these into a more
  // efficient instruction instead (such as LXSD).
  if (isLoad && usePartialVectorLoads(N)) {
    return false;
  }

  // PowerPC doesn't have preinc load/store instructions for vectors (except
  // for QPX, which does have preinc r+r forms).
  if (VT.isVector()) {
    if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
      return false;
    } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
      AM = ISD::PRE_INC;
      return true;
    }
  }

  if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
    // Common code will reject creating a pre-inc form if the base pointer
    // is a frame index, or if N is a store and the base pointer is either
    // the same as or a predecessor of the value being stored. Check for
    // those situations here, and try with swapped Base/Offset instead.
    bool Swap = false;

    if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
      Swap = true;
    else if (!isLoad) {
      SDValue Val = cast<StoreSDNode>(N)->getValue();
      if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
        Swap = true;
    }

    if (Swap)
      std::swap(Base, Offset);

    AM = ISD::PRE_INC;
    return true;
  }

  // LDU/STU can only handle immediates that are a multiple of 4.
  if (VT != MVT::i64) {
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 0))
      return false;
  } else {
    // LDU/STU need an address with at least 4-byte alignment.
    if (Alignment < 4)
      return false;

    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 4))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

/// Compute the HiOpFlags and LoOpFlags (target MO flags) to use when
/// referencing a label; the flags request the PIC base when generating
/// position-independent code, and non-lazy-pointer indirection when needed.
static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
                               unsigned &HiOpFlags, unsigned &LoOpFlags,
                               const GlobalValue *GV = nullptr) {
  HiOpFlags = PPCII::MO_HA;
  LoOpFlags = PPCII::MO_LO;

  // Don't use the pic base if not in PIC relocation model.
  if (IsPIC) {
    HiOpFlags |= PPCII::MO_PIC_FLAG;
    LoOpFlags |= PPCII::MO_PIC_FLAG;
  }

  // If this is a reference to a global value that requires a non-lazy-ptr,
  // make sure that instruction lowering adds it.
  if (GV && Subtarget.hasLazyResolverStub(GV)) {
    HiOpFlags |= PPCII::MO_NLP_FLAG;
    LoOpFlags |= PPCII::MO_NLP_FLAG;

    if (GV->hasHiddenVisibility()) {
      HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
      LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
    }
  }
}

static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
                             SelectionDAG &DAG) {
  SDLoc DL(HiPart);
  EVT PtrVT = HiPart.getValueType();
  SDValue Zero = DAG.getConstant(0, DL, PtrVT);

  SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);

  // With PIC, the first instruction is actually "GR+hi(&G)".
  if (isPIC)
    Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);

  // Generate non-pic code that has direct accesses to the constant pool.
  // The address of the global is just (hi(&g)+lo(&g)).
  return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
}

static void setUsesTOCBasePtr(MachineFunction &MF) {
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setUsesTOCBasePtr();
}

static void setUsesTOCBasePtr(SelectionDAG &DAG) {
  setUsesTOCBasePtr(DAG.getMachineFunction());
}

static SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, bool Is64Bit,
                           SDValue GA) {
  EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
  SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) :
                DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);

  SDValue Ops[] = { GA, Reg };
  return DAG.getMemIntrinsicNode(
      PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
      MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0,
      MachineMemOperand::MOLoad);
}

SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  const Constant *C = CP->getConstVal();

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
    return getTOCEntry(DAG, SDLoc(CP), true, GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);

  if (IsPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(),
                                           PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, SDLoc(CP), false, GA);
  }

  SDValue CPIHi =
      DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
  SDValue CPILo =
      DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
  return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
}
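// For reference (illustrative): under the 64-bit SVR4 ABI the TOC_ENTRY node
// produced by getTOCEntry is eventually selected into the familiar
// medium-code-model pair, roughly
//   addis 3, 2, .LC0@toc@ha
//   ld    3, .LC0@toc@l(3)
// with X2 holding the TOC base requested via setUsesTOCBasePtr.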
// For 64-bit PowerPC, prefer the more compact relative encodings.
// This trades 32 bits per jump table entry for one or two instructions
// on the jump site.
unsigned PPCTargetLowering::getJumpTableEncoding() const {
  if (isJumpTableRelative())
    return MachineJumpTableInfo::EK_LabelDifference32;

  return TargetLowering::getJumpTableEncoding();
}

bool PPCTargetLowering::isJumpTableRelative() const {
  if (Subtarget.isPPC64())
    return true;
  return TargetLowering::isJumpTableRelative();
}

SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                    SelectionDAG &DAG) const {
  if (!Subtarget.isPPC64())
    return TargetLowering::getPICJumpTableRelocBase(Table, DAG);

  switch (getTargetMachine().getCodeModel()) {
  case CodeModel::Small:
  case CodeModel::Medium:
    return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
  default:
    return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
                       getPointerTy(DAG.getDataLayout()));
  }
}

const MCExpr *
PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                                unsigned JTI,
                                                MCContext &Ctx) const {
  if (!Subtarget.isPPC64())
    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);

  switch (getTargetMachine().getCodeModel()) {
  case CodeModel::Small:
  case CodeModel::Medium:
    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
  default:
    return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
  }
}

SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
    return getTOCEntry(DAG, SDLoc(JT), true, GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);

  if (IsPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
                                        PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, SDLoc(GA), false, GA);
  }

  SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
  SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
  return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
}
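// For reference (illustrative): with EK_LabelDifference32 each jump-table
// slot is a 32-bit label difference rather than a 64-bit pointer; the
// dispatch sequence then needs an extra load/extend and add to rebase the
// entry, which is the size-versus-instructions trade noted in the comment on
// getJumpTableEncoding above.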
SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
  const BlockAddress *BA = BASDN->getBlockAddress();

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual BlockAddress is stored in the TOC.
  if (Subtarget.isSVR4ABI() &&
      (Subtarget.isPPC64() || isPositionIndependent())) {
    if (Subtarget.isPPC64())
      setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
    return getTOCEntry(DAG, SDLoc(BASDN), Subtarget.isPPC64(), GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
  SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
  SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
  return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
}

SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {
  // FIXME: TLS addresses currently use medium model code sequences,
  // which is the most useful form. Eventually support for small and
  // large models could be added if users need it, at the cost of
  // additional complexity.
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  SDLoc dl(GA);
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  bool is64bit = Subtarget.isPPC64();
  const Module *M = DAG.getMachineFunction().getFunction().getParent();
  PICLevel::Level picLevel = M->getPICLevel();

  TLSModel::Model Model = getTargetMachine().getTLSModel(GV);

  if (Model == TLSModel::LocalExec) {
    SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_TPREL_HA);
    SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_TPREL_LO);
    SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
                             : DAG.getRegister(PPC::R2, MVT::i32);

    SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
    return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
  }

  if (Model == TLSModel::InitialExec) {
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                                PPCII::MO_TLS);
    SDValue GOTPtr;
    if (is64bit) {
      setUsesTOCBasePtr(DAG);
      SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
      GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
                           PtrVT, GOTReg, TGA);
    } else
      GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
    SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl,
                                   PtrVT, TGA, GOTPtr);
    return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
  }

  if (Model == TLSModel::GeneralDynamic) {
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue GOTPtr;
    if (is64bit) {
      setUsesTOCBasePtr(DAG);
      SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
      GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
                           GOTReg, TGA);
    } else {
      if (picLevel == PICLevel::SmallPIC)
        GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
      else
        GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
    }
    return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
                       GOTPtr, TGA, TGA);
  }

  if (Model == TLSModel::LocalDynamic) {
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue GOTPtr;
    if (is64bit) {
      setUsesTOCBasePtr(DAG);
      SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
      GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
                           GOTReg, TGA);
    } else {
      if (picLevel == PICLevel::SmallPIC)
        GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
      else
        GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
    }
    SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
                                  PtrVT, GOTPtr, TGA, TGA);
    SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
                                      PtrVT, TLSAddr, TGA);
    return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
  }

  llvm_unreachable("Unknown TLS model!");
}

SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  SDLoc DL(GSDN);
  const GlobalValue *GV = GSDN->getGlobal();

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
    return getTOCEntry(DAG, DL, true, GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);

  if (IsPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
                                            GSDN->getOffset(),
                                            PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, DL, false, GA);
  }

  SDValue GAHi =
      DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
  SDValue GALo =
      DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);

  SDValue Ptr = LowerLabelRef(GAHi, GALo, IsPIC, DAG);

  // If the global reference is actually to a non-lazy-pointer, we have to do
  // an extra load to get the address of the global.
  if (MOHiFlag & PPCII::MO_NLP_FLAG)
    Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr,
                      MachinePointerInfo());
  return Ptr;
}

SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc dl(Op);

  if (Op.getValueType() == MVT::v2i64) {
    // When the operands themselves are v2i64 values, we need to do something
    // special because VSX has no underlying comparison operations for these.
    if (Op.getOperand(0).getValueType() == MVT::v2i64) {
      // Equality can be handled by casting to the legal type for Altivec
      // comparisons, everything else needs to be expanded.
      if (CC == ISD::SETEQ || CC == ISD::SETNE) {
        return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
                 DAG.getSetCC(dl, MVT::v4i32,
                   DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
                   DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
                   CC));
      }

      return SDValue();
    }

    // We handle most of these in the usual way.
    return Op;
  }

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
  if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
    return V;

  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized.
FIXME: revisit this when we can custom lower all setcc 2881 // optimizations. 2882 if (C->isAllOnesValue() || C->isNullValue()) 2883 return SDValue(); 2884 } 2885 2886 // If we have an integer seteq/setne, turn it into a compare against zero 2887 // by xor'ing the rhs with the lhs, which is faster than setting a 2888 // condition register, reading it back out, and masking the correct bit. The 2889 // normal approach here uses sub to do this instead of xor. Using xor exposes 2890 // the result to other bit-twiddling opportunities. 2891 EVT LHSVT = Op.getOperand(0).getValueType(); 2892 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 2893 EVT VT = Op.getValueType(); 2894 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0), 2895 Op.getOperand(1)); 2896 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC); 2897 } 2898 return SDValue(); 2899 } 2900 2901 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { 2902 SDNode *Node = Op.getNode(); 2903 EVT VT = Node->getValueType(0); 2904 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2905 SDValue InChain = Node->getOperand(0); 2906 SDValue VAListPtr = Node->getOperand(1); 2907 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 2908 SDLoc dl(Node); 2909 2910 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only"); 2911 2912 // gpr_index 2913 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 2914 VAListPtr, MachinePointerInfo(SV), MVT::i8); 2915 InChain = GprIndex.getValue(1); 2916 2917 if (VT == MVT::i64) { 2918 // Check if GprIndex is even 2919 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex, 2920 DAG.getConstant(1, dl, MVT::i32)); 2921 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd, 2922 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE); 2923 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex, 2924 DAG.getConstant(1, dl, MVT::i32)); 2925 // Align GprIndex to be even if it isn't 2926 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne, 2927 GprIndex); 2928 } 2929 2930 // fpr index is 1 byte after gpr 2931 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2932 DAG.getConstant(1, dl, MVT::i32)); 2933 2934 // fpr 2935 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 2936 FprPtr, MachinePointerInfo(SV), MVT::i8); 2937 InChain = FprIndex.getValue(1); 2938 2939 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2940 DAG.getConstant(8, dl, MVT::i32)); 2941 2942 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2943 DAG.getConstant(4, dl, MVT::i32)); 2944 2945 // areas 2946 SDValue OverflowArea = 2947 DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo()); 2948 InChain = OverflowArea.getValue(1); 2949 2950 SDValue RegSaveArea = 2951 DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo()); 2952 InChain = RegSaveArea.getValue(1); 2953 2954 // select overflow_area if index > 8 2955 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex, 2956 DAG.getConstant(8, dl, MVT::i32), ISD::SETLT); 2957 2958 // adjustment constant gpr_index * 4/8 2959 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32, 2960 VT.isInteger() ? GprIndex : FprIndex, 2961 DAG.getConstant(VT.isInteger() ? 
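
  // Illustrative note (not from the ABI document): with gpr_index == 3, the
  // next integer argument is read from reg_save_area + 3*4; floating-point
  // copies begin 32 bytes into the save area because the 8 spilled GPRs
  // (8 * 4 bytes) precede the FPRs, matching the +32 adjustment below.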

  // OurReg = RegSaveArea + RegConstant
  SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
                               RegConstant);

  // Floating types are 32 bytes into RegSaveArea.
  if (VT.isFloatingPoint())
    OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
                         DAG.getConstant(32, dl, MVT::i32));

  // increase {f,g}pr_index by 1 (or 2 if VT is i64)
  SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
                                   VT.isInteger() ? GprIndex : FprIndex,
                                   DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
                                                   MVT::i32));

  InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
                              VT.isInteger() ? VAListPtr : FprPtr,
                              MachinePointerInfo(SV), MVT::i8);

  // determine if we should load from reg_save_area or overflow_area
  SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);

  // increase overflow_area by 4/8 if gpr/fpr index >= 8
  SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
                                          DAG.getConstant(VT.isInteger() ? 4 : 8,
                                                          dl, MVT::i32));

  OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
                             OverflowAreaPlusN);

  InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
                              MachinePointerInfo(), MVT::i32);

  return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
  assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");

  // We have to copy the entire va_list struct:
  // 2*sizeof(char) + 2 bytes of alignment + 2*sizeof(char*) = 12 bytes
  return DAG.getMemcpy(Op.getOperand(0), Op,
                       Op.getOperand(1), Op.getOperand(2),
                       DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true,
                       false, MachinePointerInfo(), MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
                                                  SelectionDAG &DAG) const {
  return Op.getOperand(0);
}

SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  SDLoc dl(Op);

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  bool isPPC64 = (PtrVT == MVT::i64);
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = Trmp; Args.push_back(Entry);

  // TrampSize == (isPPC64 ? 48 : 40);
  Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
                               isPPC64 ? MVT::i64 : MVT::i32);
  Args.push_back(Entry);

  Entry.Node = FPtr; Args.push_back(Entry);
  Entry.Node = Nest; Args.push_back(Entry);

  // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg).
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
      CallingConv::C, Type::getVoidTy(*DAG.getContext()),
      DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
}

SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  SDLoc dl(Op);

  if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
    const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
    return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                        MachinePointerInfo(SV));
  }

  // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
  // We suppose the given va_list is already allocated.
  //
  // typedef struct {
  //  char gpr;     /* index into the array of 8 GPRs
  //                 * stored in the register save area
  //                 * gpr=0 corresponds to r3,
  //                 * gpr=1 to r4, etc.
  //                 */
  //  char fpr;     /* index into the array of 8 FPRs
  //                 * stored in the register save area
  //                 * fpr=0 corresponds to f1,
  //                 * fpr=1 to f2, etc.
  //                 */
  //  char *overflow_arg_area;
  //                /* location on stack that holds
  //                 * the next overflow argument
  //                 */
  //  char *reg_save_area;
  //                /* where r3:r10 and f1:f8 (if saved)
  //                 * are stored
  //                 */
  // } va_list[1];
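
  // Illustrative mapping (not part of the ABI text): after va_start, the four
  // fields above are initialized by the four stores built below:
  //   gpr               <- number of fixed arguments passed in r3..r10
  //   fpr               <- number of fixed arguments passed in f1..f8
  //   overflow_arg_area <- address of the first on-stack argument
  //   reg_save_area     <- base of the area where r3..r10/f1..f8 were spilled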

  SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
  SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
  SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
                                            PtrVT);
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 PtrVT);

  uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
  SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);

  uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
  SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);

  uint64_t FPROffset = 1;
  SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  // Store first byte : number of int regs
  SDValue firstStore =
      DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
                        MachinePointerInfo(SV), MVT::i8);
  uint64_t nextOffset = FPROffset;
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
                                ConstFPROffset);

  // Store second byte : number of float regs
  SDValue secondStore =
      DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
                        MachinePointerInfo(SV, nextOffset), MVT::i8);
  nextOffset += StackOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);

  // Store second word : arguments given on stack
  SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
                                    MachinePointerInfo(SV, nextOffset));
  nextOffset += FrameOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);

  // Store third word : arguments given in registers
  return DAG.getStore(thirdStore, dl, FR, nextPtr,
                      MachinePointerInfo(SV, nextOffset));
}

#include "PPCGenCallingConv.inc"

// Function whose sole purpose is to kill compiler warnings
// stemming from unused functions included from PPCGenCallingConv.inc.
CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const {
  return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS;
}

bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                      CCValAssign::LocInfo &LocInfo,
                                      ISD::ArgFlagsTy &ArgFlags,
                                      CCState &State) {
  return true;
}

bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
                                             MVT &LocVT,
                                             CCValAssign::LocInfo &LocInfo,
                                             ISD::ArgFlagsTy &ArgFlags,
                                             CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // Skip one register if the first unallocated register has an even register
  // number and there are still argument registers available which have not
  // been allocated yet. RegNum is actually an index into ArgRegs, which means
  // we need to skip a register if RegNum is odd.
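  // Illustrative example (hypothetical C signature): for f(int a, long long b),
  // a is assigned r3 (index 0); b then finds the first free index 1 (r4),
  // which is odd, so r4 is skipped here and b occupies the aligned pair r5/r6.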
  if (RegNum != NumArgRegs && RegNum % 2 == 1) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the first
  // unallocated register has an odd register number and does not actually
  // allocate a register for the current argument.
  return false;
}

bool
llvm::CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128(unsigned &ValNo, MVT &ValVT,
                                                  MVT &LocVT,
                                                  CCValAssign::LocInfo &LocInfo,
                                                  ISD::ArgFlagsTy &ArgFlags,
                                                  CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);
  int RegsLeft = NumArgRegs - RegNum;

  // Skip if there are not enough registers left for the long double type (4
  // GPRs in soft-float mode) and put the long double argument on the stack.
  if (RegNum != NumArgRegs && RegsLeft < 4) {
    for (int i = 0; i < RegsLeft; i++) {
      State.AllocateReg(ArgRegs[RegNum + i]);
    }
  }

  return false;
}

bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
                                               MVT &LocVT,
                                               CCValAssign::LocInfo &LocInfo,
                                               ISD::ArgFlagsTy &ArgFlags,
                                               CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8
  };

  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // If there is only one floating-point register left we need to put both f64
  // values of a split ppc_fp128 value on the stack.
  if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the two
  // f64 values a ppc_fp128 value is split into are both passed in registers or
  // both passed on the stack and does not actually allocate a register for the
  // current argument.
  return false;
}

/// FPR - The set of FP registers that should be allocated for arguments,
/// on Darwin.
static const MCPhysReg FPR[] = {PPC::F1,  PPC::F2,  PPC::F3, PPC::F4, PPC::F5,
                                PPC::F6,  PPC::F7,  PPC::F8, PPC::F9, PPC::F10,
                                PPC::F11, PPC::F12, PPC::F13};

/// QFPR - The set of QPX registers that should be allocated for arguments.
static const MCPhysReg QFPR[] = {
    PPC::QF1, PPC::QF2, PPC::QF3,  PPC::QF4,  PPC::QF5,  PPC::QF6, PPC::QF7,
    PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};

/// CalculateStackSlotSize - Calculates the size reserved for this argument on
/// the stack.
static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
                                       unsigned PtrByteSize) {
  unsigned ArgSize = ArgVT.getStoreSize();
  if (Flags.isByVal())
    ArgSize = Flags.getByValSize();

  // Round up to multiples of the pointer size, except for array members,
  // which are always packed.
  if (!Flags.isInConsecutiveRegs())
    ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

  return ArgSize;
}
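
// Illustrative examples (not exhaustive): with PtrByteSize == 8, an i32
// argument still reserves a full 8-byte slot, and a 12-byte byval aggregate
// rounds up to 16 bytes; an in-consecutive-regs array member stays packed.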
/// CalculateStackSlotAlignment - Calculates the alignment of this argument
/// on the stack.
static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
                                            ISD::ArgFlagsTy Flags,
                                            unsigned PtrByteSize) {
  unsigned Align = PtrByteSize;

  // Altivec parameters are padded to a 16 byte boundary.
  if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
      ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
      ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
      ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
    Align = 16;
  // QPX vector types stored in double-precision are padded to a 32 byte
  // boundary.
  else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1)
    Align = 32;

  // ByVal parameters are aligned as requested.
  if (Flags.isByVal()) {
    unsigned BVAlign = Flags.getByValAlign();
    if (BVAlign > PtrByteSize) {
      if (BVAlign % PtrByteSize != 0)
        llvm_unreachable(
            "ByVal alignment is not a multiple of the pointer size");

      Align = BVAlign;
    }
  }

  // Array members are always packed to their original alignment.
  if (Flags.isInConsecutiveRegs()) {
    // If the array member was split into multiple registers, the first
    // needs to be aligned to the size of the full type.  (Except for
    // ppcf128, which is only aligned as its f64 components.)
    if (Flags.isSplit() && OrigVT != MVT::ppcf128)
      Align = OrigVT.getStoreSize();
    else
      Align = ArgVT.getStoreSize();
  }

  return Align;
}

/// CalculateStackSlotUsed - Return whether this argument will use its
/// stack slot (instead of being passed in registers).  ArgOffset,
/// AvailableFPRs, and AvailableVRs must hold the current argument
/// position, and will be updated to account for this argument.
static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
                                   ISD::ArgFlagsTy Flags,
                                   unsigned PtrByteSize,
                                   unsigned LinkageSize,
                                   unsigned ParamAreaSize,
                                   unsigned &ArgOffset,
                                   unsigned &AvailableFPRs,
                                   unsigned &AvailableVRs, bool HasQPX) {
  bool UseMemory = false;

  // Respect alignment of argument on the stack.
  unsigned Align =
    CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
  ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
  // If there's no space left in the argument save area, we must
  // use memory (this check also catches zero-sized arguments).
  if (ArgOffset >= LinkageSize + ParamAreaSize)
    UseMemory = true;

  // Allocate argument on the stack.
  ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
  if (Flags.isInConsecutiveRegsLast())
    ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
  // If we overran the argument save area, we must use memory
  // (this check catches arguments passed partially in memory).
  if (ArgOffset > LinkageSize + ParamAreaSize)
    UseMemory = true;

  // However, if the argument is actually passed in an FPR or a VR,
  // we don't use memory after all.
  if (!Flags.isByVal()) {
    if (ArgVT == MVT::f32 || ArgVT == MVT::f64 ||
        // QPX registers overlap with the scalar FP registers.
        (HasQPX && (ArgVT == MVT::v4f32 ||
                    ArgVT == MVT::v4f64 ||
                    ArgVT == MVT::v4i1)))
      if (AvailableFPRs > 0) {
        --AvailableFPRs;
        return false;
      }
    if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
        ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
        ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
        ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
      if (AvailableVRs > 0) {
        --AvailableVRs;
        return false;
      }
  }

  return UseMemory;
}

/// EnsureStackAlignment - Round stack frame size up from NumBytes to
/// ensure minimum alignment required for target.
static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
                                     unsigned NumBytes) {
  unsigned TargetAlign = Lowering->getStackAlignment();
  unsigned AlignMask = TargetAlign - 1;
  NumBytes = (NumBytes + AlignMask) & ~AlignMask;
  return NumBytes;
}

SDValue PPCTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget.isSVR4ABI()) {
    if (Subtarget.isPPC64())
      return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins,
                                         dl, DAG, InVals);
    else
      return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins,
                                         dl, DAG, InVals);
  } else {
    return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
                                       dl, DAG, InVals);
  }
}

SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  // 32-bit SVR4 ABI Stack Frame Layout:
  //              +-----------------------------------+
  //        +-->  |            Back chain             |
  //        |     +-----------------------------------+
  //        |     | Floating-point register save area |
  //        |     +-----------------------------------+
  //        |     |    General register save area     |
  //        |     +-----------------------------------+
  //        |     |          CR save word             |
  //        |     +-----------------------------------+
  //        |     |         VRSAVE save word          |
  //        |     +-----------------------------------+
  //        |     |         Alignment padding         |
  //        |     +-----------------------------------+
  //        |     |     Vector register save area     |
  //        |     +-----------------------------------+
  //        |     |       Local variable space        |
  //        |     +-----------------------------------+
  //        |     |        Parameter list area        |
  //        |     +-----------------------------------+
  //        |     |           LR save word            |
  //        |     +-----------------------------------+
  // SP-->  +---  |            Back chain             |
  //              +-----------------------------------+
  //
  // Specifications:
  //   System V Application Binary Interface PowerPC Processor Supplement
  //   AltiVec Technology Programming Interface Manual

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = 4;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                    *DAG.getContext());

  // Reserve space for the linkage area on the stack.
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  CCInfo.AllocateStack(LinkageSize, PtrByteSize);
  if (useSoftFloat() || hasSPE())
    CCInfo.PreAnalyzeFormalArguments(Ins);

  CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
  CCInfo.clearWasPPCF128();

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];

    // Arguments stored in registers.
    if (VA.isRegLoc()) {
      const TargetRegisterClass *RC;
      EVT ValVT = VA.getValVT();

      switch (ValVT.getSimpleVT().SimpleTy) {
        default:
          llvm_unreachable("ValVT not supported by formal arguments Lowering");
        case MVT::i1:
        case MVT::i32:
          RC = &PPC::GPRCRegClass;
          break;
        case MVT::f32:
          if (Subtarget.hasP8Vector())
            RC = &PPC::VSSRCRegClass;
          else if (Subtarget.hasSPE())
            RC = &PPC::SPE4RCRegClass;
          else
            RC = &PPC::F4RCRegClass;
          break;
        case MVT::f64:
          if (Subtarget.hasVSX())
            RC = &PPC::VSFRCRegClass;
          else if (Subtarget.hasSPE())
            RC = &PPC::SPERCRegClass;
          else
            RC = &PPC::F8RCRegClass;
          break;
        case MVT::v16i8:
        case MVT::v8i16:
        case MVT::v4i32:
          RC = &PPC::VRRCRegClass;
          break;
        case MVT::v4f32:
          RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass;
          break;
        case MVT::v2f64:
        case MVT::v2i64:
          RC = &PPC::VRRCRegClass;
          break;
        case MVT::v4f64:
          RC = &PPC::QFRCRegClass;
          break;
        case MVT::v4i1:
          RC = &PPC::QBRCRegClass;
          break;
      }

      // Transform the arguments stored in physical registers into virtual
      // ones.
      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
      SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
                                            ValVT == MVT::i1 ? MVT::i32 : ValVT);

      if (ValVT == MVT::i1)
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);

      InVals.push_back(ArgValue);
    } else {
      // Argument stored in memory.
      assert(VA.isMemLoc());

      // Get the extended size of the argument type on the stack.
      unsigned ArgSize = VA.getLocVT().getStoreSize();
      // Get the actual size of the argument type.
      unsigned ObjSize = VA.getValVT().getStoreSize();
      unsigned ArgOffset = VA.getLocMemOffset();
      // Stack objects in PPC32 are right justified.
      ArgOffset += ArgSize - ObjSize;
      int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable);

      // Create load nodes to retrieve arguments from the stack.
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(
          DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
    }
  }
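
  // Illustrative note on the right-justification above: an i8 passed on the
  // stack still occupies a full 4-byte slot, and on big-endian PPC32 the byte
  // itself lives at slot offset 3, hence the ArgSize - ObjSize adjustment.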

  // Assign locations to all of the incoming aggregate by value arguments.
  // Aggregates passed by value are stored in the local variable space of the
  // caller's stack frame, right above the parameter list area.
  SmallVector<CCValAssign, 16> ByValArgLocs;
  CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                      ByValArgLocs, *DAG.getContext());

  // Reserve stack space for the allocations in CCInfo.
  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);

  CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);

  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
  MinReservedArea = std::max(MinReservedArea, LinkageSize);

  // Set the size that is at least reserved in caller of this function.  Tail
  // call optimized function's reserved stack space needs to be aligned so that
  // taking the difference between two stack areas will result in an aligned
  // stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  SmallVector<SDValue, 8> MemOps;

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    static const MCPhysReg GPArgRegs[] = {
      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
      PPC::R7, PPC::R8, PPC::R9, PPC::R10,
    };
    const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);

    static const MCPhysReg FPArgRegs[] = {
      PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
      PPC::F8
    };
    unsigned NumFPArgRegs = array_lengthof(FPArgRegs);

    if (useSoftFloat() || hasSPE())
      NumFPArgRegs = 0;

    FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
    FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));

    // Make room for NumGPArgRegs and NumFPArgRegs.
    int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
                NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;

    FuncInfo->setVarArgsStackOffset(
      MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
                            CCInfo.getNextStackOffset(), true));

    FuncInfo->setVarArgsFrameIndex(MFI.CreateStackObject(Depth, 8, false));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // The fixed integer arguments of a variadic function are stored to the
    // VarArgsFrameIndex on the stack so that they may be loaded by
    // dereferencing the result of va_next.
    for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
      // Get an existing live-in vreg, or add a new one.
      unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
      if (!VReg)
        VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by four for the next argument to store.
      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }

    // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
    // is set.
    // The double arguments are stored to the VarArgsFrameIndex
    // on the stack.
    for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
      // Get an existing live-in vreg, or add a new one.
      unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
      if (!VReg)
        VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by eight for the next argument to store.
      SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
                                       PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

// PPC64 passes i8, i16, and i32 values in i64 registers. Promote
// value to MVT::i64 and then truncate to the correct register size.
SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
                                             EVT ObjectVT, SelectionDAG &DAG,
                                             SDValue ArgVal,
                                             const SDLoc &dl) const {
  if (Flags.isSExt())
    ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
                         DAG.getValueType(ObjectVT));
  else if (Flags.isZExt())
    ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
                         DAG.getValueType(ObjectVT));

  return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
}

SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  // TODO: add description of PPC stack frame format, or at least some docs.
  //
  bool isELFv2ABI = Subtarget.isELFv2ABI();
  bool isLittleEndian = Subtarget.isLittleEndian();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  assert(!(CallConv == CallingConv::Fast && isVarArg) &&
         "fastcc not supported on varargs functions");

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = 8;
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = array_lengthof(GPR);
  const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
  const unsigned Num_VR_Regs = array_lengthof(VR);
  const unsigned Num_QFPR_Regs = Num_FPR_Regs;

  // Do a first pass over the arguments to determine whether the ABI
  // guarantees that our caller has allocated the parameter save area
  // on its stack frame.  In the ELFv1 ABI, this is always the case;
  // in the ELFv2 ABI, it is true if this is a vararg function or if
  // any parameter is located in a stack slot.
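  // Illustrative example: under ELFv2, a non-vararg function taking fourteen
  // doubles runs out of the 13 available FPRs, so the fourteenth argument
  // lands in a stack slot and the parameter save area must exist.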

  bool HasParameterArea = !isELFv2ABI || isVarArg;
  unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = Num_FPR_Regs;
  unsigned AvailableVRs = Num_VR_Regs;
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    if (Ins[i].Flags.isNest())
      continue;

    if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
                               PtrByteSize, LinkageSize, ParamAreaSize,
                               NumBytes, AvailableFPRs, AvailableVRs,
                               Subtarget.hasQPX()))
      HasParameterArea = true;
  }

  // Add DAG nodes to load the arguments or copy them out of registers.  On
  // entry to a function on PPC, the arguments start after the linkage area,
  // although the first ones are often in registers.

  unsigned ArgOffset = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  unsigned &QFPR_idx = FPR_idx;
  SmallVector<SDValue, 8> MemOps;
  Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    SDValue ArgVal;
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    EVT OrigVT = Ins[ArgNo].ArgVT;
    unsigned ObjSize = ObjectVT.getStoreSize();
    unsigned ArgSize = ObjSize;
    ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
    if (Ins[ArgNo].isOrigArg()) {
      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
    }
    // We re-align the argument offset for each argument, except when using the
    // fast calling convention, when we need to make sure we do that only when
    // we'll actually use a stack slot.
    unsigned CurArgOffset, Align;
    auto ComputeArgOffset = [&]() {
      /* Respect alignment of argument on the stack. */
      Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
      ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
      CurArgOffset = ArgOffset;
    };

    if (CallConv != CallingConv::Fast) {
      ComputeArgOffset();

      /* Compute GPR index associated with argument offset. */
      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
    }

    // FIXME: the codegen can be much improved in some cases.
    // We do not have to keep everything in memory.
    if (Flags.isByVal()) {
      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");

      if (CallConv == CallingConv::Fast)
        ComputeArgOffset();

      // ObjSize is the true size; ArgSize is rounded up to a multiple of
      // registers.
      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      // Empty aggregate parameters do not take up registers.  Examples:
      //   struct { } a;
      //   union  { } b;
      //   int c[0];
      // etc.  However, we have to provide a place-holder in InVals, so
      // pretend we have an 8-byte item at the current address for that
      // purpose.
      if (!ObjSize) {
        int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
        SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
        InVals.push_back(FIN);
        continue;
      }

      // Create a stack object covering all stack doublewords occupied
      // by the argument.  If the argument is (fully or partially) on
      // the stack, or if the argument is fully in registers but the
      // caller has allocated the parameter save area anyway, we can refer
      // directly to the caller's stack frame.  Otherwise, create a
      // local copy in our own frame.
      int FI;
      if (HasParameterArea ||
          ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
        FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
      else
        FI = MFI.CreateStackObject(ArgSize, Align, false);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);

      // Handle aggregates smaller than 8 bytes.
      if (ObjSize < PtrByteSize) {
        // The value of the object is its address, which differs from the
        // address of the enclosing doubleword on big-endian systems.
        SDValue Arg = FIN;
        if (!isLittleEndian) {
          SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
          Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
        }
        InVals.push_back(Arg);

        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
          FuncInfo->addLiveInAttr(VReg, Flags);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          SDValue Store;

          if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
            EVT ObjType = (ObjSize == 1 ? MVT::i8 :
                           (ObjSize == 2 ? MVT::i16 : MVT::i32));
            Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
                                      MachinePointerInfo(&*FuncArg), ObjType);
          } else {
            // For sizes that don't fit a truncating store (3, 5, 6, 7),
            // store the whole register as-is to the parameter save area
            // slot.
            Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                 MachinePointerInfo(&*FuncArg));
          }

          MemOps.push_back(Store);
        }
        // Whether we copied from a register or not, advance the offset
        // into the parameter save area by a full doubleword.
        ArgOffset += PtrByteSize;
        continue;
      }

      // The value of the object is its address, which is the address of
      // its first stack doubleword.
      InVals.push_back(FIN);

      // Store whatever pieces of the object are in registers to memory.
      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        if (GPR_idx == Num_GPR_Regs)
          break;

        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
        FuncInfo->addLiveInAttr(VReg, Flags);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
        SDValue Addr = FIN;
        if (j) {
          SDValue Off = DAG.getConstant(j, dl, PtrVT);
          Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
        }
        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
                                     MachinePointerInfo(&*FuncArg, j));
        MemOps.push_back(Store);
        ++GPR_idx;
      }
      ArgOffset += ArgSize;
      continue;
    }

    switch (ObjectVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled argument type!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (Flags.isNest()) {
        // The 'nest' parameter, if any, is passed in R11.
        unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);

        break;
      }

      // These can be scalar arguments or elements of an integer array type
      // passed directly.  Clang may use those instead of "byval" aggregate
      // types to avoid forcing arguments to memory unnecessarily.
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
        FuncInfo->addLiveInAttr(VReg, Flags);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
          // value to MVT::i64 and then truncate to the correct register size.
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();

        needsLoad = true;
        ArgSize = PtrByteSize;
      }
      if (CallConv != CallingConv::Fast || needsLoad)
        ArgOffset += 8;
      break;

    case MVT::f32:
    case MVT::f64:
      // These can be scalar arguments or elements of a float array type
      // passed directly.  The latter are used to implement ELFv2 homogeneous
      // float aggregates.
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;

        if (ObjectVT == MVT::f32)
          VReg = MF.addLiveIn(FPR[FPR_idx],
                              Subtarget.hasP8Vector()
                                  ? &PPC::VSSRCRegClass
                                  : &PPC::F4RCRegClass);
        else
          VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
                                                ? &PPC::VSFRCRegClass
                                                : &PPC::F8RCRegClass);

        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++FPR_idx;
      } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
        // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
        // once we support fp <-> gpr moves.

        // This can only ever happen in the presence of f32 array types,
        // since otherwise we never run out of FPRs before running out
        // of GPRs.
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
        FuncInfo->addLiveInAttr(VReg, Flags);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::f32) {
          if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
            ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
                                 DAG.getConstant(32, dl, MVT::i32));
          ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
        }

        ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();

        needsLoad = true;
      }

      // When passing an array of floats, the array occupies consecutive
      // space in the argument area; only round up to the next doubleword
      // at the end of the array.  Otherwise, each float takes 8 bytes.
      if (CallConv != CallingConv::Fast || needsLoad) {
        ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
        ArgOffset += ArgSize;
        if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      }
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
    case MVT::v2f64:
    case MVT::v2i64:
    case MVT::v1i128:
    case MVT::f128:
      if (!Subtarget.hasQPX()) {
        // These can be scalar arguments or elements of a vector array type
        // passed directly.  The latter are used to implement ELFv2 homogeneous
        // vector aggregates.
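        // Illustrative example (hypothetical C type): under ELFv2 a
        // homogeneous aggregate such as struct { vector int a, b; } may be
        // passed as two direct v4i32 elements in consecutive VRs rather than
        // as a byval block.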
        if (VR_idx != Num_VR_Regs) {
          unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
          ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
          ++VR_idx;
        } else {
          if (CallConv == CallingConv::Fast)
            ComputeArgOffset();
          needsLoad = true;
        }
        if (CallConv != CallingConv::Fast || needsLoad)
          ArgOffset += 16;
        break;
      } // not QPX

      assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 &&
             "Invalid QPX parameter type");
      /* fall through */

    case MVT::v4f64:
    case MVT::v4i1:
      // QPX vectors are treated like their scalar floating-point subregisters
      // (except that they're larger).
      unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32;
      if (QFPR_idx != Num_QFPR_Regs) {
        const TargetRegisterClass *RC;
        switch (ObjectVT.getSimpleVT().SimpleTy) {
        case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
        case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
        default:         RC = &PPC::QBRCRegClass; break;
        }

        unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++QFPR_idx;
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();
        needsLoad = true;
      }
      if (CallConv != CallingConv::Fast || needsLoad)
        ArgOffset += Sz;
      break;
    }

    // We need to load the argument to a virtual register if we determined
    // above that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      if (ObjSize < ArgSize && !isLittleEndian)
        CurArgOffset += ArgSize - ObjSize;
      int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
    }

    InVals.push_back(ArgVal);
  }

  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea;
  if (HasParameterArea)
    MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
  else
    MinReservedArea = LinkageSize;

  // Set the size that is at least reserved in caller of this function.  Tail
  // call optimized functions' reserved stack space needs to be aligned so that
  // taking the difference between two stack areas will result in an aligned
  // stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    int Depth = ArgOffset;

    FuncInfo->setVarArgsFrameIndex(
        MFI.CreateFixedObject(PtrByteSize, Depth, true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
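    // Illustrative example (hypothetical signature): for a vararg function
    // int f(const char *fmt, ...), only X3 is consumed by a named argument,
    // so the loop below spills X4..X10 to their home slots in the parameter
    // save area, where va_arg can walk them in memory.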
    for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
         GPR_idx < Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by eight for the next argument to store.
      SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  // TODO: add description of PPC stack frame format, or at least some docs.
  //
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = isPPC64 ? 8 : 4;
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned ArgOffset = LinkageSize;
  // Area that is at least reserved in caller of this function.
  unsigned MinReservedArea = ArgOffset;

  static const MCPhysReg GPR_32[] = {           // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const MCPhysReg GPR_64[] = {           // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
  const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
  const unsigned Num_VR_Regs  = array_lengthof(VR);

  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;

  // In 32-bit non-varargs functions, the stack space for vectors is after the
  // stack space for non-vectors.  We do not use this space unless we have
  // too many vectors to fit in registers, something that only occurs in
  // constructed examples :), but we have to walk the arglist to figure
  // that out... for the pathological case, compute VecArgOffset as the
  // start of the vector parameter area.  Computing VecArgOffset is the
  // entire point of the following loop.
  unsigned VecArgOffset = ArgOffset;
  if (!isVarArg && !isPPC64) {
    for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
         ++ArgNo) {
      EVT ObjectVT = Ins[ArgNo].VT;
      ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;

      if (Flags.isByVal()) {
        // ObjSize is the true size; ArgSize is rounded up to a multiple of
        // registers.
        unsigned ObjSize = Flags.getByValSize();
        unsigned ArgSize =
          ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
        VecArgOffset += ArgSize;
        continue;
      }

      switch(ObjectVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unhandled argument type!");
      case MVT::i1:
      case MVT::i32:
      case MVT::f32:
        VecArgOffset += 4;
        break;
      case MVT::i64:  // PPC64
      case MVT::f64:
        // FIXME: We are guaranteed to be !isPPC64 at this point.
        // Does MVT::i64 apply?
        VecArgOffset += 8;
        break;
      case MVT::v4f32:
      case MVT::v4i32:
      case MVT::v8i16:
      case MVT::v16i8:
        // Nothing to do, we're only looking at non-vector args here.
        break;
      }
    }
  }
  // We've found where the vector parameter area in memory is.  Skip the
  // first 12 parameters; these don't use that memory.
  VecArgOffset = ((VecArgOffset+15)/16)*16;
  VecArgOffset += 12*16;

  // Add DAG nodes to load the arguments or copy them out of registers.  On
  // entry to a function on PPC, the arguments start after the linkage area,
  // although the first ones are often in registers.

  SmallVector<SDValue, 8> MemOps;
  unsigned nAltivecParamsAtEnd = 0;
  Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    SDValue ArgVal;
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    unsigned ObjSize = ObjectVT.getSizeInBits()/8;
    unsigned ArgSize = ObjSize;
    ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
    if (Ins[ArgNo].isOrigArg()) {
      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
    }
    unsigned CurArgOffset = ArgOffset;

    // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
    if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
        ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
      if (isVarArg || isPPC64) {
        MinReservedArea = ((MinReservedArea+15)/16)*16;
        MinReservedArea += CalculateStackSlotSize(ObjectVT,
                                                  Flags,
                                                  PtrByteSize);
      } else  nAltivecParamsAtEnd++;
    } else
      // Calculate min reserved area.
      MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
                                                Flags,
                                                PtrByteSize);

    // FIXME: the codegen can be much improved in some cases.
    // We do not have to keep everything in memory.
    if (Flags.isByVal()) {
      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");

      // ObjSize is the true size; ArgSize is rounded up to a multiple of
      // registers.
      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      // Objects of size 1 and 2 are right justified, everything else is
      // left justified.  This means the memory address is adjusted forwards.
      if (ObjSize==1 || ObjSize==2) {
        CurArgOffset = CurArgOffset + (4 - ObjSize);
      }
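      // Illustrative note: a 1-byte byval object still occupies a 4-byte
      // slot, and the (4 - ObjSize) adjustment above makes the computed
      // address point at the byte that actually holds the data.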
      // The value of the object is its address.
      int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(FIN);
      if (ObjSize==1 || ObjSize==2) {
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg;
          if (isPPC64)
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          else
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
          SDValue Store =
              DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
                                MachinePointerInfo(&*FuncArg), ObjType);
          MemOps.push_back(Store);
          ++GPR_idx;
        }

        ArgOffset += PtrByteSize;

        continue;
      }
      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        // Store whatever pieces of the object are in registers
        // to memory.  ArgOffset will be the address of the beginning
        // of the object.
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg;
          if (isPPC64)
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          else
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
          SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                       MachinePointerInfo(&*FuncArg, j));
          MemOps.push_back(Store);
          ++GPR_idx;
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
          break;
        }
      }
      continue;
    }

    switch (ObjectVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled argument type!");
    case MVT::i1:
    case MVT::i32:
      if (!isPPC64) {
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);

          if (ObjectVT == MVT::i1)
            ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);

          ++GPR_idx;
        } else {
          needsLoad = true;
          ArgSize = PtrByteSize;
        }
        // All int arguments reserve stack space in the Darwin ABI.
        ArgOffset += PtrByteSize;
        break;
      }
      LLVM_FALLTHROUGH;
    case MVT::i64:  // PPC64
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
          // value to MVT::i64 and then truncate to the correct register size.
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);

        ++GPR_idx;
      } else {
        needsLoad = true;
        ArgSize = PtrByteSize;
      }
      // All int arguments reserve stack space in the Darwin ABI.
      ArgOffset += 8;
      break;

    case MVT::f32:
    case MVT::f64:
      // Every 4 bytes of argument space consumes one of the GPRs available for
      // argument passing.
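      // Illustrative example (hypothetical C signature): in the 32-bit Darwin
      // ABI, for f(float a, int b), a arrives in F1 but still shadows r3, so
      // b is assigned r4; a double would shadow two GPRs instead.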
      if (GPR_idx != Num_GPR_Regs) {
        ++GPR_idx;
        if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
          ++GPR_idx;
      }
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;

        if (ObjectVT == MVT::f32)
          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
        else
          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);

        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++FPR_idx;
      } else {
        needsLoad = true;
      }

      // All FP arguments reserve stack space in the Darwin ABI.
      ArgOffset += isPPC64 ? 8 : ObjSize;
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      // Note that vector arguments in registers don't reserve stack space,
      // except in varargs functions.
      if (VR_idx != Num_VR_Regs) {
        unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        if (isVarArg) {
          while ((ArgOffset % 16) != 0) {
            ArgOffset += PtrByteSize;
            if (GPR_idx != Num_GPR_Regs)
              GPR_idx++;
          }
          ArgOffset += 16;
          GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
        }
        ++VR_idx;
      } else {
        if (!isVarArg && !isPPC64) {
          // Vectors go after all the nonvectors.
          CurArgOffset = VecArgOffset;
          VecArgOffset += 16;
        } else {
          // Vectors are aligned.
          ArgOffset = ((ArgOffset+15)/16)*16;
          CurArgOffset = ArgOffset;
          ArgOffset += 16;
        }
        needsLoad = true;
      }
      break;
    }

    // We need to load the argument to a virtual register if we determined above
    // that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      int FI = MFI.CreateFixedObject(ObjSize,
                                     CurArgOffset + (ArgSize - ObjSize),
                                     isImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
    }

    InVals.push_back(ArgVal);
  }

  // Allow for Altivec parameters at the end, if needed.
  if (nAltivecParamsAtEnd) {
    MinReservedArea = ((MinReservedArea+15)/16)*16;
    MinReservedArea += 16*nAltivecParamsAtEnd;
  }

  // Area that is at least reserved in the caller of this function.
  MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);

  // Set the size that is at least reserved in caller of this function.  Tail
  // call optimized functions' reserved stack space needs to be aligned so that
  // taking the difference between two stack areas will result in an aligned
  // stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    int Depth = ArgOffset;

    FuncInfo->setVarArgsFrameIndex(
      MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
                            Depth, true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
4391 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
4392 unsigned VReg;
4393
4394 if (isPPC64)
4395 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4396 else
4397 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4398
4399 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4400 SDValue Store =
4401 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4402 MemOps.push_back(Store);
4403 // Increment the address by the pointer size for the next argument to store.
4404 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4405 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4406 }
4407 }
4408
4409 if (!MemOps.empty())
4410 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4411
4412 return Chain;
4413 }
4414
4415 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4416 /// adjusted to accommodate the arguments for the tail call.
4417 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4418 unsigned ParamSize) {
4419
4420 if (!isTailCall) return 0;
4421
4422 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4423 unsigned CallerMinReservedArea = FI->getMinReservedArea();
4424 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4425 // Remember only if the new adjustment is bigger.
4426 if (SPDiff < FI->getTailCallSPDelta())
4427 FI->setTailCallSPDelta(SPDiff);
4428
4429 return SPDiff;
4430 }
4431
4432 static bool isFunctionGlobalAddress(SDValue Callee);
4433
4434 static bool
4435 callsShareTOCBase(const Function *Caller, SDValue Callee,
4436 const TargetMachine &TM) {
4437 // If !G, Callee can be an external symbol.
4438 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4439 if (!G)
4440 return false;
4441
4442 // The medium and large code models are expected to provide a sufficiently
4443 // large TOC to satisfy all data addressing needs of a module with a
4444 // single TOC. Since each module will be addressed with a single TOC, we
4445 // only need to check that caller and callee don't cross DSO boundaries.
4446 if (CodeModel::Medium == TM.getCodeModel() ||
4447 CodeModel::Large == TM.getCodeModel())
4448 return TM.shouldAssumeDSOLocal(*Caller->getParent(), G->getGlobal());
4449
4450 // Otherwise we need to ensure callee and caller are in the same section,
4451 // since the linker may allocate multiple TOCs, and we don't know which
4452 // sections will belong to the same TOC base.
4453
4454 const GlobalValue *GV = G->getGlobal();
4455 if (!GV->isStrongDefinitionForLinker())
4456 return false;
4457
4458 // Any explicitly-specified sections and section prefixes must also match.
4459 // Also, if we're using -ffunction-sections, then each function is always in
4460 // a different section (the same is true for COMDAT functions).
4461 if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4462 GV->getSection() != Caller->getSection())
4463 return false;
4464 if (const auto *F = dyn_cast<Function>(GV)) {
4465 if (F->getSectionPrefix() != Caller->getSectionPrefix())
4466 return false;
4467 }
4468
4469 // If the callee might be interposed, then we can't assume the ultimate call
4470 // target will be in the same section. Even in cases where we can assume that
4471 // interposition won't happen, in any case where the linker might insert a
4472 // stub to allow for interposition, we must generate code as though
4473 // interposition might occur.
// To understand why this matters, consider a
4474 // situation where: a -> b -> c where the arrows indicate calls. b and c are
4475 // in the same section, but a is in a different module (i.e. has a different
4476 // TOC base pointer). If the linker allows for interposition between b and c,
4477 // then it will generate a stub for the call edge between b and c which will
4478 // save the TOC pointer into the designated stack slot allocated by b. If we
4479 // return true here, and therefore allow a tail call between b and c, that
4480 // stack slot won't exist and the b -> c stub will end up saving b's TOC base
4481 // pointer into the stack slot allocated by a (where the a -> b stub saved
4482 // a's TOC base pointer). If we're not considering a tail call, but rather
4483 // whether a nop is needed after the call instruction in b because the linker
4484 // will insert a stub, the linker might complain about a missing nop if we
4485 // omit it (although many don't complain in this case).
4486 if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4487 return false;
4488
4489 return true;
4490 }
4491
4492 static bool
4493 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4494 const SmallVectorImpl<ISD::OutputArg> &Outs) {
4495 assert(Subtarget.isSVR4ABI() && Subtarget.isPPC64());
4496
4497 const unsigned PtrByteSize = 8;
4498 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4499
4500 static const MCPhysReg GPR[] = {
4501 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4502 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4503 };
4504 static const MCPhysReg VR[] = {
4505 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4506 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4507 };
4508
4509 const unsigned NumGPRs = array_lengthof(GPR);
4510 const unsigned NumFPRs = 13;
4511 const unsigned NumVRs = array_lengthof(VR);
4512 const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4513
4514 unsigned NumBytes = LinkageSize;
4515 unsigned AvailableFPRs = NumFPRs;
4516 unsigned AvailableVRs = NumVRs;
4517
4518 for (const ISD::OutputArg& Param : Outs) {
4519 if (Param.Flags.isNest()) continue;
4520
4521 if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
4522 PtrByteSize, LinkageSize, ParamAreaSize,
4523 NumBytes, AvailableFPRs, AvailableVRs,
4524 Subtarget.hasQPX()))
4525 return true;
4526 }
4527 return false;
4528 }
4529
4530 static bool
4531 hasSameArgumentList(const Function *CallerFn, ImmutableCallSite CS) {
4532 if (CS.arg_size() != CallerFn->arg_size())
4533 return false;
4534
4535 ImmutableCallSite::arg_iterator CalleeArgIter = CS.arg_begin();
4536 ImmutableCallSite::arg_iterator CalleeArgEnd = CS.arg_end();
4537 Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4538
4539 for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4540 const Value* CalleeArg = *CalleeArgIter;
4541 const Value* CallerArg = &(*CallerArgIter);
4542 if (CalleeArg == CallerArg)
4543 continue;
4544
4545 // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4546 // tail call @callee([4 x i64] undef, [4 x i64] %b)
4547 // }
4548 // The 1st argument of the callee is undef and has the same type as the caller's.
4549 if (CalleeArg->getType() == CallerArg->getType() &&
4550 isa<UndefValue>(CalleeArg))
4551 continue;
4552
4553 return false;
4554 }
4555
4556 return true;
4557 }
4558
4559 // Returns true if TCO is possible between the caller's and callee's
4560 // calling conventions.
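// For example: a ccc caller may tail-call either a ccc or a fastcc callee,
// and a fastcc caller may tail-call a fastcc callee, but a fastcc caller
// must not tail-call a ccc callee, since the fastcc frame may reserve less
// stack than the callee expects.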
4561 static bool
4562 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4563 CallingConv::ID CalleeCC) {
4564 // Tail calls are possible with fastcc and ccc.
4565 auto isTailCallableCC = [] (CallingConv::ID CC){
4566 return CC == CallingConv::C || CC == CallingConv::Fast;
4567 };
4568 if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4569 return false;
4570
4571 // We can safely tail call both fastcc and ccc callees from a c calling
4572 // convention caller. If the caller is fastcc, we may have less stack space
4573 // than a non-fastcc caller with the same signature so disable tail-calls in
4574 // that case.
4575 return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4576 }
4577
4578 bool
4579 PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4580 SDValue Callee,
4581 CallingConv::ID CalleeCC,
4582 ImmutableCallSite CS,
4583 bool isVarArg,
4584 const SmallVectorImpl<ISD::OutputArg> &Outs,
4585 const SmallVectorImpl<ISD::InputArg> &Ins,
4586 SelectionDAG& DAG) const {
4587 bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4588
4589 if (DisableSCO && !TailCallOpt) return false;
4590
4591 // Variadic argument functions are not supported.
4592 if (isVarArg) return false;
4593
4594 auto &Caller = DAG.getMachineFunction().getFunction();
4595 // Check that the calling conventions are compatible for TCO.
4596 if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4597 return false;
4598
4599 // A caller with any byval parameters is not supported.
4600 if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4601 return false;
4602
4603 // Likewise, a callee with any byval parameters is not supported.
4604 // Note: This is a quick workaround, because in some cases, e.g.
4605 // caller's stack size > callee's stack size, we are still able to apply
4606 // sibling call optimization. For example, gcc is able to do SCO for caller1
4607 // in the following example, but not for caller2.
4608 // struct test {
4609 // long int a;
4610 // char ary[56];
4611 // } gTest;
4612 // __attribute__((noinline)) int callee(struct test v, struct test *b) {
4613 // b->a = v.a;
4614 // return 0;
4615 // }
4616 // void caller1(struct test a, struct test c, struct test *b) {
4617 // callee(gTest, b); }
4618 // void caller2(struct test *b) { callee(gTest, b); }
4619 if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4620 return false;
4621
4622 // If callee and caller use different calling conventions, we cannot pass
4623 // parameters on stack since offsets for the parameter area may be different.
4624 if (Caller.getCallingConv() != CalleeCC &&
4625 needStackSlotPassParameters(Subtarget, Outs))
4626 return false;
4627
4628 // No TCO/SCO on indirect calls because the caller has to restore its TOC.
4629 if (!isFunctionGlobalAddress(Callee) &&
4630 !isa<ExternalSymbolSDNode>(Callee))
4631 return false;
4632
4633 // If the caller and callee potentially have different TOC bases then we
4634 // cannot tail call since we need to restore the TOC pointer after the call.
4635 // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4636 if (!callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4637 return false;
4638
4639 // TCO allows altering the callee ABI, so we don't have to check further.
4640 if (CalleeCC == CallingConv::Fast && TailCallOpt)
4641 return true;
4642
4643 if (DisableSCO) return false;
4644
4645 // If the callee uses the same argument list as the caller, we can apply
4646 // SCO on this call. Otherwise, we need to check whether the callee needs
4647 // stack slots for passing arguments.
4648 if (!hasSameArgumentList(&Caller, CS) &&
4649 needStackSlotPassParameters(Subtarget, Outs)) {
4650 return false;
4651 }
4652
4653 return true;
4654 }
4655
4656 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4657 /// for tail call optimization. Targets which want to do tail call
4658 /// optimization should implement this function.
4659 bool
4660 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4661 CallingConv::ID CalleeCC,
4662 bool isVarArg,
4663 const SmallVectorImpl<ISD::InputArg> &Ins,
4664 SelectionDAG& DAG) const {
4665 if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4666 return false;
4667
4668 // Variable argument functions are not supported.
4669 if (isVarArg)
4670 return false;
4671
4672 MachineFunction &MF = DAG.getMachineFunction();
4673 CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4674 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4675 // Functions containing byval parameters are not supported.
4676 for (unsigned i = 0; i != Ins.size(); i++) {
4677 ISD::ArgFlagsTy Flags = Ins[i].Flags;
4678 if (Flags.isByVal()) return false;
4679 }
4680
4681 // Non-PIC/GOT tail calls are supported.
4682 if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4683 return true;
4684
4685 // At the moment we can only do local tail calls (in same module, hidden
4686 // or protected) if we are generating PIC.
4687 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4688 return G->getGlobal()->hasHiddenVisibility()
4689 || G->getGlobal()->hasProtectedVisibility();
4690 }
4691
4692 return false;
4693 }
4694
4695 /// isBLACompatibleAddress - Return the immediate to use if the specified
4696 /// 32-bit value is representable in the immediate field of a BxA instruction.
4697 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4698 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4699 if (!C) return nullptr;
4700
4701 int Addr = C->getZExtValue();
4702 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
4703 SignExtend32<26>(Addr) != Addr)
4704 return nullptr; // Top 6 bits have to be sext of immediate.
4705
4706 return DAG
4707 .getConstant(
4708 (int)C->getZExtValue() >> 2, SDLoc(Op),
4709 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
4710 .getNode();
4711 }
4712
4713 namespace {
4714
4715 struct TailCallArgumentInfo {
4716 SDValue Arg;
4717 SDValue FrameIdxOp;
4718 int FrameIdx = 0;
4719
4720 TailCallArgumentInfo() = default;
4721 };
4722
4723 } // end anonymous namespace
4724
4725 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
4726 static void StoreTailCallArgumentsToStackSlot(
4727 SelectionDAG &DAG, SDValue Chain,
4728 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
4729 SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
4730 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
4731 SDValue Arg = TailCallArgs[i].Arg;
4732 SDValue FIN = TailCallArgs[i].FrameIdxOp;
4733 int FI = TailCallArgs[i].FrameIdx;
4734 // Store relative to the frame pointer.
4735 MemOpChains.push_back(DAG.getStore(
4736 Chain, dl, Arg, FIN,
4737 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4738 }
4739 }
4740
4741 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
4742 /// the appropriate stack slot for the tail call optimized function call.
4743 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
4744 SDValue OldRetAddr, SDValue OldFP,
4745 int SPDiff, const SDLoc &dl) {
4746 if (SPDiff) {
4747 // Calculate the new stack slot for the return address.
4748 MachineFunction &MF = DAG.getMachineFunction();
4749 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
4750 const PPCFrameLowering *FL = Subtarget.getFrameLowering();
4751 bool isPPC64 = Subtarget.isPPC64();
4752 int SlotSize = isPPC64 ? 8 : 4;
4753 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
4754 int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
4755 NewRetAddrLoc, true);
4756 EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4757 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
4758 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
4759 MachinePointerInfo::getFixedStack(MF, NewRetAddr));
4760
4761 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack
4762 // slot as the FP is never overwritten.
4763 if (Subtarget.isDarwinABI()) {
4764 int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset();
4765 int NewFPIdx = MF.getFrameInfo().CreateFixedObject(SlotSize, NewFPLoc,
4766 true);
4767 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
4768 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
4769 MachinePointerInfo::getFixedStack(
4770 DAG.getMachineFunction(), NewFPIdx));
4771 }
4772 }
4773 return Chain;
4774 }
4775
4776 /// CalculateTailCallArgDest - Remember the argument for later processing.
4777 /// Calculate the position of the argument.
4778 static void
4779 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
4780 SDValue Arg, int SPDiff, unsigned ArgOffset,
4781 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
4782 int Offset = ArgOffset + SPDiff;
4783 uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
4784 int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4785 EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4786 SDValue FIN = DAG.getFrameIndex(FI, VT);
4787 TailCallArgumentInfo Info;
4788 Info.Arg = Arg;
4789 Info.FrameIdxOp = FIN;
4790 Info.FrameIdx = FI;
4791 TailCallArguments.push_back(Info);
4792 }
4793
4794 /// EmitTailCallLoadFPAndRetAddr - Emit loads of the return address and frame
4795 /// pointer from their stack slots. Returns the chain as result and the loaded
4796 /// values in LROpOut/FPOpOut. Used when tail calling.
4797 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
4798 SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
4799 SDValue &FPOpOut, const SDLoc &dl) const {
4800 if (SPDiff) {
4801 // Load the LR and FP stack slot for later adjusting.
4802 EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
4803 LROpOut = getReturnAddrFrameIndex(DAG);
4804 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
4805 Chain = SDValue(LROpOut.getNode(), 1);
4806
4807 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
4808 // slot as the FP is never overwritten.
4809 if (Subtarget.isDarwinABI()) { 4810 FPOpOut = getFramePointerFrameIndex(DAG); 4811 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo()); 4812 Chain = SDValue(FPOpOut.getNode(), 1); 4813 } 4814 } 4815 return Chain; 4816 } 4817 4818 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 4819 /// by "Src" to address "Dst" of size "Size". Alignment information is 4820 /// specified by the specific parameter attribute. The copy will be passed as 4821 /// a byval function parameter. 4822 /// Sometimes what we are copying is the end of a larger object, the part that 4823 /// does not fit in registers. 4824 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, 4825 SDValue Chain, ISD::ArgFlagsTy Flags, 4826 SelectionDAG &DAG, const SDLoc &dl) { 4827 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32); 4828 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 4829 false, false, false, MachinePointerInfo(), 4830 MachinePointerInfo()); 4831 } 4832 4833 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 4834 /// tail calls. 4835 static void LowerMemOpCallTo( 4836 SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, 4837 SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, 4838 bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains, 4839 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) { 4840 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4841 if (!isTailCall) { 4842 if (isVector) { 4843 SDValue StackPtr; 4844 if (isPPC64) 4845 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 4846 else 4847 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 4848 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4849 DAG.getConstant(ArgOffset, dl, PtrVT)); 4850 } 4851 MemOpChains.push_back( 4852 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 4853 // Calculate and remember argument location. 4854 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 4855 TailCallArguments); 4856 } 4857 4858 static void 4859 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 4860 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp, 4861 SDValue FPOp, 4862 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 4863 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 4864 // might overwrite each other in case of tail call optimization. 4865 SmallVector<SDValue, 8> MemOpChains2; 4866 // Do not flag preceding copytoreg stuff together with the following stuff. 4867 InFlag = SDValue(); 4868 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 4869 MemOpChains2, dl); 4870 if (!MemOpChains2.empty()) 4871 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); 4872 4873 // Store the return address to the appropriate stack slot. 4874 Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl); 4875 4876 // Emit callseq_end just before tailcall node. 4877 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4878 DAG.getIntPtrConstant(0, dl, true), InFlag, dl); 4879 InFlag = Chain.getValue(1); 4880 } 4881 4882 // Is this global address that of a function that can be called by name? (as 4883 // opposed to something that must hold a descriptor for an indirect call). 
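// For illustration (hypothetical IR, sketching the distinction made below):
//   declare void @foo()
//   call void @foo()                      ; Callee is the GlobalAddress of a
//                                         ; function body => returns true.
//   %fp = load void ()*, void ()** @ptr
//   call void %fp()                       ; Callee is an arbitrary pointer
//                                         ; value => returns false.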
4884 static bool isFunctionGlobalAddress(SDValue Callee) { 4885 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 4886 if (Callee.getOpcode() == ISD::GlobalTLSAddress || 4887 Callee.getOpcode() == ISD::TargetGlobalTLSAddress) 4888 return false; 4889 4890 return G->getGlobal()->getValueType()->isFunctionTy(); 4891 } 4892 4893 return false; 4894 } 4895 4896 static unsigned 4897 PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain, 4898 SDValue CallSeqStart, const SDLoc &dl, int SPDiff, bool isTailCall, 4899 bool isPatchPoint, bool hasNest, 4900 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, 4901 SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys, 4902 ImmutableCallSite CS, const PPCSubtarget &Subtarget) { 4903 bool isPPC64 = Subtarget.isPPC64(); 4904 bool isSVR4ABI = Subtarget.isSVR4ABI(); 4905 bool isELFv2ABI = Subtarget.isELFv2ABI(); 4906 4907 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4908 NodeTys.push_back(MVT::Other); // Returns a chain 4909 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. 4910 4911 unsigned CallOpc = PPCISD::CALL; 4912 4913 bool needIndirectCall = true; 4914 if (!isSVR4ABI || !isPPC64) 4915 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { 4916 // If this is an absolute destination address, use the munged value. 4917 Callee = SDValue(Dest, 0); 4918 needIndirectCall = false; 4919 } 4920 4921 // PC-relative references to external symbols should go through $stub, unless 4922 // we're building with the leopard linker or later, which automatically 4923 // synthesizes these stubs. 4924 const TargetMachine &TM = DAG.getTarget(); 4925 const Module *Mod = DAG.getMachineFunction().getFunction().getParent(); 4926 const GlobalValue *GV = nullptr; 4927 if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) 4928 GV = G->getGlobal(); 4929 bool Local = TM.shouldAssumeDSOLocal(*Mod, GV); 4930 bool UsePlt = !Local && Subtarget.isTargetELF() && !isPPC64; 4931 4932 if (isFunctionGlobalAddress(Callee)) { 4933 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee); 4934 // A call to a TLS address is actually an indirect call to a 4935 // thread-specific pointer. 4936 unsigned OpFlags = 0; 4937 if (UsePlt) 4938 OpFlags = PPCII::MO_PLT; 4939 4940 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, 4941 // every direct call is) turn it into a TargetGlobalAddress / 4942 // TargetExternalSymbol node so that legalize doesn't hack it. 4943 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, 4944 Callee.getValueType(), 0, OpFlags); 4945 needIndirectCall = false; 4946 } 4947 4948 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 4949 unsigned char OpFlags = 0; 4950 4951 if (UsePlt) 4952 OpFlags = PPCII::MO_PLT; 4953 4954 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(), 4955 OpFlags); 4956 needIndirectCall = false; 4957 } 4958 4959 if (isPatchPoint) { 4960 // We'll form an invalid direct call when lowering a patchpoint; the full 4961 // sequence for an indirect call is complicated, and many of the 4962 // instructions introduced might have side effects (and, thus, can't be 4963 // removed later). The call itself will be removed as soon as the 4964 // argument/return lowering is complete, so the fact that it has the wrong 4965 // kind of operands should not really matter. 4966 needIndirectCall = false; 4967 } 4968 4969 if (needIndirectCall) { 4970 // Otherwise, this is an indirect call. 
We have to use a MTCTR/BCTRL pair 4971 // to do the call, we can't use PPCISD::CALL. 4972 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 4973 4974 if (isSVR4ABI && isPPC64 && !isELFv2ABI) { 4975 // Function pointers in the 64-bit SVR4 ABI do not point to the function 4976 // entry point, but to the function descriptor (the function entry point 4977 // address is part of the function descriptor though). 4978 // The function descriptor is a three doubleword structure with the 4979 // following fields: function entry point, TOC base address and 4980 // environment pointer. 4981 // Thus for a call through a function pointer, the following actions need 4982 // to be performed: 4983 // 1. Save the TOC of the caller in the TOC save area of its stack 4984 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). 4985 // 2. Load the address of the function entry point from the function 4986 // descriptor. 4987 // 3. Load the TOC of the callee from the function descriptor into r2. 4988 // 4. Load the environment pointer from the function descriptor into 4989 // r11. 4990 // 5. Branch to the function entry point address. 4991 // 6. On return of the callee, the TOC of the caller needs to be 4992 // restored (this is done in FinishCall()). 4993 // 4994 // The loads are scheduled at the beginning of the call sequence, and the 4995 // register copies are flagged together to ensure that no other 4996 // operations can be scheduled in between. E.g. without flagging the 4997 // copies together, a TOC access in the caller could be scheduled between 4998 // the assignment of the callee TOC and the branch to the callee, which 4999 // results in the TOC access going through the TOC of the callee instead 5000 // of going through the TOC of the caller, which leads to incorrect code. 5001 5002 // Load the address of the function entry point from the function 5003 // descriptor. 5004 SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1); 5005 if (LDChain.getValueType() == MVT::Glue) 5006 LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2); 5007 5008 auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors() 5009 ? (MachineMemOperand::MODereferenceable | 5010 MachineMemOperand::MOInvariant) 5011 : MachineMemOperand::MONone; 5012 5013 MachinePointerInfo MPI(CS ? CS.getCalledValue() : nullptr); 5014 SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI, 5015 /* Alignment = */ 8, MMOFlags); 5016 5017 // Load environment pointer into r11. 5018 SDValue PtrOff = DAG.getIntPtrConstant(16, dl); 5019 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 5020 SDValue LoadEnvPtr = 5021 DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, MPI.getWithOffset(16), 5022 /* Alignment = */ 8, MMOFlags); 5023 5024 SDValue TOCOff = DAG.getIntPtrConstant(8, dl); 5025 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff); 5026 SDValue TOCPtr = 5027 DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, MPI.getWithOffset(8), 5028 /* Alignment = */ 8, MMOFlags); 5029 5030 setUsesTOCBasePtr(DAG); 5031 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr, 5032 InFlag); 5033 Chain = TOCVal.getValue(0); 5034 InFlag = TOCVal.getValue(1); 5035 5036 // If the function call has an explicit 'nest' parameter, it takes the 5037 // place of the environment pointer. 
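// (For context: 'nest' marks a static-chain argument, e.g. the hidden frame
// pointer handed to a nested-function trampoline. Such an argument is passed
// in r11, which is why the environment-pointer copy into r11 is skipped
// below when hasNest is set.)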
5038 if (!hasNest) { 5039 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 5040 InFlag); 5041 5042 Chain = EnvVal.getValue(0); 5043 InFlag = EnvVal.getValue(1); 5044 } 5045 5046 MTCTROps[0] = Chain; 5047 MTCTROps[1] = LoadFuncPtr; 5048 MTCTROps[2] = InFlag; 5049 } 5050 5051 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, 5052 makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2)); 5053 InFlag = Chain.getValue(1); 5054 5055 NodeTys.clear(); 5056 NodeTys.push_back(MVT::Other); 5057 NodeTys.push_back(MVT::Glue); 5058 Ops.push_back(Chain); 5059 CallOpc = PPCISD::BCTRL; 5060 Callee.setNode(nullptr); 5061 // Add use of X11 (holding environment pointer) 5062 if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest) 5063 Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); 5064 // Add CTR register as callee so a bctr can be emitted later. 5065 if (isTailCall) 5066 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 5067 } 5068 5069 // If this is a direct call, pass the chain and the callee. 5070 if (Callee.getNode()) { 5071 Ops.push_back(Chain); 5072 Ops.push_back(Callee); 5073 } 5074 // If this is a tail call add stack pointer delta. 5075 if (isTailCall) 5076 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32)); 5077 5078 // Add argument registers to the end of the list so that they are known live 5079 // into the call. 5080 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 5081 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 5082 RegsToPass[i].second.getValueType())); 5083 5084 // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live 5085 // into the call. 5086 if (isSVR4ABI && isPPC64 && !isPatchPoint) { 5087 setUsesTOCBasePtr(DAG); 5088 Ops.push_back(DAG.getRegister(PPC::X2, PtrVT)); 5089 } 5090 5091 return CallOpc; 5092 } 5093 5094 SDValue PPCTargetLowering::LowerCallResult( 5095 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, 5096 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 5097 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 5098 SmallVector<CCValAssign, 16> RVLocs; 5099 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 5100 *DAG.getContext()); 5101 5102 CCRetInfo.AnalyzeCallResult( 5103 Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 5104 ? RetCC_PPC_Cold 5105 : RetCC_PPC); 5106 5107 // Copy all of the result registers out of their specified physreg. 
5108 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
5109 CCValAssign &VA = RVLocs[i];
5110 assert(VA.isRegLoc() && "Can only return in registers!");
5111
5112 SDValue Val = DAG.getCopyFromReg(Chain, dl,
5113 VA.getLocReg(), VA.getLocVT(), InFlag);
5114 Chain = Val.getValue(1);
5115 InFlag = Val.getValue(2);
5116
5117 switch (VA.getLocInfo()) {
5118 default: llvm_unreachable("Unknown loc info!");
5119 case CCValAssign::Full: break;
5120 case CCValAssign::AExt:
5121 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5122 break;
5123 case CCValAssign::ZExt:
5124 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
5125 DAG.getValueType(VA.getValVT()));
5126 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5127 break;
5128 case CCValAssign::SExt:
5129 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
5130 DAG.getValueType(VA.getValVT()));
5131 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5132 break;
5133 }
5134
5135 InVals.push_back(Val);
5136 }
5137
5138 return Chain;
5139 }
5140
5141 SDValue PPCTargetLowering::FinishCall(
5142 CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg,
5143 bool isPatchPoint, bool hasNest, SelectionDAG &DAG,
5144 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue InFlag,
5145 SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
5146 unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
5147 SmallVectorImpl<SDValue> &InVals, ImmutableCallSite CS) const {
5148 std::vector<EVT> NodeTys;
5149 SmallVector<SDValue, 8> Ops;
5150 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl,
5151 SPDiff, isTailCall, isPatchPoint, hasNest,
5152 RegsToPass, Ops, NodeTys, CS, Subtarget);
5153
5154 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls.
5155 if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64())
5156 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
5157
5158 // When performing tail call optimization the callee pops its arguments off
5159 // the stack. Account for this here so these bytes can be pushed back on in
5160 // PPCFrameLowering::eliminateCallFramePseudoInstr.
5161 int BytesCalleePops =
5162 (CallConv == CallingConv::Fast &&
5163 getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0;
5164
5165 // Add a register mask operand representing the call-preserved registers.
5166 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5167 const uint32_t *Mask =
5168 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
5169 assert(Mask && "Missing call preserved mask for calling convention");
5170 Ops.push_back(DAG.getRegisterMask(Mask));
5171
5172 if (InFlag.getNode())
5173 Ops.push_back(InFlag);
5174
5175 // Emit tail call.
5176 if (isTailCall) {
5177 assert(((Callee.getOpcode() == ISD::Register &&
5178 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5179 Callee.getOpcode() == ISD::TargetExternalSymbol ||
5180 Callee.getOpcode() == ISD::TargetGlobalAddress ||
5181 isa<ConstantSDNode>(Callee)) &&
5182 "Expecting a global address, external symbol, absolute value or register");
5183
5184 DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5185 return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
5186 }
5187
5188 // Add a NOP immediately after the branch instruction when using the 64-bit
5189 // SVR4 ABI.
// At link time, if caller and callee are in different modules and
5190 // thus have different TOCs, the call will be replaced with a call to a stub
5191 // function which saves the current TOC, loads the TOC of the callee and
5192 // branches to the callee. The NOP will be replaced with a load instruction
5193 // which restores the TOC of the caller from the TOC save slot of the current
5194 // stack frame. If caller and callee belong to the same module (and have the
5195 // same TOC), the NOP will remain unchanged.
5196
5197 MachineFunction &MF = DAG.getMachineFunction();
5198 if (!isTailCall && Subtarget.isSVR4ABI() && Subtarget.isPPC64() &&
5199 !isPatchPoint) {
5200 if (CallOpc == PPCISD::BCTRL) {
5201 // This is a call through a function pointer.
5202 // Restore the caller's TOC from the save area into R2.
5203 // See PrepareCall() for more information about calls through function
5204 // pointers in the 64-bit SVR4 ABI.
5205 // We are using a target-specific load with r2 hard coded, because the
5206 // result of a target-independent load would never go directly into r2,
5207 // since r2 is a reserved register (which prevents the register allocator
5208 // from allocating it), resulting in an additional register being
5209 // allocated and an unnecessary move instruction being generated.
5210 CallOpc = PPCISD::BCTRL_LOAD_TOC;
5211
5212 EVT PtrVT = getPointerTy(DAG.getDataLayout());
5213 SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
5214 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5215 SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5216 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);
5217
5218 // The address needs to go after the chain input but before the flag (or
5219 // any other variadic arguments).
5220 Ops.insert(std::next(Ops.begin()), AddTOC);
5221 } else if (CallOpc == PPCISD::CALL &&
5222 !callsShareTOCBase(&MF.getFunction(), Callee, DAG.getTarget())) {
5223 // Otherwise, insert a NOP for non-local calls.
5224 CallOpc = PPCISD::CALL_NOP; 5225 } 5226 } 5227 5228 Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops); 5229 InFlag = Chain.getValue(1); 5230 5231 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 5232 DAG.getIntPtrConstant(BytesCalleePops, dl, true), 5233 InFlag, dl); 5234 if (!Ins.empty()) 5235 InFlag = Chain.getValue(1); 5236 5237 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 5238 Ins, dl, DAG, InVals); 5239 } 5240 5241 SDValue 5242 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 5243 SmallVectorImpl<SDValue> &InVals) const { 5244 SelectionDAG &DAG = CLI.DAG; 5245 SDLoc &dl = CLI.DL; 5246 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 5247 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 5248 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 5249 SDValue Chain = CLI.Chain; 5250 SDValue Callee = CLI.Callee; 5251 bool &isTailCall = CLI.IsTailCall; 5252 CallingConv::ID CallConv = CLI.CallConv; 5253 bool isVarArg = CLI.IsVarArg; 5254 bool isPatchPoint = CLI.IsPatchPoint; 5255 ImmutableCallSite CS = CLI.CS; 5256 5257 if (isTailCall) { 5258 if (Subtarget.useLongCalls() && !(CS && CS.isMustTailCall())) 5259 isTailCall = false; 5260 else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) 5261 isTailCall = 5262 IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS, 5263 isVarArg, Outs, Ins, DAG); 5264 else 5265 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 5266 Ins, DAG); 5267 if (isTailCall) { 5268 ++NumTailCalls; 5269 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 5270 ++NumSiblingCalls; 5271 5272 assert(isa<GlobalAddressSDNode>(Callee) && 5273 "Callee should be an llvm::Function object."); 5274 LLVM_DEBUG( 5275 const GlobalValue *GV = 5276 cast<GlobalAddressSDNode>(Callee)->getGlobal(); 5277 const unsigned Width = 5278 80 - strlen("TCO caller: ") - strlen(", callee linkage: 0, 0"); 5279 dbgs() << "TCO caller: " 5280 << left_justify(DAG.getMachineFunction().getName(), Width) 5281 << ", callee linkage: " << GV->getVisibility() << ", " 5282 << GV->getLinkage() << "\n"); 5283 } 5284 } 5285 5286 if (!isTailCall && CS && CS.isMustTailCall()) 5287 report_fatal_error("failed to perform tail call elimination on a call " 5288 "site marked musttail"); 5289 5290 // When long calls (i.e. indirect calls) are always used, calls are always 5291 // made via function pointer. If we have a function name, first translate it 5292 // into a pointer. 
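// (For example: with long calls in effect, e.g. under -mlongcall, a direct
// `bl foo` might not reach the callee, so foo's address is materialized
// into a register first and the call proceeds indirectly through CTR.)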
5293 if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5294 !isTailCall)
5295 Callee = LowerGlobalAddress(Callee, DAG);
5296
5297 if (Subtarget.isSVR4ABI()) {
5298 if (Subtarget.isPPC64())
5299 return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
5300 isTailCall, isPatchPoint, Outs, OutVals, Ins,
5301 dl, DAG, InVals, CS);
5302 else
5303 return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
5304 isTailCall, isPatchPoint, Outs, OutVals, Ins,
5305 dl, DAG, InVals, CS);
5306 }
5307
5308 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
5309 isTailCall, isPatchPoint, Outs, OutVals, Ins,
5310 dl, DAG, InVals, CS);
5311 }
5312
5313 SDValue PPCTargetLowering::LowerCall_32SVR4(
5314 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
5315 bool isTailCall, bool isPatchPoint,
5316 const SmallVectorImpl<ISD::OutputArg> &Outs,
5317 const SmallVectorImpl<SDValue> &OutVals,
5318 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5319 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5320 ImmutableCallSite CS) const {
5321 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5322 // of the 32-bit SVR4 ABI stack frame layout.
5323
5324 assert((CallConv == CallingConv::C ||
5325 CallConv == CallingConv::Cold ||
5326 CallConv == CallingConv::Fast) && "Unknown calling convention!");
5327
5328 unsigned PtrByteSize = 4;
5329
5330 MachineFunction &MF = DAG.getMachineFunction();
5331
5332 // Mark this function as potentially containing a tail call. As a
5333 // consequence, the frame pointer will be used for dynamic allocas and for
5334 // restoring the caller's stack pointer in this function's epilogue. This is
5335 // done because a tail-called function might overwrite the value in this
5336 // function's (MF) stack pointer save slot at 0(SP).
5337 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5338 CallConv == CallingConv::Fast)
5339 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5340
5341 // Count how many bytes are to be pushed on the stack, including the linkage
5342 // area, the parameter list area, and the part of the local variable space
5343 // that contains copies of aggregates which are passed by value.
5344
5345 // Assign locations to all of the outgoing arguments.
5346 SmallVector<CCValAssign, 16> ArgLocs;
5347 PPCCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
5348
5349 // Reserve space for the linkage area on the stack.
5350 CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5351 PtrByteSize);
5352 if (useSoftFloat())
5353 CCInfo.PreAnalyzeCallOperands(Outs);
5354
5355 if (isVarArg) {
5356 // Handle fixed and variable vector arguments differently.
5357 // Fixed vector arguments go into registers as long as registers are
5358 // available. Variable vector arguments always go into memory.
5359 unsigned NumArgs = Outs.size(); 5360 5361 for (unsigned i = 0; i != NumArgs; ++i) { 5362 MVT ArgVT = Outs[i].VT; 5363 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 5364 bool Result; 5365 5366 if (Outs[i].IsFixed) { 5367 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, 5368 CCInfo); 5369 } else { 5370 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, 5371 ArgFlags, CCInfo); 5372 } 5373 5374 if (Result) { 5375 #ifndef NDEBUG 5376 errs() << "Call operand #" << i << " has unhandled type " 5377 << EVT(ArgVT).getEVTString() << "\n"; 5378 #endif 5379 llvm_unreachable(nullptr); 5380 } 5381 } 5382 } else { 5383 // All arguments are treated the same. 5384 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4); 5385 } 5386 CCInfo.clearWasPPCF128(); 5387 5388 // Assign locations to all of the outgoing aggregate by value arguments. 5389 SmallVector<CCValAssign, 16> ByValArgLocs; 5390 CCState CCByValInfo(CallConv, isVarArg, MF, ByValArgLocs, *DAG.getContext()); 5391 5392 // Reserve stack space for the allocations in CCInfo. 5393 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 5394 5395 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal); 5396 5397 // Size of the linkage area, parameter list area and the part of the local 5398 // space variable where copies of aggregates which are passed by value are 5399 // stored. 5400 unsigned NumBytes = CCByValInfo.getNextStackOffset(); 5401 5402 // Calculate by how many bytes the stack has to be adjusted in case of tail 5403 // call optimization. 5404 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 5405 5406 // Adjust the stack pointer for the new arguments... 5407 // These operations are automatically eliminated by the prolog/epilog pass 5408 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 5409 SDValue CallSeqStart = Chain; 5410 5411 // Load the return address and frame pointer so it can be moved somewhere else 5412 // later. 5413 SDValue LROp, FPOp; 5414 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 5415 5416 // Set up a copy of the stack pointer for use loading and storing any 5417 // arguments that may not fit in the registers available for argument 5418 // passing. 5419 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 5420 5421 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 5422 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 5423 SmallVector<SDValue, 8> MemOpChains; 5424 5425 bool seenFloatArg = false; 5426 // Walk the register/memloc assignments, inserting copies/loads. 5427 for (unsigned i = 0, j = 0, e = ArgLocs.size(); 5428 i != e; 5429 ++i) { 5430 CCValAssign &VA = ArgLocs[i]; 5431 SDValue Arg = OutVals[i]; 5432 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5433 5434 if (Flags.isByVal()) { 5435 // Argument is an aggregate which is passed by value, thus we need to 5436 // create a copy of it in the local variable space of the current stack 5437 // frame (which is the stack frame of the caller) and pass the address of 5438 // this copy to the callee. 5439 assert((j < ByValArgLocs.size()) && "Index out of bounds!"); 5440 CCValAssign &ByValVA = ByValArgLocs[j++]; 5441 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); 5442 5443 // Memory reserved in the local variable space of the callers stack frame. 
5444 unsigned LocMemOffset = ByValVA.getLocMemOffset(); 5445 5446 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 5447 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 5448 StackPtr, PtrOff); 5449 5450 // Create a copy of the argument in the local area of the current 5451 // stack frame. 5452 SDValue MemcpyCall = 5453 CreateCopyOfByValArgument(Arg, PtrOff, 5454 CallSeqStart.getNode()->getOperand(0), 5455 Flags, DAG, dl); 5456 5457 // This must go outside the CALLSEQ_START..END. 5458 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0, 5459 SDLoc(MemcpyCall)); 5460 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 5461 NewCallSeqStart.getNode()); 5462 Chain = CallSeqStart = NewCallSeqStart; 5463 5464 // Pass the address of the aggregate copy on the stack either in a 5465 // physical register or in the parameter list area of the current stack 5466 // frame to the callee. 5467 Arg = PtrOff; 5468 } 5469 5470 // When useCRBits() is true, there can be i1 arguments. 5471 // It is because getRegisterType(MVT::i1) => MVT::i1, 5472 // and for other integer types getRegisterType() => MVT::i32. 5473 // Extend i1 and ensure callee will get i32. 5474 if (Arg.getValueType() == MVT::i1) 5475 Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, 5476 dl, MVT::i32, Arg); 5477 5478 if (VA.isRegLoc()) { 5479 seenFloatArg |= VA.getLocVT().isFloatingPoint(); 5480 // Put argument in a physical register. 5481 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 5482 } else { 5483 // Put argument in the parameter list area of the current stack frame. 5484 assert(VA.isMemLoc()); 5485 unsigned LocMemOffset = VA.getLocMemOffset(); 5486 5487 if (!isTailCall) { 5488 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 5489 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 5490 StackPtr, PtrOff); 5491 5492 MemOpChains.push_back( 5493 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 5494 } else { 5495 // Calculate and remember argument location. 5496 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset, 5497 TailCallArguments); 5498 } 5499 } 5500 } 5501 5502 if (!MemOpChains.empty()) 5503 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5504 5505 // Build a sequence of copy-to-reg nodes chained together with token chain 5506 // and flag operands which copy the outgoing args into the appropriate regs. 5507 SDValue InFlag; 5508 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 5509 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 5510 RegsToPass[i].second, InFlag); 5511 InFlag = Chain.getValue(1); 5512 } 5513 5514 // Set CR bit 6 to true if this is a vararg call with floating args passed in 5515 // registers. 5516 if (isVarArg) { 5517 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 5518 SDValue Ops[] = { Chain, InFlag }; 5519 5520 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET, 5521 dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 
2 : 1));
5522
5523 InFlag = Chain.getValue(1);
5524 }
5525
5526 if (isTailCall)
5527 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5528 TailCallArguments);
5529
5530 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
5531 /* unused except on PPC64 ELFv1 */ false, DAG,
5532 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
5533 NumBytes, Ins, InVals, CS);
5534 }
5535
5536 // Copy an argument into memory, being careful to do this outside the
5537 // call sequence for the call to which the argument belongs.
5538 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5539 SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5540 SelectionDAG &DAG, const SDLoc &dl) const {
5541 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5542 CallSeqStart.getNode()->getOperand(0),
5543 Flags, DAG, dl);
5544 // The MEMCPY must go outside the CALLSEQ_START..END.
5545 int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
5546 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
5547 SDLoc(MemcpyCall));
5548 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5549 NewCallSeqStart.getNode());
5550 return NewCallSeqStart;
5551 }
5552
5553 SDValue PPCTargetLowering::LowerCall_64SVR4(
5554 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
5555 bool isTailCall, bool isPatchPoint,
5556 const SmallVectorImpl<ISD::OutputArg> &Outs,
5557 const SmallVectorImpl<SDValue> &OutVals,
5558 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5559 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5560 ImmutableCallSite CS) const {
5561 bool isELFv2ABI = Subtarget.isELFv2ABI();
5562 bool isLittleEndian = Subtarget.isLittleEndian();
5563 unsigned NumOps = Outs.size();
5564 bool hasNest = false;
5565 bool IsSibCall = false;
5566
5567 EVT PtrVT = getPointerTy(DAG.getDataLayout());
5568 unsigned PtrByteSize = 8;
5569
5570 MachineFunction &MF = DAG.getMachineFunction();
5571
5572 if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
5573 IsSibCall = true;
5574
5575 // Mark this function as potentially containing a tail call. As a
5576 // consequence, the frame pointer will be used for dynamic allocas and for
5577 // restoring the caller's stack pointer in this function's epilogue. This is
5578 // done because a tail-called function might overwrite the value in this
5579 // function's (MF) stack pointer save slot at 0(SP).
5580 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5581 CallConv == CallingConv::Fast)
5582 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5583
5584 assert(!(CallConv == CallingConv::Fast && isVarArg) &&
5585 "fastcc not supported on varargs functions");
5586
5587 // Count how many bytes are to be pushed on the stack, including the linkage
5588 // area and the parameter passing area. On ELFv1, the linkage area is 48 bytes
5589 // of reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
5590 // area is 32 bytes of reserved space for [SP][CR][LR][TOC].
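// As a sketch of those layouts (byte offsets from SP at the call site;
// the standard ELF ABI assignments, shown here for reference):
//   ELFv1: 0 back chain | 8 CR save | 16 LR save | 24, 32 reserved | 40 TOC save
//   ELFv2: 0 back chain | 8 CR save | 16 LR save | 24 TOC save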
5591 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 5592 unsigned NumBytes = LinkageSize; 5593 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 5594 unsigned &QFPR_idx = FPR_idx; 5595 5596 static const MCPhysReg GPR[] = { 5597 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 5598 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 5599 }; 5600 static const MCPhysReg VR[] = { 5601 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 5602 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 5603 }; 5604 5605 const unsigned NumGPRs = array_lengthof(GPR); 5606 const unsigned NumFPRs = useSoftFloat() ? 0 : 13; 5607 const unsigned NumVRs = array_lengthof(VR); 5608 const unsigned NumQFPRs = NumFPRs; 5609 5610 // On ELFv2, we can avoid allocating the parameter area if all the arguments 5611 // can be passed to the callee in registers. 5612 // For the fast calling convention, there is another check below. 5613 // Note: We should keep consistent with LowerFormalArguments_64SVR4() 5614 bool HasParameterArea = !isELFv2ABI || isVarArg || CallConv == CallingConv::Fast; 5615 if (!HasParameterArea) { 5616 unsigned ParamAreaSize = NumGPRs * PtrByteSize; 5617 unsigned AvailableFPRs = NumFPRs; 5618 unsigned AvailableVRs = NumVRs; 5619 unsigned NumBytesTmp = NumBytes; 5620 for (unsigned i = 0; i != NumOps; ++i) { 5621 if (Outs[i].Flags.isNest()) continue; 5622 if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags, 5623 PtrByteSize, LinkageSize, ParamAreaSize, 5624 NumBytesTmp, AvailableFPRs, AvailableVRs, 5625 Subtarget.hasQPX())) 5626 HasParameterArea = true; 5627 } 5628 } 5629 5630 // When using the fast calling convention, we don't provide backing for 5631 // arguments that will be in registers. 5632 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0; 5633 5634 // Avoid allocating parameter area for fastcc functions if all the arguments 5635 // can be passed in the registers. 5636 if (CallConv == CallingConv::Fast) 5637 HasParameterArea = false; 5638 5639 // Add up all the space actually used. 5640 for (unsigned i = 0; i != NumOps; ++i) { 5641 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5642 EVT ArgVT = Outs[i].VT; 5643 EVT OrigVT = Outs[i].ArgVT; 5644 5645 if (Flags.isNest()) 5646 continue; 5647 5648 if (CallConv == CallingConv::Fast) { 5649 if (Flags.isByVal()) { 5650 NumGPRsUsed += (Flags.getByValSize()+7)/8; 5651 if (NumGPRsUsed > NumGPRs) 5652 HasParameterArea = true; 5653 } else { 5654 switch (ArgVT.getSimpleVT().SimpleTy) { 5655 default: llvm_unreachable("Unexpected ValueType for argument!"); 5656 case MVT::i1: 5657 case MVT::i32: 5658 case MVT::i64: 5659 if (++NumGPRsUsed <= NumGPRs) 5660 continue; 5661 break; 5662 case MVT::v4i32: 5663 case MVT::v8i16: 5664 case MVT::v16i8: 5665 case MVT::v2f64: 5666 case MVT::v2i64: 5667 case MVT::v1i128: 5668 case MVT::f128: 5669 if (++NumVRsUsed <= NumVRs) 5670 continue; 5671 break; 5672 case MVT::v4f32: 5673 // When using QPX, this is handled like a FP register, otherwise, it 5674 // is an Altivec register. 5675 if (Subtarget.hasQPX()) { 5676 if (++NumFPRsUsed <= NumFPRs) 5677 continue; 5678 } else { 5679 if (++NumVRsUsed <= NumVRs) 5680 continue; 5681 } 5682 break; 5683 case MVT::f32: 5684 case MVT::f64: 5685 case MVT::v4f64: // QPX 5686 case MVT::v4i1: // QPX 5687 if (++NumFPRsUsed <= NumFPRs) 5688 continue; 5689 break; 5690 } 5691 HasParameterArea = true; 5692 } 5693 } 5694 5695 /* Respect alignment of argument on the stack. 
*/
5696 unsigned Align =
5697 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5698 NumBytes = ((NumBytes + Align - 1) / Align) * Align;
5699
5700 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5701 if (Flags.isInConsecutiveRegsLast())
5702 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5703 }
5704
5705 unsigned NumBytesActuallyUsed = NumBytes;
5706
5707 // In the old ELFv1 ABI,
5708 // the prolog code of the callee may store up to 8 GPR argument registers to
5709 // the stack, allowing va_start to index over them in memory if it is varargs.
5710 // Because we cannot tell if this is needed on the caller side, we have to
5711 // conservatively assume that it is needed. As such, make sure we have at
5712 // least enough stack space for the caller to store the 8 GPRs.
5713 // In the ELFv2 ABI, we allocate the parameter area iff a callee
5714 // really requires memory operands, e.g. a vararg function.
5715 if (HasParameterArea)
5716 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5717 else
5718 NumBytes = LinkageSize;
5719
5720 // Tail calls need the stack to be aligned.
5721 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5722 CallConv == CallingConv::Fast)
5723 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
5724
5725 int SPDiff = 0;
5726
5727 // Calculate by how many bytes the stack has to be adjusted in case of tail
5728 // call optimization.
5729 if (!IsSibCall)
5730 SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
5731
5732 // To protect arguments on the stack from being clobbered in a tail call,
5733 // force all the loads to happen before doing any other lowering.
5734 if (isTailCall)
5735 Chain = DAG.getStackArgumentTokenFactor(Chain);
5736
5737 // Adjust the stack pointer for the new arguments...
5738 // These operations are automatically eliminated by the prolog/epilog pass.
5739 if (!IsSibCall)
5740 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5741 SDValue CallSeqStart = Chain;
5742
5743 // Load the return address and frame pointer so they can be moved somewhere
5744 // else later.
5745 SDValue LROp, FPOp;
5746 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5747
5748 // Set up a copy of the stack pointer for use loading and storing any
5749 // arguments that may not fit in the registers available for argument
5750 // passing.
5751 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5752
5753 // Figure out which arguments are going to go in registers, and which in
5754 // memory. Also, if this is a vararg function, floating point arguments
5755 // must be stored to our stack, and loaded into integer regs as well, if
5756 // any integer regs are available for argument passing.
5757 unsigned ArgOffset = LinkageSize;
5758
5759 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5760 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5761
5762 SmallVector<SDValue, 8> MemOpChains;
5763 for (unsigned i = 0; i != NumOps; ++i) {
5764 SDValue Arg = OutVals[i];
5765 ISD::ArgFlagsTy Flags = Outs[i].Flags;
5766 EVT ArgVT = Outs[i].VT;
5767 EVT OrigVT = Outs[i].ArgVT;
5768
5769 // PtrOff will be used to store the current argument to the stack if a
5770 // register cannot be found for it.
5771 SDValue PtrOff;
5772
5773 // We re-align the argument offset for each argument, except when using the
5774 // fast calling convention, where we need to make sure we do that only when
5775 // we'll actually use a stack slot.
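// For example (illustrative numbers): with ArgOffset == 40 and a
// 16-byte-aligned vector argument, ComputePtrOff rounds ArgOffset up to 48
// before forming the stack address; the skipped padding bytes are dead
// space rather than a slot for some other argument.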
5776 auto ComputePtrOff = [&]() { 5777 /* Respect alignment of argument on the stack. */ 5778 unsigned Align = 5779 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 5780 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 5781 5782 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 5783 5784 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 5785 }; 5786 5787 if (CallConv != CallingConv::Fast) { 5788 ComputePtrOff(); 5789 5790 /* Compute GPR index associated with argument offset. */ 5791 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 5792 GPR_idx = std::min(GPR_idx, NumGPRs); 5793 } 5794 5795 // Promote integers to 64-bit values. 5796 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) { 5797 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 5798 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 5799 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 5800 } 5801 5802 // FIXME memcpy is used way more than necessary. Correctness first. 5803 // Note: "by value" is code for passing a structure by value, not 5804 // basic types. 5805 if (Flags.isByVal()) { 5806 // Note: Size includes alignment padding, so 5807 // struct x { short a; char b; } 5808 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 5809 // These are the proper values we need for right-justifying the 5810 // aggregate in a parameter register. 5811 unsigned Size = Flags.getByValSize(); 5812 5813 // An empty aggregate parameter takes up no storage and no 5814 // registers. 5815 if (Size == 0) 5816 continue; 5817 5818 if (CallConv == CallingConv::Fast) 5819 ComputePtrOff(); 5820 5821 // All aggregates smaller than 8 bytes must be passed right-justified. 5822 if (Size==1 || Size==2 || Size==4) { 5823 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 5824 if (GPR_idx != NumGPRs) { 5825 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 5826 MachinePointerInfo(), VT); 5827 MemOpChains.push_back(Load.getValue(1)); 5828 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5829 5830 ArgOffset += PtrByteSize; 5831 continue; 5832 } 5833 } 5834 5835 if (GPR_idx == NumGPRs && Size < 8) { 5836 SDValue AddPtr = PtrOff; 5837 if (!isLittleEndian) { 5838 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 5839 PtrOff.getValueType()); 5840 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5841 } 5842 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5843 CallSeqStart, 5844 Flags, DAG, dl); 5845 ArgOffset += PtrByteSize; 5846 continue; 5847 } 5848 // Copy entire object into memory. There are cases where gcc-generated 5849 // code assumes it is there, even if it could be put entirely into 5850 // registers. (This is not what the doc says.) 5851 5852 // FIXME: The above statement is likely due to a misunderstanding of the 5853 // documents. All arguments must be copied into the parameter area BY 5854 // THE CALLEE in the event that the callee takes the address of any 5855 // formal argument. That has not yet been implemented. However, it is 5856 // reasonable to use the stack area as a staging area for the register 5857 // load. 5858 5859 // Skip this for small aggregates, as we will use the same slot for a 5860 // right-justified copy, below. 5861 if (Size >= 8) 5862 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5863 CallSeqStart, 5864 Flags, DAG, dl); 5865 5866 // When a register is available, pass a small aggregate right-justified. 
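// For example (a sketch assuming big-endian and PtrByteSize == 8): a 3-byte
// aggregate is copied to offset 8 - Size == 5 within its doubleword slot, so
// the full-slot load below leaves those 3 bytes right-justified in the
// low-order bytes of the GPR:
//
//   unsigned Size = 3;              // aggregate size in bytes
//   unsigned SlotOffset = 8 - Size; // == 5, memcpy destination in the slot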
5867 if (Size < 8 && GPR_idx != NumGPRs) { 5868 // The easiest way to get this right-justified in a register 5869 // is to copy the structure into the rightmost portion of a 5870 // local variable slot, then load the whole slot into the 5871 // register. 5872 // FIXME: The memcpy seems to produce pretty awful code for 5873 // small aggregates, particularly for packed ones. 5874 // FIXME: It would be preferable to use the slot in the 5875 // parameter save area instead of a new local variable. 5876 SDValue AddPtr = PtrOff; 5877 if (!isLittleEndian) { 5878 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType()); 5879 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5880 } 5881 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5882 CallSeqStart, 5883 Flags, DAG, dl); 5884 5885 // Load the slot into the register. 5886 SDValue Load = 5887 DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo()); 5888 MemOpChains.push_back(Load.getValue(1)); 5889 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5890 5891 // Done with this argument. 5892 ArgOffset += PtrByteSize; 5893 continue; 5894 } 5895 5896 // For aggregates larger than PtrByteSize, copy the pieces of the 5897 // object that fit into registers from the parameter save area. 5898 for (unsigned j=0; j<Size; j+=PtrByteSize) { 5899 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 5900 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 5901 if (GPR_idx != NumGPRs) { 5902 SDValue Load = 5903 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo()); 5904 MemOpChains.push_back(Load.getValue(1)); 5905 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5906 ArgOffset += PtrByteSize; 5907 } else { 5908 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 5909 break; 5910 } 5911 } 5912 continue; 5913 } 5914 5915 switch (Arg.getSimpleValueType().SimpleTy) { 5916 default: llvm_unreachable("Unexpected ValueType for argument!"); 5917 case MVT::i1: 5918 case MVT::i32: 5919 case MVT::i64: 5920 if (Flags.isNest()) { 5921 // The 'nest' parameter, if any, is passed in R11. 5922 RegsToPass.push_back(std::make_pair(PPC::X11, Arg)); 5923 hasNest = true; 5924 break; 5925 } 5926 5927 // These can be scalar arguments or elements of an integer array type 5928 // passed directly. Clang may use those instead of "byval" aggregate 5929 // types to avoid forcing arguments to memory unnecessarily. 5930 if (GPR_idx != NumGPRs) { 5931 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 5932 } else { 5933 if (CallConv == CallingConv::Fast) 5934 ComputePtrOff(); 5935 5936 assert(HasParameterArea && 5937 "Parameter area must exist to pass an argument in memory."); 5938 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5939 true, isTailCall, false, MemOpChains, 5940 TailCallArguments, dl); 5941 if (CallConv == CallingConv::Fast) 5942 ArgOffset += PtrByteSize; 5943 } 5944 if (CallConv != CallingConv::Fast) 5945 ArgOffset += PtrByteSize; 5946 break; 5947 case MVT::f32: 5948 case MVT::f64: { 5949 // These can be scalar arguments or elements of a float array type 5950 // passed directly. The latter are used to implement ELFv2 homogenous 5951 // float aggregates. 5952 5953 // Named arguments go into FPRs first, and once they overflow, the 5954 // remaining arguments go into GPRs and then the parameter save area. 5955 // Unnamed arguments for vararg functions always go to GPRs and 5956 // then the parameter save area. 
For now, put all arguments to vararg 5957 // routines in both locations (FPR *and* GPR or stack slot). 5958 bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs; 5959 bool NeededLoad = false; 5960 5961 // First load the argument into the next available FPR. 5962 if (FPR_idx != NumFPRs) 5963 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 5964 5965 // Next, load the argument into GPR or stack slot if needed. 5966 if (!NeedGPROrStack) 5967 ; 5968 else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) { 5969 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 5970 // once we support fp <-> gpr moves. 5971 5972 // In the non-vararg case, this can only ever happen in the 5973 // presence of f32 array types, since otherwise we never run 5974 // out of FPRs before running out of GPRs. 5975 SDValue ArgVal; 5976 5977 // Double values are always passed in a single GPR. 5978 if (Arg.getValueType() != MVT::f32) { 5979 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg); 5980 5981 // Non-array float values are extended and passed in a GPR. 5982 } else if (!Flags.isInConsecutiveRegs()) { 5983 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 5984 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal); 5985 5986 // If we have an array of floats, we collect every odd element 5987 // together with its predecessor into one GPR. 5988 } else if (ArgOffset % PtrByteSize != 0) { 5989 SDValue Lo, Hi; 5990 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]); 5991 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 5992 if (!isLittleEndian) 5993 std::swap(Lo, Hi); 5994 ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 5995 5996 // The final element, if even, goes into the first half of a GPR. 5997 } else if (Flags.isInConsecutiveRegsLast()) { 5998 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 5999 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal); 6000 if (!isLittleEndian) 6001 ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal, 6002 DAG.getConstant(32, dl, MVT::i32)); 6003 6004 // Non-final even elements are skipped; they will be handled 6005 // together with the subsequent argument on the next go-around. 6006 } else 6007 ArgVal = SDValue(); 6008 6009 if (ArgVal.getNode()) 6010 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal)); 6011 } else { 6012 if (CallConv == CallingConv::Fast) 6013 ComputePtrOff(); 6014 6015 // Single-precision floating-point values are mapped to the 6016 // second (rightmost) word of the stack doubleword. 6017 if (Arg.getValueType() == MVT::f32 && 6018 !isLittleEndian && !Flags.isInConsecutiveRegs()) { 6019 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 6020 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 6021 } 6022 6023 assert(HasParameterArea && 6024 "Parameter area must exist to pass an argument in memory."); 6025 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6026 true, isTailCall, false, MemOpChains, 6027 TailCallArguments, dl); 6028 6029 NeededLoad = true; 6030 } 6031 // When passing an array of floats, the array occupies consecutive 6032 // space in the argument area; only round up to the next doubleword 6033 // at the end of the array. Otherwise, each float takes 8 bytes. 6034 if (CallConv != CallingConv::Fast || NeededLoad) { 6035 ArgOffset += (Arg.getValueType() == MVT::f32 && 6036 Flags.isInConsecutiveRegs()) ?
4 : 8; 6037 if (Flags.isInConsecutiveRegsLast()) 6038 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 6039 } 6040 break; 6041 } 6042 case MVT::v4f32: 6043 case MVT::v4i32: 6044 case MVT::v8i16: 6045 case MVT::v16i8: 6046 case MVT::v2f64: 6047 case MVT::v2i64: 6048 case MVT::v1i128: 6049 case MVT::f128: 6050 if (!Subtarget.hasQPX()) { 6051 // These can be scalar arguments or elements of a vector array type 6052 // passed directly. The latter are used to implement ELFv2 homogenous 6053 // vector aggregates. 6054 6055 // For a varargs call, named arguments go into VRs or on the stack as 6056 // usual; unnamed arguments always go to the stack or the corresponding 6057 // GPRs when within range. For now, we always put the value in both 6058 // locations (or even all three). 6059 if (isVarArg) { 6060 assert(HasParameterArea && 6061 "Parameter area must exist if we have a varargs call."); 6062 // We could elide this store in the case where the object fits 6063 // entirely in R registers. Maybe later. 6064 SDValue Store = 6065 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6066 MemOpChains.push_back(Store); 6067 if (VR_idx != NumVRs) { 6068 SDValue Load = 6069 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo()); 6070 MemOpChains.push_back(Load.getValue(1)); 6071 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 6072 } 6073 ArgOffset += 16; 6074 for (unsigned i=0; i<16; i+=PtrByteSize) { 6075 if (GPR_idx == NumGPRs) 6076 break; 6077 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 6078 DAG.getConstant(i, dl, PtrVT)); 6079 SDValue Load = 6080 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 6081 MemOpChains.push_back(Load.getValue(1)); 6082 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6083 } 6084 break; 6085 } 6086 6087 // Non-varargs Altivec params go into VRs or on the stack. 6088 if (VR_idx != NumVRs) { 6089 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 6090 } else { 6091 if (CallConv == CallingConv::Fast) 6092 ComputePtrOff(); 6093 6094 assert(HasParameterArea && 6095 "Parameter area must exist to pass an argument in memory."); 6096 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6097 true, isTailCall, true, MemOpChains, 6098 TailCallArguments, dl); 6099 if (CallConv == CallingConv::Fast) 6100 ArgOffset += 16; 6101 } 6102 6103 if (CallConv != CallingConv::Fast) 6104 ArgOffset += 16; 6105 break; 6106 } // not QPX 6107 6108 assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 && 6109 "Invalid QPX parameter type"); 6110 6111 /* fall through */ 6112 case MVT::v4f64: 6113 case MVT::v4i1: { 6114 bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32; 6115 if (isVarArg) { 6116 assert(HasParameterArea && 6117 "Parameter area must exist if we have a varargs call."); 6118 // We could elide this store in the case where the object fits 6119 // entirely in R registers. Maybe later. 6120 SDValue Store = 6121 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6122 MemOpChains.push_back(Store); 6123 if (QFPR_idx != NumQFPRs) { 6124 SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store, 6125 PtrOff, MachinePointerInfo()); 6126 MemOpChains.push_back(Load.getValue(1)); 6127 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load)); 6128 } 6129 ArgOffset += (IsF32 ? 16 : 32); 6130 for (unsigned i = 0; i < (IsF32 ? 
16U : 32U); i += PtrByteSize) { 6131 if (GPR_idx == NumGPRs) 6132 break; 6133 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 6134 DAG.getConstant(i, dl, PtrVT)); 6135 SDValue Load = 6136 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 6137 MemOpChains.push_back(Load.getValue(1)); 6138 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6139 } 6140 break; 6141 } 6142 6143 // Non-varargs QPX params go into registers or on the stack. 6144 if (QFPR_idx != NumQFPRs) { 6145 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg)); 6146 } else { 6147 if (CallConv == CallingConv::Fast) 6148 ComputePtrOff(); 6149 6150 assert(HasParameterArea && 6151 "Parameter area must exist to pass an argument in memory."); 6152 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6153 true, isTailCall, true, MemOpChains, 6154 TailCallArguments, dl); 6155 if (CallConv == CallingConv::Fast) 6156 ArgOffset += (IsF32 ? 16 : 32); 6157 } 6158 6159 if (CallConv != CallingConv::Fast) 6160 ArgOffset += (IsF32 ? 16 : 32); 6161 break; 6162 } 6163 } 6164 } 6165 6166 assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) && 6167 "mismatch in size of parameter area"); 6168 (void)NumBytesActuallyUsed; 6169 6170 if (!MemOpChains.empty()) 6171 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 6172 6173 // Check if this is an indirect call (MTCTR/BCTRL). 6174 // See PrepareCall() for more information about calls through function 6175 // pointers in the 64-bit SVR4 ABI. 6176 if (!isTailCall && !isPatchPoint && 6177 !isFunctionGlobalAddress(Callee) && 6178 !isa<ExternalSymbolSDNode>(Callee)) { 6179 // Load r2 into a virtual register and store it to the TOC save area. 6180 setUsesTOCBasePtr(DAG); 6181 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64); 6182 // TOC save area offset. 6183 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset(); 6184 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl); 6185 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 6186 Chain = DAG.getStore( 6187 Val.getValue(1), dl, Val, AddPtr, 6188 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset)); 6189 // In the ELFv2 ABI, R12 must contain the address of an indirect callee. 6190 // This does not mean the MTCTR instruction must use R12; it's easier 6191 // to model this as an extra parameter, so do that. 6192 if (isELFv2ABI && !isPatchPoint) 6193 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee)); 6194 } 6195 6196 // Build a sequence of copy-to-reg nodes chained together with token chain 6197 // and flag operands which copy the outgoing args into the appropriate regs. 
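// (Schematic only, with made-up node numbers, of the glue chaining built
// below: each CopyToReg consumes the previous node's glue result, so the
// register copies stay pinned together immediately before the call.)
//
//   t10: ch,glue = CopyToReg t9, X3, ArgA
//   t11: ch,glue = CopyToReg t10, X4, ArgB, t10:1
//   ... the final glue value then feeds the call node itself.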
6198 SDValue InFlag; 6199 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 6200 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 6201 RegsToPass[i].second, InFlag); 6202 InFlag = Chain.getValue(1); 6203 } 6204 6205 if (isTailCall && !IsSibCall) 6206 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 6207 TailCallArguments); 6208 6209 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest, 6210 DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee, 6211 SPDiff, NumBytes, Ins, InVals, CS); 6212 } 6213 6214 SDValue PPCTargetLowering::LowerCall_Darwin( 6215 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, 6216 bool isTailCall, bool isPatchPoint, 6217 const SmallVectorImpl<ISD::OutputArg> &Outs, 6218 const SmallVectorImpl<SDValue> &OutVals, 6219 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 6220 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, 6221 ImmutableCallSite CS) const { 6222 unsigned NumOps = Outs.size(); 6223 6224 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6225 bool isPPC64 = PtrVT == MVT::i64; 6226 unsigned PtrByteSize = isPPC64 ? 8 : 4; 6227 6228 MachineFunction &MF = DAG.getMachineFunction(); 6229 6230 // Mark this function as potentially containing a tail call. As a 6231 // consequence, the frame pointer will be used for dynamic allocation and 6232 // for restoring the caller's stack pointer in this function's epilogue. 6233 // This is done because, by tail calling, the called function might 6234 // overwrite the value in this function's (MF) stack pointer stack slot 0(SP). 6235 if (getTargetMachine().Options.GuaranteedTailCallOpt && 6236 CallConv == CallingConv::Fast) 6237 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 6238 6239 // Count how many bytes are to be pushed on the stack, including the linkage 6240 // area, and parameter passing area. We start with 24/48 bytes, which is 6241 // prereserved space for [SP][CR][LR][3 x unused]. 6242 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 6243 unsigned NumBytes = LinkageSize; 6244 6245 // Add up all the space actually used. 6246 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually 6247 // they all go in registers, but we must reserve stack space for them for 6248 // possible use by the caller. In varargs or 64-bit calls, parameters are 6249 // assigned stack space in order, with padding so Altivec parameters are 6250 // 16-byte aligned. 6251 unsigned nAltivecParamsAtEnd = 0; 6252 for (unsigned i = 0; i != NumOps; ++i) { 6253 ISD::ArgFlagsTy Flags = Outs[i].Flags; 6254 EVT ArgVT = Outs[i].VT; 6255 // Varargs Altivec parameters are padded to a 16-byte boundary. 6256 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 6257 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 6258 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) { 6259 if (!isVarArg && !isPPC64) { 6260 // Non-varargs Altivec parameters go after all the non-Altivec 6261 // parameters; handle those later so we know how much padding we need. 6262 nAltivecParamsAtEnd++; 6263 continue; 6264 } 6265 // Varargs and 64-bit Altivec parameters are padded to a 16-byte boundary. 6266 NumBytes = ((NumBytes+15)/16)*16; 6267 } 6268 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 6269 } 6270 6271 // Allow for Altivec parameters at the end, if needed.
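// For instance (illustrative numbers): with NumBytes == 56 and two Altivec
// parameters deferred to the end, the block below computes
//   NumBytes = ((56 + 15) / 16) * 16 + 2 * 16 == 64 + 32 == 96.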
6272 if (nAltivecParamsAtEnd) { 6273 NumBytes = ((NumBytes+15)/16)*16; 6274 NumBytes += 16*nAltivecParamsAtEnd; 6275 } 6276 6277 // The prolog code of the callee may store up to 8 GPR argument registers to 6278 // the stack, allowing va_start to index over them in memory if it is varargs. 6279 // Because we cannot tell if this is needed on the caller side, we have to 6280 // conservatively assume that it is needed. As such, make sure we have at 6281 // least enough stack space for the caller to store the 8 GPRs. 6282 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize); 6283 6284 // Tail calls need the stack to be aligned. 6285 if (getTargetMachine().Options.GuaranteedTailCallOpt && 6286 CallConv == CallingConv::Fast) 6287 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes); 6288 6289 // Calculate by how many bytes the stack has to be adjusted in case of tail 6290 // call optimization. 6291 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 6292 6293 // To protect arguments on the stack from being clobbered in a tail call, 6294 // force all the loads to happen before doing any other lowering. 6295 if (isTailCall) 6296 Chain = DAG.getStackArgumentTokenFactor(Chain); 6297 6298 // Adjust the stack pointer for the new arguments... 6299 // These operations are automatically eliminated by the prolog/epilog pass. 6300 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 6301 SDValue CallSeqStart = Chain; 6302 6303 // Load the return address and frame pointer so they can be moved somewhere 6304 // else later. 6305 SDValue LROp, FPOp; 6306 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 6307 6308 // Set up a copy of the stack pointer for use loading and storing any 6309 // arguments that may not fit in the registers available for argument 6310 // passing. 6311 SDValue StackPtr; 6312 if (isPPC64) 6313 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 6314 else 6315 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 6316 6317 // Figure out which arguments are going to go in registers, and which in 6318 // memory. Also, if this is a vararg function, floating point arguments 6319 // must be stored to our stack, and loaded into integer regs as well, if 6320 // any integer regs are available for argument passing. 6321 unsigned ArgOffset = LinkageSize; 6322 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 6323 6324 static const MCPhysReg GPR_32[] = { // 32-bit registers. 6325 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 6326 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 6327 }; 6328 static const MCPhysReg GPR_64[] = { // 64-bit registers. 6329 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 6330 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 6331 }; 6332 static const MCPhysReg VR[] = { 6333 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 6334 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 6335 }; 6336 const unsigned NumGPRs = array_lengthof(GPR_32); 6337 const unsigned NumFPRs = 13; 6338 const unsigned NumVRs = array_lengthof(VR); 6339 6340 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 6341 6342 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 6343 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 6344 6345 SmallVector<SDValue, 8> MemOpChains; 6346 for (unsigned i = 0; i != NumOps; ++i) { 6347 SDValue Arg = OutVals[i]; 6348 ISD::ArgFlagsTy Flags = Outs[i].Flags; 6349 6350 // PtrOff will be used to store the current argument to the stack if a 6351 // register cannot be found for it.
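// (Sketch: PtrOff below is simply StackPtr + ArgOffset. For example, on
// 64-bit Darwin the first argument slot sits at X1 + 48, immediately past
// the 48-byte linkage area.)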
6352 SDValue PtrOff; 6353 6354 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 6355 6356 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 6357 6358 // On PPC64, promote integers to 64-bit values. 6359 if (isPPC64 && Arg.getValueType() == MVT::i32) { 6360 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 6361 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 6362 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 6363 } 6364 6365 // FIXME memcpy is used way more than necessary. Correctness first. 6366 // Note: "by value" is code for passing a structure by value, not 6367 // basic types. 6368 if (Flags.isByVal()) { 6369 unsigned Size = Flags.getByValSize(); 6370 // Very small objects are passed right-justified. Everything else is 6371 // passed left-justified. 6372 if (Size==1 || Size==2) { 6373 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 6374 if (GPR_idx != NumGPRs) { 6375 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 6376 MachinePointerInfo(), VT); 6377 MemOpChains.push_back(Load.getValue(1)); 6378 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6379 6380 ArgOffset += PtrByteSize; 6381 } else { 6382 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 6383 PtrOff.getValueType()); 6384 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 6385 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 6386 CallSeqStart, 6387 Flags, DAG, dl); 6388 ArgOffset += PtrByteSize; 6389 } 6390 continue; 6391 } 6392 // Copy entire object into memory. There are cases where gcc-generated 6393 // code assumes it is there, even if it could be put entirely into 6394 // registers. (This is not what the doc says.) 6395 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 6396 CallSeqStart, 6397 Flags, DAG, dl); 6398 6399 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 6400 // copy the pieces of the object that fit into registers from the 6401 // parameter save area. 
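// Worked example for the loop below (assuming PtrByteSize == 8 and a
// 20-byte aggregate): pieces at offsets 0, 8, and 16 are loaded into up to
// three successive GPRs; if the GPRs run out after the first piece, the
// else branch skips the rest of the object's parameter area in one step:
//   ArgOffset += ((20 - 8) + 8 - 1) / 8 * 8; // == 16 bytes
// and control falls through to the next argument.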
6402 for (unsigned j=0; j<Size; j+=PtrByteSize) { 6403 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 6404 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 6405 if (GPR_idx != NumGPRs) { 6406 SDValue Load = 6407 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo()); 6408 MemOpChains.push_back(Load.getValue(1)); 6409 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6410 ArgOffset += PtrByteSize; 6411 } else { 6412 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 6413 break; 6414 } 6415 } 6416 continue; 6417 } 6418 6419 switch (Arg.getSimpleValueType().SimpleTy) { 6420 default: llvm_unreachable("Unexpected ValueType for argument!"); 6421 case MVT::i1: 6422 case MVT::i32: 6423 case MVT::i64: 6424 if (GPR_idx != NumGPRs) { 6425 if (Arg.getValueType() == MVT::i1) 6426 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg); 6427 6428 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 6429 } else { 6430 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6431 isPPC64, isTailCall, false, MemOpChains, 6432 TailCallArguments, dl); 6433 } 6434 ArgOffset += PtrByteSize; 6435 break; 6436 case MVT::f32: 6437 case MVT::f64: 6438 if (FPR_idx != NumFPRs) { 6439 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 6440 6441 if (isVarArg) { 6442 SDValue Store = 6443 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6444 MemOpChains.push_back(Store); 6445 6446 // Float varargs are always shadowed in available integer registers 6447 if (GPR_idx != NumGPRs) { 6448 SDValue Load = 6449 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo()); 6450 MemOpChains.push_back(Load.getValue(1)); 6451 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6452 } 6453 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 6454 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 6455 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 6456 SDValue Load = 6457 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo()); 6458 MemOpChains.push_back(Load.getValue(1)); 6459 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6460 } 6461 } else { 6462 // If we have any FPRs remaining, we may also have GPRs remaining. 6463 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 6464 // GPRs. 6465 if (GPR_idx != NumGPRs) 6466 ++GPR_idx; 6467 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 6468 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 6469 ++GPR_idx; 6470 } 6471 } else 6472 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6473 isPPC64, isTailCall, false, MemOpChains, 6474 TailCallArguments, dl); 6475 if (isPPC64) 6476 ArgOffset += 8; 6477 else 6478 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 6479 break; 6480 case MVT::v4f32: 6481 case MVT::v4i32: 6482 case MVT::v8i16: 6483 case MVT::v16i8: 6484 if (isVarArg) { 6485 // These go aligned on the stack, or in the corresponding R registers 6486 // when within range. The Darwin PPC ABI doc claims they also go in 6487 // V registers; in fact gcc does this only for arguments that are 6488 // prototyped, not for those that match the ... We do it for all 6489 // arguments, seems to work. 6490 while (ArgOffset % 16 !=0) { 6491 ArgOffset += PtrByteSize; 6492 if (GPR_idx != NumGPRs) 6493 GPR_idx++; 6494 } 6495 // We could elide this store in the case where the object fits 6496 // entirely in R registers. Maybe later. 
6497 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 6498 DAG.getConstant(ArgOffset, dl, PtrVT)); 6499 SDValue Store = 6500 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6501 MemOpChains.push_back(Store); 6502 if (VR_idx != NumVRs) { 6503 SDValue Load = 6504 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo()); 6505 MemOpChains.push_back(Load.getValue(1)); 6506 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 6507 } 6508 ArgOffset += 16; 6509 for (unsigned i=0; i<16; i+=PtrByteSize) { 6510 if (GPR_idx == NumGPRs) 6511 break; 6512 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 6513 DAG.getConstant(i, dl, PtrVT)); 6514 SDValue Load = 6515 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 6516 MemOpChains.push_back(Load.getValue(1)); 6517 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6518 } 6519 break; 6520 } 6521 6522 // Non-varargs Altivec params generally go in registers, but have 6523 // stack space allocated at the end. 6524 if (VR_idx != NumVRs) { 6525 // Doesn't have GPR space allocated. 6526 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 6527 } else if (nAltivecParamsAtEnd==0) { 6528 // We are emitting Altivec params in order. 6529 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6530 isPPC64, isTailCall, true, MemOpChains, 6531 TailCallArguments, dl); 6532 ArgOffset += 16; 6533 } 6534 break; 6535 } 6536 } 6537 // If all Altivec parameters fit in registers, as they usually do, 6538 // they get stack space following the non-Altivec parameters. We 6539 // don't track this here because nobody below needs it. 6540 // If there are more Altivec parameters than fit in registers emit 6541 // the stores here. 6542 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 6543 unsigned j = 0; 6544 // Offset is aligned; skip 1st 12 params which go in V registers. 6545 ArgOffset = ((ArgOffset+15)/16)*16; 6546 ArgOffset += 12*16; 6547 for (unsigned i = 0; i != NumOps; ++i) { 6548 SDValue Arg = OutVals[i]; 6549 EVT ArgType = Outs[i].VT; 6550 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 6551 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 6552 if (++j > NumVRs) { 6553 SDValue PtrOff; 6554 // We are emitting Altivec params in order. 6555 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6556 isPPC64, isTailCall, true, MemOpChains, 6557 TailCallArguments, dl); 6558 ArgOffset += 16; 6559 } 6560 } 6561 } 6562 } 6563 6564 if (!MemOpChains.empty()) 6565 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 6566 6567 // On Darwin, R12 must contain the address of an indirect callee. This does 6568 // not mean the MTCTR instruction must use R12; it's easier to model this as 6569 // an extra parameter, so do that. 6570 if (!isTailCall && 6571 !isFunctionGlobalAddress(Callee) && 6572 !isa<ExternalSymbolSDNode>(Callee) && 6573 !isBLACompatibleAddress(Callee, DAG)) 6574 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 : 6575 PPC::R12), Callee)); 6576 6577 // Build a sequence of copy-to-reg nodes chained together with token chain 6578 // and flag operands which copy the outgoing args into the appropriate regs. 
6579 SDValue InFlag; 6580 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 6581 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 6582 RegsToPass[i].second, InFlag); 6583 InFlag = Chain.getValue(1); 6584 } 6585 6586 if (isTailCall) 6587 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 6588 TailCallArguments); 6589 6590 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, 6591 /* unused except on PPC64 ELFv1 */ false, DAG, 6592 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 6593 NumBytes, Ins, InVals, CS); 6594 } 6595 6596 bool 6597 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 6598 MachineFunction &MF, bool isVarArg, 6599 const SmallVectorImpl<ISD::OutputArg> &Outs, 6600 LLVMContext &Context) const { 6601 SmallVector<CCValAssign, 16> RVLocs; 6602 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 6603 return CCInfo.CheckReturn( 6604 Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 6605 ? RetCC_PPC_Cold 6606 : RetCC_PPC); 6607 } 6608 6609 SDValue 6610 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 6611 bool isVarArg, 6612 const SmallVectorImpl<ISD::OutputArg> &Outs, 6613 const SmallVectorImpl<SDValue> &OutVals, 6614 const SDLoc &dl, SelectionDAG &DAG) const { 6615 SmallVector<CCValAssign, 16> RVLocs; 6616 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 6617 *DAG.getContext()); 6618 CCInfo.AnalyzeReturn(Outs, 6619 (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 6620 ? RetCC_PPC_Cold 6621 : RetCC_PPC); 6622 6623 SDValue Flag; 6624 SmallVector<SDValue, 4> RetOps(1, Chain); 6625 6626 // Copy the result values into the output registers. 6627 for (unsigned i = 0; i != RVLocs.size(); ++i) { 6628 CCValAssign &VA = RVLocs[i]; 6629 assert(VA.isRegLoc() && "Can only return in registers!"); 6630 6631 SDValue Arg = OutVals[i]; 6632 6633 switch (VA.getLocInfo()) { 6634 default: llvm_unreachable("Unknown loc info!"); 6635 case CCValAssign::Full: break; 6636 case CCValAssign::AExt: 6637 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 6638 break; 6639 case CCValAssign::ZExt: 6640 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 6641 break; 6642 case CCValAssign::SExt: 6643 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 6644 break; 6645 } 6646 6647 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 6648 Flag = Chain.getValue(1); 6649 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 6650 } 6651 6652 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 6653 const MCPhysReg *I = 6654 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); 6655 if (I) { 6656 for (; *I; ++I) { 6657 6658 if (PPC::G8RCRegClass.contains(*I)) 6659 RetOps.push_back(DAG.getRegister(*I, MVT::i64)); 6660 else if (PPC::F8RCRegClass.contains(*I)) 6661 RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64))); 6662 else if (PPC::CRRCRegClass.contains(*I)) 6663 RetOps.push_back(DAG.getRegister(*I, MVT::i1)); 6664 else if (PPC::VRRCRegClass.contains(*I)) 6665 RetOps.push_back(DAG.getRegister(*I, MVT::Other)); 6666 else 6667 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 6668 } 6669 } 6670 6671 RetOps[0] = Chain; // Update chain. 6672 6673 // Add the flag if we have it. 
6674 if (Flag.getNode()) 6675 RetOps.push_back(Flag); 6676 6677 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps); 6678 } 6679 6680 SDValue 6681 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, 6682 SelectionDAG &DAG) const { 6683 SDLoc dl(Op); 6684 6685 // Get the correct type for integers. 6686 EVT IntVT = Op.getValueType(); 6687 6688 // Get the inputs. 6689 SDValue Chain = Op.getOperand(0); 6690 SDValue FPSIdx = getFramePointerFrameIndex(DAG); 6691 // Build a DYNAREAOFFSET node. 6692 SDValue Ops[2] = {Chain, FPSIdx}; 6693 SDVTList VTs = DAG.getVTList(IntVT); 6694 return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops); 6695 } 6696 6697 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, 6698 SelectionDAG &DAG) const { 6699 // When we pop the dynamic allocation, we need to restore the SP link. 6700 SDLoc dl(Op); 6701 6702 // Get the correct type for pointers. 6703 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6704 6705 // Construct the stack pointer operand. 6706 bool isPPC64 = Subtarget.isPPC64(); 6707 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1; 6708 SDValue StackPtr = DAG.getRegister(SP, PtrVT); 6709 6710 // Get the operands for the STACKRESTORE. 6711 SDValue Chain = Op.getOperand(0); 6712 SDValue SaveSP = Op.getOperand(1); 6713 6714 // Load the old link SP. 6715 SDValue LoadLinkSP = 6716 DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo()); 6717 6718 // Restore the stack pointer. 6719 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP); 6720 6721 // Store the old link SP. 6722 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo()); 6723 } 6724 6725 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const { 6726 MachineFunction &MF = DAG.getMachineFunction(); 6727 bool isPPC64 = Subtarget.isPPC64(); 6728 EVT PtrVT = getPointerTy(MF.getDataLayout()); 6729 6730 // Get the current return address save index; it is created on first 6731 // use below. 6732 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 6733 int RASI = FI->getReturnAddrSaveIndex(); 6734 6735 // If the return address save index hasn't been defined yet. 6736 if (!RASI) { 6737 // Find out the fixed offset of the return address save area. 6738 int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset(); 6739 // Allocate the frame index for the return address save area. 6740 RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false); 6741 // Save the result. 6742 FI->setReturnAddrSaveIndex(RASI); 6743 } 6744 return DAG.getFrameIndex(RASI, PtrVT); 6745 } 6746 6747 SDValue 6748 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const { 6749 MachineFunction &MF = DAG.getMachineFunction(); 6750 bool isPPC64 = Subtarget.isPPC64(); 6751 EVT PtrVT = getPointerTy(MF.getDataLayout()); 6752 6753 // Get the current frame pointer save index. The users of this index are 6754 // primarily DYNALLOC instructions. 6755 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 6756 int FPSI = FI->getFramePointerSaveIndex(); 6757 6758 // If the frame pointer save index hasn't been defined yet. 6759 if (!FPSI) { 6760 // Find out the fixed offset of the frame pointer save area. 6761 int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset(); 6762 // Allocate the frame index for the frame pointer save area. 6763 FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true); 6764 // Save the result.
6765 FI->setFramePointerSaveIndex(FPSI); 6766 } 6767 return DAG.getFrameIndex(FPSI, PtrVT); 6768 } 6769 6770 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 6771 SelectionDAG &DAG) const { 6772 // Get the inputs. 6773 SDValue Chain = Op.getOperand(0); 6774 SDValue Size = Op.getOperand(1); 6775 SDLoc dl(Op); 6776 6777 // Get the correct type for pointers. 6778 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6779 // Negate the size. 6780 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT, 6781 DAG.getConstant(0, dl, PtrVT), Size); 6782 // Construct a node for the frame pointer save index. 6783 SDValue FPSIdx = getFramePointerFrameIndex(DAG); 6784 // Build a DYNALLOC node. 6785 SDValue Ops[3] = { Chain, NegSize, FPSIdx }; 6786 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other); 6787 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops); 6788 } 6789 6790 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op, 6791 SelectionDAG &DAG) const { 6792 MachineFunction &MF = DAG.getMachineFunction(); 6793 6794 bool isPPC64 = Subtarget.isPPC64(); 6795 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6796 6797 int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false); 6798 return DAG.getFrameIndex(FI, PtrVT); 6799 } 6800 6801 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, 6802 SelectionDAG &DAG) const { 6803 SDLoc DL(Op); 6804 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL, 6805 DAG.getVTList(MVT::i32, MVT::Other), 6806 Op.getOperand(0), Op.getOperand(1)); 6807 } 6808 6809 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, 6810 SelectionDAG &DAG) const { 6811 SDLoc DL(Op); 6812 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other, 6813 Op.getOperand(0), Op.getOperand(1)); 6814 } 6815 6816 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 6817 if (Op.getValueType().isVector()) 6818 return LowerVectorLoad(Op, DAG); 6819 6820 assert(Op.getValueType() == MVT::i1 && 6821 "Custom lowering only for i1 loads"); 6822 6823 // First, load 8 bits into 32 bits, then truncate to 1 bit. 6824 6825 SDLoc dl(Op); 6826 LoadSDNode *LD = cast<LoadSDNode>(Op); 6827 6828 SDValue Chain = LD->getChain(); 6829 SDValue BasePtr = LD->getBasePtr(); 6830 MachineMemOperand *MMO = LD->getMemOperand(); 6831 6832 SDValue NewLD = 6833 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain, 6834 BasePtr, MVT::i8, MMO); 6835 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD); 6836 6837 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) }; 6838 return DAG.getMergeValues(Ops, dl); 6839 } 6840 6841 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 6842 if (Op.getOperand(1).getValueType().isVector()) 6843 return LowerVectorStore(Op, DAG); 6844 6845 assert(Op.getOperand(1).getValueType() == MVT::i1 && 6846 "Custom lowering only for i1 stores"); 6847 6848 // First, zero extend to 32 bits, then use a truncating store to 8 bits. 
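// In C terms the effect is roughly the following (a sketch, assuming an
// in-memory boolean occupies one byte):
//
//   void storeI1(uint8_t *Ptr, bool V) {
//     *Ptr = (uint8_t)V; // widen to a register, then truncating i8 store
//   }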
6849 6850 SDLoc dl(Op); 6851 StoreSDNode *ST = cast<StoreSDNode>(Op); 6852 6853 SDValue Chain = ST->getChain(); 6854 SDValue BasePtr = ST->getBasePtr(); 6855 SDValue Value = ST->getValue(); 6856 MachineMemOperand *MMO = ST->getMemOperand(); 6857 6858 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()), 6859 Value); 6860 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO); 6861 } 6862 6863 // FIXME: Remove this once the ANDI glue bug is fixed: 6864 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 6865 assert(Op.getValueType() == MVT::i1 && 6866 "Custom lowering only for i1 results"); 6867 6868 SDLoc DL(Op); 6869 return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1, 6870 Op.getOperand(0)); 6871 } 6872 6873 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when 6874 /// possible. 6875 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 6876 // Not FP? Not a fsel. 6877 if (!Op.getOperand(0).getValueType().isFloatingPoint() || 6878 !Op.getOperand(2).getValueType().isFloatingPoint()) 6879 return Op; 6880 6881 // We might be able to do better than this under some circumstances, but in 6882 // general, fsel-based lowering of select is a finite-math-only optimization. 6883 // For more information, see section F.3 of the 2.06 ISA specification. 6884 if (!DAG.getTarget().Options.NoInfsFPMath || 6885 !DAG.getTarget().Options.NoNaNsFPMath) 6886 return Op; 6887 // TODO: Propagate flags from the select rather than global settings. 6888 SDNodeFlags Flags; 6889 Flags.setNoInfs(true); 6890 Flags.setNoNaNs(true); 6891 6892 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 6893 6894 EVT ResVT = Op.getValueType(); 6895 EVT CmpVT = Op.getOperand(0).getValueType(); 6896 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 6897 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3); 6898 SDLoc dl(Op); 6899 6900 // If the RHS of the comparison is a 0.0, we don't need to do the 6901 // subtraction at all. 6902 SDValue Sel1; 6903 if (isFloatingPointZero(RHS)) 6904 switch (CC) { 6905 default: break; // SETUO etc aren't handled by fsel. 
6906 case ISD::SETNE: 6907 std::swap(TV, FV); 6908 LLVM_FALLTHROUGH; 6909 case ISD::SETEQ: 6910 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 6911 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 6912 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 6913 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 6914 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 6915 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6916 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV); 6917 case ISD::SETULT: 6918 case ISD::SETLT: 6919 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 6920 LLVM_FALLTHROUGH; 6921 case ISD::SETOGE: 6922 case ISD::SETGE: 6923 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 6924 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 6925 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 6926 case ISD::SETUGT: 6927 case ISD::SETGT: 6928 std::swap(TV, FV); // fsel is natively setge, swap operands for setgt 6929 LLVM_FALLTHROUGH; 6930 case ISD::SETOLE: 6931 case ISD::SETLE: 6932 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 6933 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 6934 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6935 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV); 6936 } 6937 6938 SDValue Cmp; 6939 switch (CC) { 6940 default: break; // SETUO etc aren't handled by fsel. 6941 case ISD::SETNE: 6942 std::swap(TV, FV); 6943 LLVM_FALLTHROUGH; 6944 case ISD::SETEQ: 6945 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 6946 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6947 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6948 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6949 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 6950 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 6951 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6952 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 6953 case ISD::SETULT: 6954 case ISD::SETLT: 6955 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 6956 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6957 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6958 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6959 case ISD::SETOGE: 6960 case ISD::SETGE: 6961 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 6962 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6963 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6964 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6965 case ISD::SETUGT: 6966 case ISD::SETGT: 6967 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags); 6968 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6969 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6970 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6971 case ISD::SETOLE: 6972 case ISD::SETLE: 6973 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags); 6974 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6975 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6976 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6977 } 6978 return Op; 6979 } 6980 6981 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI, 6982 SelectionDAG &DAG, 6983 const SDLoc &dl) const { 6984 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6985 SDValue Src = Op.getOperand(0); 6986 if (Src.getValueType() ==
MVT::f32) 6987 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6988 6989 SDValue Tmp; 6990 switch (Op.getSimpleValueType().SimpleTy) { 6991 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6992 case MVT::i32: 6993 Tmp = DAG.getNode( 6994 Op.getOpcode() == ISD::FP_TO_SINT 6995 ? PPCISD::FCTIWZ 6996 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6997 dl, MVT::f64, Src); 6998 break; 6999 case MVT::i64: 7000 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 7001 "i64 FP_TO_UINT is supported only with FPCVT"); 7002 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 7003 PPCISD::FCTIDUZ, 7004 dl, MVT::f64, Src); 7005 break; 7006 } 7007 7008 // Convert the FP value to an int value through memory. 7009 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 7010 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()); 7011 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 7012 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 7013 MachinePointerInfo MPI = 7014 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); 7015 7016 // Emit a store to the stack slot. 7017 SDValue Chain; 7018 if (i32Stack) { 7019 MachineFunction &MF = DAG.getMachineFunction(); 7020 MachineMemOperand *MMO = 7021 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 7022 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 7023 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 7024 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 7025 } else 7026 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI); 7027 7028 // Result is a load from the stack slot. If loading 4 bytes, make sure to 7029 // add in a bias on big endian. 7030 if (Op.getValueType() == MVT::i32 && !i32Stack) { 7031 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 7032 DAG.getConstant(4, dl, FIPtr.getValueType())); 7033 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4); 7034 } 7035 7036 RLI.Chain = Chain; 7037 RLI.Ptr = FIPtr; 7038 RLI.MPI = MPI; 7039 } 7040 7041 /// Custom lowers floating point to integer conversions to use 7042 /// the direct move instructions available in ISA 2.07 to avoid the 7043 /// need for load/store combinations. 7044 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 7045 SelectionDAG &DAG, 7046 const SDLoc &dl) const { 7047 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 7048 SDValue Src = Op.getOperand(0); 7049 7050 if (Src.getValueType() == MVT::f32) 7051 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 7052 7053 SDValue Tmp; 7054 switch (Op.getSimpleValueType().SimpleTy) { 7055 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 7056 case MVT::i32: 7057 Tmp = DAG.getNode( 7058 Op.getOpcode() == ISD::FP_TO_SINT 7059 ? PPCISD::FCTIWZ 7060 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 7061 dl, MVT::f64, Src); 7062 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp); 7063 break; 7064 case MVT::i64: 7065 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 7066 "i64 FP_TO_UINT is supported only with FPCVT"); 7067 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? 
PPCISD::FCTIDZ : 7068 PPCISD::FCTIDUZ, 7069 dl, MVT::f64, Src); 7070 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp); 7071 break; 7072 } 7073 return Tmp; 7074 } 7075 7076 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 7077 const SDLoc &dl) const { 7078 7079 // FP to INT conversions are legal for f128. 7080 if (EnableQuadPrecision && (Op->getOperand(0).getValueType() == MVT::f128)) 7081 return Op; 7082 7083 // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on 7084 // PPC (the libcall is not available). 7085 if (Op.getOperand(0).getValueType() == MVT::ppcf128) { 7086 if (Op.getValueType() == MVT::i32) { 7087 if (Op.getOpcode() == ISD::FP_TO_SINT) { 7088 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 7089 MVT::f64, Op.getOperand(0), 7090 DAG.getIntPtrConstant(0, dl)); 7091 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 7092 MVT::f64, Op.getOperand(0), 7093 DAG.getIntPtrConstant(1, dl)); 7094 7095 // Add the two halves of the long double in round-to-zero mode. 7096 SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 7097 7098 // Now use a smaller FP_TO_SINT. 7099 return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res); 7100 } 7101 if (Op.getOpcode() == ISD::FP_TO_UINT) { 7102 const uint64_t TwoE31[] = {0x41e0000000000000LL, 0}; 7103 APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31)); 7104 SDValue Tmp = DAG.getConstantFP(APF, dl, MVT::ppcf128); 7105 // X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X 7106 // FIXME: generated code sucks. 7107 // TODO: Are there fast-math-flags to propagate to this FSUB? 7108 SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128, 7109 Op.getOperand(0), Tmp); 7110 True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True); 7111 True = DAG.getNode(ISD::ADD, dl, MVT::i32, True, 7112 DAG.getConstant(0x80000000, dl, MVT::i32)); 7113 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, 7114 Op.getOperand(0)); 7115 return DAG.getSelectCC(dl, Op.getOperand(0), Tmp, True, False, 7116 ISD::SETGE); 7117 } 7118 } 7119 7120 return SDValue(); 7121 } 7122 7123 if (Subtarget.hasDirectMove() && Subtarget.isPPC64()) 7124 return LowerFP_TO_INTDirectMove(Op, DAG, dl); 7125 7126 ReuseLoadInfo RLI; 7127 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 7128 7129 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, 7130 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 7131 } 7132 7133 // We're trying to insert a regular store, S, and then a load, L. If the 7134 // incoming value, O, is a load, we might just be able to have our load use the 7135 // address used by O. However, we don't know if anything else will store to 7136 // that address before we can load from it. To prevent this situation, we need 7137 // to insert our load, L, into the chain as a peer of O. To do this, we give L 7138 // the same chain operand as O, we create a token factor from the chain results 7139 // of O and L, and we replace all uses of O's chain result with that token 7140 // factor (see spliceIntoChain below for this last part). 
7141 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT, 7142 ReuseLoadInfo &RLI, 7143 SelectionDAG &DAG, 7144 ISD::LoadExtType ET) const { 7145 SDLoc dl(Op); 7146 if (ET == ISD::NON_EXTLOAD && 7147 (Op.getOpcode() == ISD::FP_TO_UINT || 7148 Op.getOpcode() == ISD::FP_TO_SINT) && 7149 isOperationLegalOrCustom(Op.getOpcode(), 7150 Op.getOperand(0).getValueType())) { 7151 7152 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 7153 return true; 7154 } 7155 7156 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op); 7157 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() || 7158 LD->isNonTemporal()) 7159 return false; 7160 if (LD->getMemoryVT() != MemVT) 7161 return false; 7162 7163 RLI.Ptr = LD->getBasePtr(); 7164 if (LD->isIndexed() && !LD->getOffset().isUndef()) { 7165 assert(LD->getAddressingMode() == ISD::PRE_INC && 7166 "Non-pre-inc AM on PPC?"); 7167 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr, 7168 LD->getOffset()); 7169 } 7170 7171 RLI.Chain = LD->getChain(); 7172 RLI.MPI = LD->getPointerInfo(); 7173 RLI.IsDereferenceable = LD->isDereferenceable(); 7174 RLI.IsInvariant = LD->isInvariant(); 7175 RLI.Alignment = LD->getAlignment(); 7176 RLI.AAInfo = LD->getAAInfo(); 7177 RLI.Ranges = LD->getRanges(); 7178 7179 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1); 7180 return true; 7181 } 7182 7183 // Given the head of the old chain, ResChain, insert a token factor containing 7184 // it and NewResChain, and make users of ResChain now be users of that token 7185 // factor. 7186 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead. 7187 void PPCTargetLowering::spliceIntoChain(SDValue ResChain, 7188 SDValue NewResChain, 7189 SelectionDAG &DAG) const { 7190 if (!ResChain) 7191 return; 7192 7193 SDLoc dl(NewResChain); 7194 7195 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 7196 NewResChain, DAG.getUNDEF(MVT::Other)); 7197 assert(TF.getNode() != NewResChain.getNode() && 7198 "A new TF really is required here"); 7199 7200 DAG.ReplaceAllUsesOfValueWith(ResChain, TF); 7201 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain); 7202 } 7203 7204 /// Analyze profitability of direct move 7205 /// prefer float load to int load plus direct move 7206 /// when there is no integer use of int load 7207 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const { 7208 SDNode *Origin = Op.getOperand(0).getNode(); 7209 if (Origin->getOpcode() != ISD::LOAD) 7210 return true; 7211 7212 // If there is no LXSIBZX/LXSIHZX, like Power8, 7213 // prefer direct move if the memory size is 1 or 2 bytes. 7214 MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand(); 7215 if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2) 7216 return true; 7217 7218 for (SDNode::use_iterator UI = Origin->use_begin(), 7219 UE = Origin->use_end(); 7220 UI != UE; ++UI) { 7221 7222 // Only look at the users of the loaded value. 7223 if (UI.getUse().get().getResNo() != 0) 7224 continue; 7225 7226 if (UI->getOpcode() != ISD::SINT_TO_FP && 7227 UI->getOpcode() != ISD::UINT_TO_FP) 7228 return true; 7229 } 7230 7231 return false; 7232 } 7233 7234 /// Custom lowers integer to floating point conversions to use 7235 /// the direct move instructions available in ISA 2.07 to avoid the 7236 /// need for load/store combinations. 

/// Custom lowers integer to floating point conversions to use
/// the direct move instructions available in ISA 2.07 to avoid the
/// need for load/store combinations.
SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
                                                    SelectionDAG &DAG,
                                                    const SDLoc &dl) const {
  assert((Op.getValueType() == MVT::f32 ||
          Op.getValueType() == MVT::f64) &&
         "Invalid floating point type as target of conversion");
  assert(Subtarget.hasFPCVT() &&
         "Int to FP conversions with direct moves require FPCVT");
  SDValue FP;
  SDValue Src = Op.getOperand(0);
  bool SinglePrec = Op.getValueType() == MVT::f32;
  bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
  bool Signed = Op.getOpcode() == ISD::SINT_TO_FP;
  unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) :
                             (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU);

  if (WordInt) {
    FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ,
                     dl, MVT::f64, Src);
    FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
  } else {
    FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src);
    FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
  }

  return FP;
}
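
// Sketch of the nodes built above for a signed i32 -> f32 conversion
// (illustrative only):
//   t1 = PPCISD::MTVSRA f64, %src   ; move the GPR value into a VSR
//   t2 = PPCISD::FCFIDS f32, t1     ; convert, rounding to single precision
// Unsigned word inputs use MTVSRZ/FCFIDUS instead, and i64 inputs always use
// MTVSRA since the value already fills the register.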

SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);

  // Conversions to f128 are legal.
  if (EnableQuadPrecision && (Op.getValueType() == MVT::f128))
    return Op;

  if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) {
    if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64)
      return SDValue();

    SDValue Value = Op.getOperand(0);
    // The values are now known to be -1 (false) or 1 (true). To convert this
    // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by
    // 0.5). This can be done with an fma and the 0.5 constant:
    //   (V + 1.0) * 0.5 = 0.5 * V + 0.5
    Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

    SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);

    Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

    if (Op.getValueType() != MVT::v4f64)
      Value = DAG.getNode(ISD::FP_ROUND, dl,
                          Op.getValueType(), Value,
                          DAG.getIntPtrConstant(1, dl));
    return Value;
  }

  // Don't handle ppc_fp128 here; let it be lowered to a libcall.
  if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
    return SDValue();

  if (Op.getOperand(0).getValueType() == MVT::i1)
    return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
                       DAG.getConstantFP(1.0, dl, Op.getValueType()),
                       DAG.getConstantFP(0.0, dl, Op.getValueType()));

  // If we have direct moves, we can do the whole conversion and skip the
  // store/load; however, without FPCVT we can't do most conversions.
  if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
      Subtarget.isPPC64() && Subtarget.hasFPCVT())
    return LowerINT_TO_FPDirectMove(Op, DAG, dl);

  assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
         "UINT_TO_FP is supported only with FPCVT");

  // If we have FCFIDS, then use it when converting to single-precision.
  // Otherwise, convert to double-precision and then round.
  unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                       ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
                                                            : PPCISD::FCFIDS)
                       : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
                                                            : PPCISD::FCFID);
  MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                  ? MVT::f32
                  : MVT::f64;

  if (Op.getOperand(0).getValueType() == MVT::i64) {
    SDValue SINT = Op.getOperand(0);
    // When converting to single-precision, we actually need to convert
    // to double-precision first and then round to single-precision.
    // To avoid double-rounding effects during that operation, we have
    // to prepare the input operand. Bits that might be truncated when
    // converting to double-precision are replaced by a bit that won't
    // be lost at this stage, but is below the single-precision rounding
    // position.
    //
    // However, if -enable-unsafe-fp-math is in effect, accept double
    // rounding to avoid the extra overhead.
    if (Op.getValueType() == MVT::f32 &&
        !Subtarget.hasFPCVT() &&
        !DAG.getTarget().Options.UnsafeFPMath) {

      // Twiddle the input to make sure the low 11 bits are zero. (If this
      // is the case, we are guaranteed the value will fit into the 53-bit
      // mantissa of an IEEE double-precision value without rounding.)
      // If any of those low 11 bits were not zero originally, make sure
      // bit 12 (value 2048) is set instead, so that the final rounding
      // to single-precision gets the correct result.
      SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
                                  SINT, DAG.getConstant(2047, dl, MVT::i64));
      Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
                          Round, DAG.getConstant(2047, dl, MVT::i64));
      Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
      Round = DAG.getNode(ISD::AND, dl, MVT::i64,
                          Round, DAG.getConstant(-2048, dl, MVT::i64));

      // However, we cannot use that value unconditionally: if the magnitude
      // of the input value is small, the bit-twiddling we did above might
      // end up visibly changing the output. Fortunately, in that case, we
      // don't need to twiddle bits since the original input will convert
      // exactly to double-precision floating-point already. Therefore,
      // construct a conditional to use the original value if the top 11
      // bits are all sign-bit copies, and use the rounded value computed
      // above otherwise.
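      //
      // Worked example of the twiddle (illustrative): for
      // SINT == 0x0020000000000401 (2^53 + 0x401), the low 11 bits are
      // 0x401, and 0x401 + 0x7FF == 0xC00 has bit 11 set. OR-ing that into
      // SINT and then clearing the low 11 bits yields 0x0020000000000800,
      // i.e. the low bits are replaced by a single sticky bit at position 11
      // that converts to double-precision exactly and still drives the final
      // rounding to single-precision correctly.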
      SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
                                 SINT, DAG.getConstant(53, dl, MVT::i32));
      Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
                         Cond, DAG.getConstant(1, dl, MVT::i64));
      Cond = DAG.getSetCC(dl, MVT::i32,
                          Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);

      SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
    }

    ReuseLoadInfo RLI;
    SDValue Bits;

    MachineFunction &MF = DAG.getMachineFunction();
    if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
      Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
                         RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo,
                         RLI.Ranges);
      spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
    } else if (Subtarget.hasLFIWAX() &&
               canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
      MachineMemOperand *MMO =
          MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
                                  RLI.Alignment, RLI.AAInfo, RLI.Ranges);
      SDValue Ops[] = { RLI.Chain, RLI.Ptr };
      Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
                                     DAG.getVTList(MVT::f64, MVT::Other),
                                     Ops, MVT::i32, MMO);
      spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
    } else if (Subtarget.hasFPCVT() &&
               canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
      MachineMemOperand *MMO =
          MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
                                  RLI.Alignment, RLI.AAInfo, RLI.Ranges);
      SDValue Ops[] = { RLI.Chain, RLI.Ptr };
      Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
                                     DAG.getVTList(MVT::f64, MVT::Other),
                                     Ops, MVT::i32, MMO);
      spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
    } else if (((Subtarget.hasLFIWAX() &&
                 SINT.getOpcode() == ISD::SIGN_EXTEND) ||
                (Subtarget.hasFPCVT() &&
                 SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
               SINT.getOperand(0).getValueType() == MVT::i32) {
      MachineFrameInfo &MFI = MF.getFrameInfo();
      EVT PtrVT = getPointerTy(DAG.getDataLayout());

      int FrameIdx = MFI.CreateStackObject(4, 4, false);
      SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

      SDValue Store =
          DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx,
                       MachinePointerInfo::getFixedStack(
                           DAG.getMachineFunction(), FrameIdx));

      assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
             "Expected an i32 store");

      RLI.Ptr = FIdx;
      RLI.Chain = Store;
      RLI.MPI =
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
                                            FrameIdx);
      RLI.Alignment = 4;

      MachineMemOperand *MMO =
          MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
                                  RLI.Alignment, RLI.AAInfo, RLI.Ranges);
      SDValue Ops[] = { RLI.Chain, RLI.Ptr };
      Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
                                         PPCISD::LFIWZX : PPCISD::LFIWAX,
                                     dl, DAG.getVTList(MVT::f64, MVT::Other),
                                     Ops, MVT::i32, MMO);
    } else
      Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);

    SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits);

    if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
      FP = DAG.getNode(ISD::FP_ROUND, dl,
                       MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
    return FP;
  }

  assert(Op.getOperand(0).getValueType() == MVT::i32 &&
         "Unhandled INT_TO_FP type in custom expander!");
  // Since we only generate this in 64-bit mode, we can take advantage of
  // 64-bit registers. In particular, sign extend the input value into the
  // 64-bit register with extsw, store the whole 64-bit value into the stack,
  // then lfd it and fcfid it.
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  SDValue Ld;
  if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
    ReuseLoadInfo RLI;
    bool ReusingLoad;
    if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI,
                                            DAG))) {
      int FrameIdx = MFI.CreateStackObject(4, 4, false);
      SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

      SDValue Store =
          DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
                       MachinePointerInfo::getFixedStack(
                           DAG.getMachineFunction(), FrameIdx));

      assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
             "Expected an i32 store");

      RLI.Ptr = FIdx;
      RLI.Chain = Store;
      RLI.MPI =
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
                                            FrameIdx);
      RLI.Alignment = 4;
    }

    MachineMemOperand *MMO =
        MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
                                RLI.Alignment, RLI.AAInfo, RLI.Ranges);
    SDValue Ops[] = { RLI.Chain, RLI.Ptr };
    Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ?
                                     PPCISD::LFIWZX : PPCISD::LFIWAX,
                                 dl, DAG.getVTList(MVT::f64, MVT::Other),
                                 Ops, MVT::i32, MMO);
    if (ReusingLoad)
      spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
  } else {
    assert(Subtarget.isPPC64() &&
           "i32->FP without LFIWAX supported only on PPC64");

    int FrameIdx = MFI.CreateStackObject(8, 8, false);
    SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

    SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64,
                                Op.getOperand(0));

    // STD the extended value into the stack slot.
    SDValue Store = DAG.getStore(
        DAG.getEntryNode(), dl, Ext64, FIdx,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));

    // Load the value as a double.
    Ld = DAG.getLoad(
        MVT::f64, dl, Store, FIdx,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
  }
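
  // Illustrative final instruction sequences for this path (assuming no
  // reusable load is found):
  //   with LFIWAX/LFIWZX:  stw; lfiwax (or lfiwzx); fcfid[s][u]
  //   PPC64 fallback:      extsw; std; lfd; fcfid
  // The reuse cases above skip the store and read through the original
  // load's address instead.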
  // FCFID it and return it.
  SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
  if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
    FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
                     DAG.getIntPtrConstant(0, dl));
  return FP;
}

SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  /*
   The rounding mode is in bits 30:31 of the FPSCR, and has the following
   settings:
     00 Round to nearest
     01 Round to 0
     10 Round to +inf
     11 Round to -inf

   FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

   To perform the conversion, we do:
     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
  */
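
  // Sanity check of the formula above (illustrative): with
  //   f(x) = (x & 3) ^ ((~x & 3) >> 1)
  // we get
  //   f(0b00) = 0 ^ (0b11 >> 1) = 1   (round to nearest)
  //   f(0b01) = 1 ^ (0b10 >> 1) = 0   (round to 0)
  //   f(0b10) = 2 ^ (0b01 >> 1) = 2   (round to +inf)
  //   f(0b11) = 3 ^ (0b00 >> 1) = 3   (round to -inf)
  // which matches the FLT_ROUNDS encoding listed above.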

  MachineFunction &MF = DAG.getMachineFunction();
  EVT VT = Op.getValueType();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Save FP Control Word to register
  EVT NodeTys[] = {
    MVT::f64,   // return register
    MVT::Glue   // unused in this context
  };
  SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);

  // Save FP register to stack slot
  int SSFI = MF.getFrameInfo().CreateStackObject(8, 8, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, StackSlot,
                               MachinePointerInfo());

  // Load FP Control Word from low 32 bits of stack slot.
  SDValue Four = DAG.getConstant(4, dl, PtrVT);
  SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
  SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo());

  // Transform as necessary
  SDValue CWD1 =
      DAG.getNode(ISD::AND, dl, MVT::i32,
                  CWD, DAG.getConstant(3, dl, MVT::i32));
  SDValue CWD2 =
      DAG.getNode(ISD::SRL, dl, MVT::i32,
                  DAG.getNode(ISD::AND, dl, MVT::i32,
                              DAG.getNode(ISD::XOR, dl, MVT::i32,
                                          CWD,
                                          DAG.getConstant(3, dl, MVT::i32)),
                              DAG.getConstant(3, dl, MVT::i32)),
                  DAG.getConstant(1, dl, MVT::i32));

  SDValue RetVal =
      DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);

  return DAG.getNode((VT.getSizeInBits() < 16 ?
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
}

SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  SDLoc dl(Op);
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SHL!");

  // Expand into a bunch of logical ops. Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  EVT AmtVT = Amt.getValueType();

  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                             DAG.getConstant(BitWidth, dl, AmtVT), Amt);
  SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
  SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
  SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
                             DAG.getConstant(-BitWidth, dl, AmtVT));
  SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
  SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
  SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
  SDValue OutOps[] = { OutLo, OutHi };
  return DAG.getMergeValues(OutOps, dl);
}

SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned BitWidth = VT.getSizeInBits();
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SRL!");

  // Expand into a bunch of logical ops. Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  EVT AmtVT = Amt.getValueType();

  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                             DAG.getConstant(BitWidth, dl, AmtVT), Amt);
  SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
  SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
  SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
                             DAG.getConstant(-BitWidth, dl, AmtVT));
  SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
  SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
  SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
  SDValue OutOps[] = { OutLo, OutHi };
  return DAG.getMergeValues(OutOps, dl);
}

SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SRA!");

  // Expand into a bunch of logical ops, followed by a select_cc.
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  EVT AmtVT = Amt.getValueType();

  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                             DAG.getConstant(BitWidth, dl, AmtVT), Amt);
  SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
  SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
  SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
                             DAG.getConstant(-BitWidth, dl, AmtVT));
  SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
  SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
  SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
                                  Tmp4, Tmp6, ISD::SETLE);
  SDValue OutOps[] = { OutLo, OutHi };
  return DAG.getMergeValues(OutOps, dl);
}
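
// How the *_PARTS expansions above exploit the PPC shift behavior
// (illustrative, for BitWidth == 32, where shift amounts in [32, 63] yield
// zero): for SHL with Amt == 40, Tmp2 and Tmp3 see out-of-range amounts and
// produce 0, while Tmp6 == Lo << (40 - 32) supplies OutHi, and OutLo == 0.
// For Amt == 8, Tmp5 == 8 - 32 wraps to an out-of-range amount, so Tmp6 == 0
// and OutHi == (Hi << 8) | (Lo >> 24), as expected.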

//===----------------------------------------------------------------------===//
// Vector related lowering.
//

/// BuildSplatI - Build a canonical splati of Val with an element size of
/// SplatSize. Cast the result to VT.
static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
                           SelectionDAG &DAG, const SDLoc &dl) {
  assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");

  static const MVT VTys[] = { // canonical VT to use for each size.
    MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
  };

  EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];

  // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
  if (Val == -1)
    SplatSize = 1;

  EVT CanonicalVT = VTys[SplatSize-1];

  // Build a canonical splat for this value.
  return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
}

/// BuildIntrinsicOp - Return a unary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
                                const SDLoc &dl, EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = Op.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), Op);
}

/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
                                SelectionDAG &DAG, const SDLoc &dl,
                                EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = LHS.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
}

/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
                                SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
                                EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = Op0.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
}

/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
/// amount. The result has the specified value type.
static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
                           SelectionDAG &DAG, const SDLoc &dl) {
  // Force LHS/RHS to be the right type.
  LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
  RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);

  int Ops[16];
  for (unsigned i = 0; i != 16; ++i)
    Ops[i] = i + Amt;
  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
  return DAG.getNode(ISD::BITCAST, dl, VT, T);
}
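
// For example (illustrative): BuildVSLDOI(A, B, 4, MVT::v4i32, DAG, dl)
// builds a v16i8 shuffle with mask <4, 5, ..., 19>, i.e. bytes 4-15 of A
// followed by bytes 0-3 of B, matching the semantics of vsldoi with SHB = 4.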

/// Do we have an efficient pattern in a .td file for this node?
///
/// \param V - pointer to the BuildVectorSDNode being matched
/// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
///
/// There are some patterns where it is beneficial to keep a BUILD_VECTOR
/// node as a BUILD_VECTOR node rather than expanding it. The patterns where
/// the opposite is true (expansion is beneficial) are:
/// - The node builds a vector out of integers that are not 32 or 64-bits
/// - The node builds a vector out of constants
/// - The node is a "load-and-splat"
/// In all other cases, we will choose to keep the BUILD_VECTOR.
static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
                                            bool HasDirectMove,
                                            bool HasP8Vector) {
  EVT VecVT = V->getValueType(0);
  bool RightType =
      VecVT == MVT::v2f64 ||
      (HasP8Vector && VecVT == MVT::v4f32) ||
      (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
  if (!RightType)
    return false;

  bool IsSplat = true;
  bool IsLoad = false;
  SDValue Op0 = V->getOperand(0);

  // This function is called in a block that confirms the node is not a
  // constant splat. So a constant BUILD_VECTOR here means the vector is
  // built out of different constants.
  if (V->isConstant())
    return false;
  for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
    if (V->getOperand(i).isUndef())
      return false;
    // We want to expand nodes that represent load-and-splat even if the
    // loaded value is a floating point truncation or conversion to int.
    if (V->getOperand(i).getOpcode() == ISD::LOAD ||
        (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
        (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
        (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
      IsLoad = true;
    // If the operands are different or the input is not a load and has more
    // uses than just this BV node, then it isn't a splat.
    if (V->getOperand(i) != Op0 ||
        (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
      IsSplat = false;
  }
  return !(IsSplat && IsLoad);
}

// Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue Op0 = Op->getOperand(0);

  if (!EnableQuadPrecision ||
      (Op.getValueType() != MVT::f128) ||
      (Op0.getOpcode() != ISD::BUILD_PAIR) ||
      (Op0.getOperand(0).getValueType() != MVT::i64) ||
      (Op0.getOperand(1).getValueType() != MVT::i64))
    return SDValue();

  return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
                     Op0.getOperand(1));
}
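
// A minimal sketch of the rewrite above (illustrative): with quad precision
// enabled,
//   (f128 (bitcast (build_pair i64 %lo, i64 %hi)))
// becomes
//   (PPCISD::BUILD_FP128 f128 %lo, %hi)
// so the two 64-bit halves are assembled directly rather than through memory.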

// If this is a case we can't handle, return null and let the default
// expansion code take care of it. If we CAN select this case, and if it
// selects to a single instruction, return Op. Otherwise, if we can codegen
// this case more efficiently than a constant pool load, lower it to the
// sequence of ops that should be used.
SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc dl(Op);
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");

  if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) {
    // We first build an i32 vector, load it into a QPX register, then convert
    // it to a floating-point vector and compare it to a zero vector to get
    // the boolean result.
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    int FrameIdx = MFI.CreateStackObject(16, 16, false);
    MachinePointerInfo PtrInfo =
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

    assert(BVN->getNumOperands() == 4 &&
           "BUILD_VECTOR for v4i1 does not have 4 operands");

    bool IsConst = true;
    for (unsigned i = 0; i < 4; ++i) {
      if (BVN->getOperand(i).isUndef()) continue;
      if (!isa<ConstantSDNode>(BVN->getOperand(i))) {
        IsConst = false;
        break;
      }
    }

    if (IsConst) {
      Constant *One =
          ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0);
      Constant *NegOne =
          ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0);

      Constant *CV[4];
      for (unsigned i = 0; i < 4; ++i) {
        if (BVN->getOperand(i).isUndef())
          CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext()));
        else if (isNullConstant(BVN->getOperand(i)))
          CV[i] = NegOne;
        else
          CV[i] = One;
      }

      Constant *CP = ConstantVector::get(CV);
      SDValue CPIdx = DAG.getConstantPool(CP,
                                          getPointerTy(DAG.getDataLayout()),
                                          16 /* alignment */);

      SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
      SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other});
      return DAG.getMemIntrinsicNode(
          PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    }

    SmallVector<SDValue, 4> Stores;
    for (unsigned i = 0; i < 4; ++i) {
      if (BVN->getOperand(i).isUndef()) continue;

      unsigned Offset = 4*i;
      SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
      Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

      unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize();
      if (StoreSize > 4) {
        Stores.push_back(
            DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx,
                              PtrInfo.getWithOffset(Offset), MVT::i32));
      } else {
        SDValue StoreValue = BVN->getOperand(i);
        if (StoreSize < 4)
          StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue);

        Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx,
                                      PtrInfo.getWithOffset(Offset)));
      }
    }

    SDValue StoreChain;
    if (!Stores.empty())
      StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
    else
      StoreChain = DAG.getEntryNode();

    // Now load from v4i32 into the QPX register; this will extend it to
    // v4i64 but not yet convert it to a floating-point value. Nevertheless,
    // this is typed as v4f64 because the QPX register integer states are not
    // explicitly represented.

    SDValue Ops[] = {StoreChain,
                     DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32),
                     FIdx};
    SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other});

    SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN,
                                                 dl, VTs, Ops, MVT::v4i32,
                                                 PtrInfo);
    LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
                             DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl,
                                             MVT::i32),
                             LoadedVect);

    SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64);

    return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ);
  }

  // All other QPX vectors are handled by generic code.
  if (Subtarget.hasQPX())
    return SDValue();

  // Check if this is a splat of a constant value.
  APInt APSplatBits, APSplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
                            HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
      SplatBitSize > 32) {
    // BUILD_VECTOR nodes that are not constant splats of up to 32 bits can
    // be lowered to VSX instructions under certain conditions.
    // Without VSX, there is no pattern more efficient than expanding the
    // node.
    if (Subtarget.hasVSX() &&
        haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
                                        Subtarget.hasP8Vector()))
      return Op;
    return SDValue();
  }

  unsigned SplatBits = APSplatBits.getZExtValue();
  unsigned SplatUndef = APSplatUndef.getZExtValue();
  unsigned SplatSize = SplatBitSize / 8;

  // First, handle single instruction cases.

  // All zeros?
  if (SplatBits == 0) {
    // Canonicalize all zero vectors to be v4i32.
    if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
      SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
      Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
    }
    return Op;
  }

  // We have XXSPLTIB for constant splats one byte wide.
  if (Subtarget.hasP9Vector() && SplatSize == 1) {
    // This is a splat of 1-byte elements with some elements potentially
    // undef. Rather than trying to match undef in the SDAG patterns, ensure
    // that all elements are the same constant.
    if (HasAnyUndefs || ISD::isBuildVectorAllOnes(BVN)) {
      SmallVector<SDValue, 16> Ops(16, DAG.getConstant(SplatBits,
                                                       dl, MVT::i32));
      SDValue NewBV = DAG.getBuildVector(MVT::v16i8, dl, Ops);
      if (Op.getValueType() != MVT::v16i8)
        return DAG.getBitcast(Op.getValueType(), NewBV);
      return NewBV;
    }

    // BuildVectorSDNode::isConstantSplat() is actually pretty smart. It'll
    // detect that constant splats like v8i16: 0xABAB are really just splats
    // of a 1-byte constant. In this case, we need to convert the node to a
    // splat of v16i8 and a bitcast.
    if (Op.getValueType() != MVT::v16i8)
      return DAG.getBitcast(Op.getValueType(),
                            DAG.getConstant(SplatBits, dl, MVT::v16i8));

    return Op;
  }

  // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
  int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
                     (32-SplatBitSize));
  if (SextVal >= -16 && SextVal <= 15)
    return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);
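
  // Example of the sign extension above (illustrative): a v8i16 splat of
  // 0xFFFE has SplatBits == 0xFFFE and SplatBitSize == 16, so
  // SextVal == int32_t(0xFFFE0000) >> 16 == -2, which is in [-16,15] and is
  // materialized with a single vspltish -2.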

  // Two instruction sequences.

  // If this value is in the range [-32,30] and is even, use:
  //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
  // If this value is in the range [17,31] and is odd, use:
  //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
  // If this value is in the range [-31,-17] and is odd, use:
  //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
  // Note the last two are three-instruction sequences.
  if (SextVal >= -32 && SextVal <= 31) {
    // To avoid having these optimizations undone by constant folding,
    // we convert to a pseudo that will be expanded later into one of
    // the above forms.
    SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
    EVT VT = (SplatSize == 1 ? MVT::v16i8 :
              (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
    SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
    SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
    if (VT == Op.getValueType())
      return RetVal;
    else
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
  }

  // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
  // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important
  // for fneg/fabs.
  if (SplatSize == 4 && SplatBits == (0x7FFFFFFF & ~SplatUndef)) {
    // Make -1 and vspltisw -1:
    SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);

    // Make the VSLW intrinsic, computing 0x8000_0000.
    SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                   OnesV, DAG, dl);

    // xor by OnesV to invert it.
    Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
  }

  // Check to see if this is a wide variety of vsplti*, binop self cases.
  static const signed char SplatCsts[] = {
    -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
    -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
  };

  for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous (e.g. formation of 0x8000_0000).
    int i = SplatCsts[idx];

    // Figure out what shift amount will be used by altivec if shifted by i
    // in this splat size.
    unsigned TypeShiftAmt = i & (SplatBitSize-1);

    // vsplti + shl self.
    if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
        Intrinsic::ppc_altivec_vslw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + srl self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
        Intrinsic::ppc_altivec_vsrw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + sra self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
        Intrinsic::ppc_altivec_vsraw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + rol self.
    if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                         ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
        Intrinsic::ppc_altivec_vrlw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // t = vsplti c, result = vsldoi t, t, 1
    if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 2
    if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 3
    if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
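
    // Worked example of the vsldoi trick (illustrative, big-endian): a v8i16
    // splat of 0x0200 has SextVal == 512 == (2 << 8) | 0, so it is built as
    // t = vspltish 2 (bytes 00 02 00 02 ...) followed by vsldoi t, t, 1,
    // which rotates the bytes to 02 00 02 00 ..., i.e. half-words of 0x0200.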
  }

  return SDValue();
}

/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
                                      SDValue RHS, SelectionDAG &DAG,
                                      const SDLoc &dl) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);

  enum {
    OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
    OP_VMRGHW,
    OP_VMRGLW,
    OP_VSPLTISW0,
    OP_VSPLTISW1,
    OP_VSPLTISW2,
    OP_VSPLTISW3,
    OP_VSLDOI4,
    OP_VSLDOI8,
    OP_VSLDOI12
  };

  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }

  SDValue OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);

  int ShufIdxs[16];
  switch (OpNum) {
  default: llvm_unreachable("Unknown i32 permute!");
  case OP_VMRGHW:
    ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
    ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
    ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
    ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
    break;
  case OP_VMRGLW:
    ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
    ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
    ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
    ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
    break;
  case OP_VSPLTISW0:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+0;
    break;
  case OP_VSPLTISW1:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+4;
    break;
  case OP_VSPLTISW2:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+8;
    break;
  case OP_VSPLTISW3:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+12;
    break;
  case OP_VSLDOI4:
    return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI8:
    return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI12:
    return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
  }
  EVT VT = OpLHS.getValueType();
  OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
  OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
  return DAG.getNode(ISD::BITCAST, dl, VT, T);
}
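
// For reference, the PFEntry encoding consumed above (illustrative): bits
// [31:30] hold the cost (used by the caller), [29:26] the operation,
// [25:13] LHSID and [12:0] RHSID. Each ID packs four base-9 digits (element
// 0-7, or 8 for undef), so the identity <0,1,2,3> encodes as
// (1*9+2)*9+3 == 102, which is the OP_COPY special case above.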

/// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
/// by the VINSERTB instruction introduced in ISA 3.0, else just return default
/// SDValue.
SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
                                           SelectionDAG &DAG) const {
  const unsigned BytesInVector = 16;
  bool IsLE = Subtarget.isLittleEndian();
  SDLoc dl(N);
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);
  unsigned ShiftElts = 0, InsertAtByte = 0;
  bool Swap = false;

  // Shifts required to get the byte we want at element 7.
  unsigned LittleEndianShifts[] = {8, 7, 6, 5, 4, 3, 2, 1,
                                   0, 15, 14, 13, 12, 11, 10, 9};
  unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
                                1, 2, 3, 4, 5, 6, 7, 8};

  ArrayRef<int> Mask = N->getMask();
  int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};

  // For each mask element, find out if we're just inserting something
  // from V2 into V1 or vice versa.
  // Possible permutations inserting an element from V2 into V1:
  //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
  //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
  //   ...
  //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
  // Inserting from V1 into V2 will be similar, except the mask range will be
  // [16,31].

  bool FoundCandidate = false;
  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
  // Go through the mask of bytes to find an element that's being moved from
  // one vector to the other.
  for (unsigned i = 0; i < BytesInVector; ++i) {
    unsigned CurrentElement = Mask[i];
    // If the 2nd operand is undefined, we should only look for element 7 in
    // the Mask.
    if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
      continue;

    bool OtherElementsInOrder = true;
    // Examine the other elements in the Mask to see if they're in original
    // order.
    for (unsigned j = 0; j < BytesInVector; ++j) {
      if (j == i)
        continue;
      // If CurrentElement is from V1 [0,15], then we expect the rest of the
      // Mask to be from V2 [16,31] and vice versa. Unless the 2nd operand is
      // undefined, in which case we assume we're always picking from the 1st
      // operand.
      int MaskOffset =
          (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
      if (Mask[j] != OriginalOrder[j] + MaskOffset) {
        OtherElementsInOrder = false;
        break;
      }
    }
    // If the other elements are in original order, we record the number of
    // shifts we need to get the element we want into element 7. Also record
    // which byte in the vector we should insert into.
    if (OtherElementsInOrder) {
      // If the 2nd operand is undefined, we assume no shifts and no swapping.
      if (V2.isUndef()) {
        ShiftElts = 0;
        Swap = false;
      } else {
        // Only need the last 4 bits for shifts because operands will be
        // swapped if CurrentElement is >= 2^4.
        ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
                         : BigEndianShifts[CurrentElement & 0xF];
        Swap = CurrentElement < BytesInVector;
      }
      InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
      FoundCandidate = true;
      break;
    }
  }

  if (!FoundCandidate)
    return SDValue();

  // Candidate found, construct the proper SDAG sequence with VINSERTB,
  // optionally with VECSHL if a shift is required.
  if (Swap)
    std::swap(V1, V2);
  if (V2.isUndef())
    V2 = V1;
  if (ShiftElts) {
    SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
                              DAG.getConstant(ShiftElts, dl, MVT::i32));
    return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
                       DAG.getConstant(InsertAtByte, dl, MVT::i32));
  }
  return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
                     DAG.getConstant(InsertAtByte, dl, MVT::i32));
}

/// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
/// by the VINSERTH instruction introduced in ISA 3.0, else just return default
/// SDValue.
SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
                                           SelectionDAG &DAG) const {
  const unsigned NumHalfWords = 8;
  const unsigned BytesInVector = NumHalfWords * 2;
  // Check that the shuffle is on half-words.
  if (!isNByteElemShuffleMask(N, 2, 1))
    return SDValue();

  bool IsLE = Subtarget.isLittleEndian();
  SDLoc dl(N);
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);
  unsigned ShiftElts = 0, InsertAtByte = 0;
  bool Swap = false;

  // Shifts required to get the half-word we want at element 3.
  unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
  unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};

  uint32_t Mask = 0;
  uint32_t OriginalOrderLow = 0x1234567;
  uint32_t OriginalOrderHigh = 0x89ABCDEF;
  // Now we look at mask elements 0,2,4,6,8,10,12,14. Pack the mask into a
  // 32-bit space, only needing a 4-bit nibble per element.
  for (unsigned i = 0; i < NumHalfWords; ++i) {
    unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
    Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
  }
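
  // For example (illustrative): an identity byte mask <0, 1, ..., 15> has
  // half-word indices 0..7, which pack to Mask == 0x01234567, i.e.
  // OriginalOrderLow above (written without its leading zero).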

  // For each mask element, find out if we're just inserting something
  // from V2 into V1 or vice versa. Possible permutations inserting an element
  // from V2 into V1:
  //   X, 1, 2, 3, 4, 5, 6, 7
  //   0, X, 2, 3, 4, 5, 6, 7
  //   0, 1, X, 3, 4, 5, 6, 7
  //   0, 1, 2, X, 4, 5, 6, 7
  //   0, 1, 2, 3, X, 5, 6, 7
  //   0, 1, 2, 3, 4, X, 6, 7
  //   0, 1, 2, 3, 4, 5, X, 7
  //   0, 1, 2, 3, 4, 5, 6, X
  // Inserting from V1 into V2 will be similar, except the mask range will be
  // [8,15].

  bool FoundCandidate = false;
  // Go through the mask of half-words to find an element that's being moved
  // from one vector to the other.
  for (unsigned i = 0; i < NumHalfWords; ++i) {
    unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
    uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
    uint32_t MaskOtherElts = ~(0xF << MaskShift);
    uint32_t TargetOrder = 0x0;

    // If both vector operands for the shuffle are the same vector, the mask
    // will contain only elements from the first one and the second one will
    // be undef.
    if (V2.isUndef()) {
      ShiftElts = 0;
      unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
      TargetOrder = OriginalOrderLow;
      Swap = false;
      // Skip if this is not the correct element or the mask of the other
      // elements doesn't equal our expected order.
      if (MaskOneElt == VINSERTHSrcElem &&
          (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
        InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
        FoundCandidate = true;
        break;
      }
    } else { // If both operands are defined.
      // Target order is [8,15] if the current mask is between [0,7].
      TargetOrder =
          (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
      // Skip if the mask of the other elements doesn't equal our expected
      // order.
      if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
        // We only need the last 3 bits for the number of shifts.
        ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
                         : BigEndianShifts[MaskOneElt & 0x7];
        InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
        Swap = MaskOneElt < NumHalfWords;
        FoundCandidate = true;
        break;
      }
    }
  }

  if (!FoundCandidate)
    return SDValue();

  // Candidate found, construct the proper SDAG sequence with VINSERTH,
  // optionally with VECSHL if a shift is required.
  if (Swap)
    std::swap(V1, V2);
  if (V2.isUndef())
    V2 = V1;
  SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
  if (ShiftElts) {
    // Double ShiftElts because we're left shifting on a v16i8 type.
    SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
                              DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
    SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
    SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
                              DAG.getConstant(InsertAtByte, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
  }
  SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
  SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
                            DAG.getConstant(InsertAtByte, dl, MVT::i32));
  return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
}

/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
/// is a shuffle we can handle in a single instruction, return it. Otherwise,
/// return the code it can be lowered into. Worst case, it can always be
/// lowered into a vperm.
SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  EVT VT = Op.getValueType();
  bool isLittleEndian = Subtarget.isLittleEndian();

  unsigned ShiftElts, InsertAtByte;
  bool Swap = false;
  if (Subtarget.hasP9Vector() &&
      PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
                           isLittleEndian)) {
    if (Swap)
      std::swap(V1, V2);
    SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
    SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
    if (ShiftElts) {
      SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
                                DAG.getConstant(ShiftElts, dl, MVT::i32));
      SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
                                DAG.getConstant(InsertAtByte, dl, MVT::i32));
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
    }
    SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
                              DAG.getConstant(InsertAtByte, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
  }

  if (Subtarget.hasP9Altivec()) {
    SDValue NewISDNode;
    if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
      return NewISDNode;

    if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
      return NewISDNode;
  }

  if (Subtarget.hasVSX() &&
      PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
    if (Swap)
      std::swap(V1, V2);
    SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
    SDValue Conv2 =
        DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);

    SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
                              DAG.getConstant(ShiftElts, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
  }

  if (Subtarget.hasVSX() &&
      PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
    if (Swap)
      std::swap(V1, V2);
    SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
    SDValue Conv2 =
        DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);

    SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1,
                                 Conv2,
                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
  }

  if (Subtarget.hasP9Vector()) {
    if (PPC::isXXBRHShuffleMask(SVOp)) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
      SDValue ReveHWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v8i16, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
    } else if (PPC::isXXBRWShuffleMask(SVOp)) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
      SDValue ReveWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v4i32, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
    } else if (PPC::isXXBRDShuffleMask(SVOp)) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
      SDValue ReveDWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
    } else if (PPC::isXXBRQShuffleMask(SVOp)) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
      SDValue ReveQWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v1i128,
                                      Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
    }
  }

  if (Subtarget.hasVSX()) {
    if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
      int SplatIdx = PPC::getVSPLTImmediate(SVOp, 4, DAG);

      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
      SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
                                  DAG.getConstant(SplatIdx, dl, MVT::i32));
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
    }

    // Left shifts of 8 bytes are actually swaps. Convert accordingly.
    if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
      SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
    }
  }

  if (Subtarget.hasQPX()) {
    if (VT.getVectorNumElements() != 4)
      return SDValue();

    if (V2.isUndef()) V2 = V1;

    int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp);
    if (AlignIdx != -1) {
      return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2,
                         DAG.getConstant(AlignIdx, dl, MVT::i32));
    } else if (SVOp->isSplat()) {
      int SplatIdx = SVOp->getSplatIndex();
      if (SplatIdx >= 4) {
        std::swap(V1, V2);
        SplatIdx -= 4;
      }

      return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1,
                         DAG.getConstant(SplatIdx, dl, MVT::i32));
    }

    // Lower this into a qvgpci/qvfperm pair.

    // Compute the qvgpci literal.
    unsigned idx = 0;
    for (unsigned i = 0; i < 4; ++i) {
      int m = SVOp->getMaskElt(i);
      unsigned mm = m >= 0 ? (unsigned) m : i;
      idx |= mm << (3-i)*3;
    }

    SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64,
                             DAG.getConstant(idx, dl, MVT::i32));
    return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3);
  }

  // Cases that are handled by instructions that take permute immediates
  // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
  // selected by the instruction selector.
  if (V2.isUndef()) {
    if (PPC::isSplatShuffleMask(SVOp, 1) ||
        PPC::isSplatShuffleMask(SVOp, 2) ||
        PPC::isSplatShuffleMask(SVOp, 4) ||
        PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
        PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
        PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
        PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
        PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
        PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
        PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
        PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
        PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
        (Subtarget.hasP8Altivec() && (
         PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
         PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
         PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
      return Op;
    }
  }

  // Altivec has a variety of "shuffle immediates" that take two vector inputs
  // and produce a fixed permutation. If any of these match, do not lower to
  // VPERM.
  unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
  if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
      PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
      PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
      PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
      PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
      PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
      (Subtarget.hasP8Altivec() && (
       PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
       PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
       PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
    return Op;

  // Check to see if this is a shuffle of 4-byte values. If so, we can use our
  // perfect shuffle table to emit an optimal matching sequence.
  ArrayRef<int> PermMask = SVOp->getMask();

  unsigned PFIndexes[4];
  bool isFourElementShuffle = true;
  for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
    unsigned EltNo = 8;   // Start out undef.
    for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
      if (PermMask[i*4+j] < 0)
        continue;   // Undef, ignore it.

      unsigned ByteSource = PermMask[i*4+j];
      if ((ByteSource & 3) != j) {
        isFourElementShuffle = false;
        break;
      }

      if (EltNo == 8) {
        EltNo = ByteSource/4;
      } else if (EltNo != ByteSource/4) {
        isFourElementShuffle = false;
        break;
      }
    }
    PFIndexes[i] = EltNo;
  }
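
  // For example (illustrative): the byte mask <0,1,2,3, 4,5,6,7, 16,17,18,19,
  // 20,21,22,23> is the 4-byte element shuffle <0, 1, 4, 5>, so
  // PFIndexes == {0, 1, 4, 5} and the table index below evaluates to
  // 0*729 + 1*81 + 4*9 + 5 == 122.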
    // For example, if the perm mask can be hoisted out of a loop or is already
    // used (perhaps because there are multiple permutes with the same shuffle
    // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of
    // the loop requires an extra register.
    //
    // As a compromise, we only emit discrete instructions if the shuffle can be
    // generated in 3 or fewer operations.  When we have loop information
    // available, if this block is within a loop, we should avoid using vperm
    // for 3-operation perms and use a constant pool load instead.
    if (Cost < 3)
      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
  }

  // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
  // vector that will get spilled to the constant pool.
  if (V2.isUndef()) V2 = V1;

  // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
  // that it is in input element units, not in bytes.  Convert now.

  // For little endian, the order of the input vectors is reversed, and
  // the permutation mask is complemented with respect to 31.  This is
  // necessary to produce proper semantics with the big-endian-biased vperm
  // instruction.
  EVT EltVT = V1.getValueType().getVectorElementType();
  unsigned BytesPerElement = EltVT.getSizeInBits()/8;

  SmallVector<SDValue, 16> ResultMask;
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
    unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];

    for (unsigned j = 0; j != BytesPerElement; ++j)
      if (isLittleEndian)
        ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
                                             dl, MVT::i32));
      else
        ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
                                             MVT::i32));
  }

  SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
  if (isLittleEndian)
    return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
                       V2, V1, VPermMask);
  else
    return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
                       V1, V2, VPermMask);
}

/// getVectorCompareInfo - Given an intrinsic, return false if it is not a
/// vector comparison. If it is, return true and fill in CompareOpc/isDot with
/// information about the intrinsic.
static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
                                 bool &isDot, const PPCSubtarget &Subtarget) {
  unsigned IntrinsicID =
      cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
  CompareOpc = -1;
  isDot = false;
  switch (IntrinsicID) {
  default:
    return false;
  // Comparison predicates.
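  // Note: each CompareOpc value below (e.g. 966 for vcmpbfp.) is the raw
  // instruction opcode field; it is passed through unchanged as the constant
  // operand of the VCMP/VCMPo node built in LowerINTRINSIC_WO_CHAIN.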
  case Intrinsic::ppc_altivec_vcmpbfp_p:
    CompareOpc = 966;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpeqfp_p:
    CompareOpc = 198;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequb_p:
    CompareOpc = 6;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequh_p:
    CompareOpc = 70;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequw_p:
    CompareOpc = 134;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequd_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 199;
      isDot = true;
    } else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpneb_p:
  case Intrinsic::ppc_altivec_vcmpneh_p:
  case Intrinsic::ppc_altivec_vcmpnew_p:
  case Intrinsic::ppc_altivec_vcmpnezb_p:
  case Intrinsic::ppc_altivec_vcmpnezh_p:
  case Intrinsic::ppc_altivec_vcmpnezw_p:
    if (Subtarget.hasP9Altivec()) {
      switch (IntrinsicID) {
      default:
        llvm_unreachable("Unknown comparison intrinsic.");
      case Intrinsic::ppc_altivec_vcmpneb_p:
        CompareOpc = 7;
        break;
      case Intrinsic::ppc_altivec_vcmpneh_p:
        CompareOpc = 71;
        break;
      case Intrinsic::ppc_altivec_vcmpnew_p:
        CompareOpc = 135;
        break;
      case Intrinsic::ppc_altivec_vcmpnezb_p:
        CompareOpc = 263;
        break;
      case Intrinsic::ppc_altivec_vcmpnezh_p:
        CompareOpc = 327;
        break;
      case Intrinsic::ppc_altivec_vcmpnezw_p:
        CompareOpc = 391;
        break;
      }
      isDot = true;
    } else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgefp_p:
    CompareOpc = 454;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtfp_p:
    CompareOpc = 710;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsb_p:
    CompareOpc = 774;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsh_p:
    CompareOpc = 838;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsw_p:
    CompareOpc = 902;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsd_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 967;
      isDot = true;
    } else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgtub_p:
    CompareOpc = 518;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuh_p:
    CompareOpc = 582;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuw_p:
    CompareOpc = 646;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtud_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 711;
      isDot = true;
    } else
      return false;
    break;

  // VSX predicate comparisons use the same infrastructure.
  case Intrinsic::ppc_vsx_xvcmpeqdp_p:
  case Intrinsic::ppc_vsx_xvcmpgedp_p:
  case Intrinsic::ppc_vsx_xvcmpgtdp_p:
  case Intrinsic::ppc_vsx_xvcmpeqsp_p:
  case Intrinsic::ppc_vsx_xvcmpgesp_p:
  case Intrinsic::ppc_vsx_xvcmpgtsp_p:
    if (Subtarget.hasVSX()) {
      switch (IntrinsicID) {
      case Intrinsic::ppc_vsx_xvcmpeqdp_p:
        CompareOpc = 99;
        break;
      case Intrinsic::ppc_vsx_xvcmpgedp_p:
        CompareOpc = 115;
        break;
      case Intrinsic::ppc_vsx_xvcmpgtdp_p:
        CompareOpc = 107;
        break;
      case Intrinsic::ppc_vsx_xvcmpeqsp_p:
        CompareOpc = 67;
        break;
      case Intrinsic::ppc_vsx_xvcmpgesp_p:
        CompareOpc = 83;
        break;
      case Intrinsic::ppc_vsx_xvcmpgtsp_p:
        CompareOpc = 75;
        break;
      }
      isDot = true;
    } else
      return false;
    break;

  // Normal Comparisons.
  case Intrinsic::ppc_altivec_vcmpbfp:
    CompareOpc = 966;
    break;
  case Intrinsic::ppc_altivec_vcmpeqfp:
    CompareOpc = 198;
    break;
  case Intrinsic::ppc_altivec_vcmpequb:
    CompareOpc = 6;
    break;
  case Intrinsic::ppc_altivec_vcmpequh:
    CompareOpc = 70;
    break;
  case Intrinsic::ppc_altivec_vcmpequw:
    CompareOpc = 134;
    break;
  case Intrinsic::ppc_altivec_vcmpequd:
    if (Subtarget.hasP8Altivec())
      CompareOpc = 199;
    else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpneb:
  case Intrinsic::ppc_altivec_vcmpneh:
  case Intrinsic::ppc_altivec_vcmpnew:
  case Intrinsic::ppc_altivec_vcmpnezb:
  case Intrinsic::ppc_altivec_vcmpnezh:
  case Intrinsic::ppc_altivec_vcmpnezw:
    if (Subtarget.hasP9Altivec())
      switch (IntrinsicID) {
      default:
        llvm_unreachable("Unknown comparison intrinsic.");
      case Intrinsic::ppc_altivec_vcmpneb:
        CompareOpc = 7;
        break;
      case Intrinsic::ppc_altivec_vcmpneh:
        CompareOpc = 71;
        break;
      case Intrinsic::ppc_altivec_vcmpnew:
        CompareOpc = 135;
        break;
      case Intrinsic::ppc_altivec_vcmpnezb:
        CompareOpc = 263;
        break;
      case Intrinsic::ppc_altivec_vcmpnezh:
        CompareOpc = 327;
        break;
      case Intrinsic::ppc_altivec_vcmpnezw:
        CompareOpc = 391;
        break;
      }
    else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgefp:
    CompareOpc = 454;
    break;
  case Intrinsic::ppc_altivec_vcmpgtfp:
    CompareOpc = 710;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsb:
    CompareOpc = 774;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsh:
    CompareOpc = 838;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsw:
    CompareOpc = 902;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsd:
    if (Subtarget.hasP8Altivec())
      CompareOpc = 967;
    else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgtub:
    CompareOpc = 518;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuh:
    CompareOpc = 582;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuw:
    CompareOpc = 646;
    break;
  case Intrinsic::ppc_altivec_vcmpgtud:
    if (Subtarget.hasP8Altivec())
      CompareOpc = 711;
    else
      return false;
    break;
  }
  return true;
}

/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
/// lower, do it, otherwise return null.
SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                   SelectionDAG &DAG) const {
  unsigned IntrinsicID =
      cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  SDLoc dl(Op);

  if (IntrinsicID == Intrinsic::thread_pointer) {
    // Reads the thread pointer register, used for __builtin_thread_pointer.
    if (Subtarget.isPPC64())
      return DAG.getRegister(PPC::X13, MVT::i64);
    return DAG.getRegister(PPC::R2, MVT::i32);
  }

  // We are looking for absolute values here.
  // The idea is to try to fit one of two patterns:
  //   max (a, (0-a))  OR  max ((0-a), a)
  if (Subtarget.hasP9Vector() &&
      (IntrinsicID == Intrinsic::ppc_altivec_vmaxsw ||
       IntrinsicID == Intrinsic::ppc_altivec_vmaxsh ||
       IntrinsicID == Intrinsic::ppc_altivec_vmaxsb)) {
    SDValue V1 = Op.getOperand(1);
    SDValue V2 = Op.getOperand(2);
    if (V1.getSimpleValueType() == V2.getSimpleValueType() &&
        (V1.getSimpleValueType() == MVT::v4i32 ||
         V1.getSimpleValueType() == MVT::v8i16 ||
         V1.getSimpleValueType() == MVT::v16i8)) {
      if (V1.getOpcode() == ISD::SUB &&
          ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
          V1.getOperand(1) == V2) {
        // Generate the abs instruction with the operands.
        return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
      }

      if (V2.getOpcode() == ISD::SUB &&
          ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
          V2.getOperand(1) == V1) {
        // Generate the abs instruction with the operands.
        return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
      }
    }
  }

  // If this is a lowered altivec predicate compare, CompareOpc is set to the
  // opcode number of the comparison.
  int CompareOpc;
  bool isDot;
  if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
    return SDValue();    // Don't custom lower most intrinsics.

  // If this is a non-dot comparison, make the VCMP node and we are done.
  if (!isDot) {
    SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
                              Op.getOperand(1), Op.getOperand(2),
                              DAG.getConstant(CompareOpc, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
  }

  // Create the PPCISD altivec 'dot' comparison node.
  SDValue Ops[] = {
    Op.getOperand(2),  // LHS
    Op.getOperand(3),  // RHS
    DAG.getConstant(CompareOpc, dl, MVT::i32)
  };
  EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
  SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);

  // Now that we have the comparison, emit a copy from the CR to a GPR.
  // This is flagged to the above dot comparison.
  SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
                              DAG.getRegister(PPC::CR6, MVT::i32),
                              CompNode.getValue(1));

  // Unpack the result based on how the target uses it.
  unsigned BitNo;   // Bit # of CR6.
  bool InvertBit;   // Invert result?
  switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
  default:  // Can't happen, don't crash on invalid number though.
  case 0:   // Return the value of the EQ bit of CR6.
    BitNo = 0; InvertBit = false;
    break;
  case 1:   // Return the inverted value of the EQ bit of CR6.
    BitNo = 0; InvertBit = true;
    break;
  case 2:   // Return the value of the LT bit of CR6.
    BitNo = 2; InvertBit = false;
    break;
  case 3:   // Return the inverted value of the LT bit of CR6.
    BitNo = 2; InvertBit = true;
    break;
  }

  // Shift the bit into the low position.
  Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
                      DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
  // Isolate the bit.
  Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
                      DAG.getConstant(1, dl, MVT::i32));

  // If we are supposed to, toggle the bit.
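  // The XOR with 1 below flips the isolated 0/1 result for the inverted
  // predicates (cases 1 and 3 above).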
  if (InvertBit)
    Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
                        DAG.getConstant(1, dl, MVT::i32));
  return Flags;
}

SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                               SelectionDAG &DAG) const {
  // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
  // the beginning of the argument list.
  int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
  SDLoc DL(Op);
  switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
  case Intrinsic::ppc_cfence: {
    assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
    assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
    return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
                                      DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
                                                  Op.getOperand(ArgStart + 1)),
                                      Op.getOperand(0)),
                   0);
  }
  default:
    break;
  }
  return SDValue();
}

SDValue PPCTargetLowering::LowerREM(SDValue Op, SelectionDAG &DAG) const {
  // Check for a DIV with the same operands as this REM.
  for (auto UI : Op.getOperand(1)->uses()) {
    if ((Op.getOpcode() == ISD::SREM && UI->getOpcode() == ISD::SDIV) ||
        (Op.getOpcode() == ISD::UREM && UI->getOpcode() == ISD::UDIV))
      if (UI->getOperand(0) == Op.getOperand(0) &&
          UI->getOperand(1) == Op.getOperand(1))
        return SDValue();
  }
  return Op;
}

// Lower scalar BSWAP64 to xxbrd.
SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // MTVSRDD
  Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
                   Op.getOperand(0));
  // XXBRD
  Op = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Op);
  // MFVSRD
  int VectorIndex = 0;
  if (Subtarget.isLittleEndian())
    VectorIndex = 1;
  Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
                   DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
  return Op;
}

// ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
// compared to a value that is atomically loaded (atomic loads zero-extend).
SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
                                                SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
         "Expecting an atomic compare-and-swap here.");
  SDLoc dl(Op);
  auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
  EVT MemVT = AtomicNode->getMemoryVT();
  if (MemVT.getSizeInBits() >= 32)
    return Op;

  SDValue CmpOp = Op.getOperand(2);
  // If this is already correctly zero-extended, leave it alone.
  auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
  if (DAG.MaskedValueIsZero(CmpOp, HighBits))
    return Op;

  // Clear the high bits of the compare operand.
  unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
  SDValue NewCmpOp =
    DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
                DAG.getConstant(MaskVal, dl, MVT::i32));

  // Replace the existing compare operand with the properly zero-extended one.
  SmallVector<SDValue, 4> Ops;
  for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
    Ops.push_back(AtomicNode->getOperand(i));
  Ops[2] = NewCmpOp;
  MachineMemOperand *MMO = AtomicNode->getMemOperand();
  SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
  auto NodeTy =
      (MemVT == MVT::i8) ?
                           PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
  return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
}

SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // Create a stack slot that is 16-byte aligned.
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(16, 16, false);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  // Store the input value into Value#0 of the stack slot.
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
                               MachinePointerInfo());
  // Load it out.
  return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
         "Should only be called for ISD::INSERT_VECTOR_ELT");

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
  // We have legal lowering for constant indices but not for variable ones.
  if (!C)
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
  if (VT == MVT::v8i16 || VT == MVT::v16i8) {
    SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
    unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
    unsigned InsertAtElement = C->getZExtValue();
    unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
    if (Subtarget.isLittleEndian()) {
      InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
    }
    return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
                       DAG.getConstant(InsertAtByte, dl, MVT::i32));
  }
  return Op;
}

SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDNode *N = Op.getNode();

  assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
         "Unknown extract_vector_elt type");

  SDValue Value = N->getOperand(0);

  // The first part of this is like the store lowering except that we don't
  // need to track the chain.

  // The values are now known to be -1 (false) or 1 (true). To convert this
  // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
  // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
  Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

  // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
  // understand how to form the extending load.
  SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);

  Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

  // Now convert to an integer and store.
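  // Rough shape of the sequence that follows (mnemonics for orientation only):
  //   qvfctiwu  v, v        ; convert each lane to an unsigned word
  //   qvstfiw   v, slot     ; spill the words to a 16-byte stack slot
  //   lwz       r, slot+4*n ; reload the requested element as an i32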
  Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
                      DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
                      Value);

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(16, 16, false);
  MachinePointerInfo PtrInfo =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  SDValue StoreChain = DAG.getEntryNode();
  SDValue Ops[] = {StoreChain,
                   DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
                   Value, FIdx};
  SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);

  StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
                                       dl, VTs, Ops, MVT::v4i32, PtrInfo);

  // Extract the value requested.
  unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
  SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
  Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

  SDValue IntVal =
      DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset));

  if (!Subtarget.useCRBits())
    return IntVal;

  return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal);
}

/// Lowering for QPX v4i1 loads
SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc dl(Op);
  LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
  SDValue LoadChain = LN->getChain();
  SDValue BasePtr = LN->getBasePtr();

  if (Op.getValueType() == MVT::v4f64 ||
      Op.getValueType() == MVT::v4f32) {
    EVT MemVT = LN->getMemoryVT();
    unsigned Alignment = LN->getAlignment();

    // If this load is properly aligned, then it is legal.
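    // For example, a v4f64 load (32-byte store size) with 32-byte alignment
    // stays legal as-is; anything less aligned is split into four scalar
    // loads below.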
    if (Alignment >= MemVT.getStoreSize())
      return Op;

    EVT ScalarVT = Op.getValueType().getScalarType(),
        ScalarMemVT = MemVT.getScalarType();
    unsigned Stride = ScalarMemVT.getStoreSize();

    SDValue Vals[4], LoadChains[4];
    for (unsigned Idx = 0; Idx < 4; ++Idx) {
      SDValue Load;
      if (ScalarVT != ScalarMemVT)
        Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain,
                              BasePtr,
                              LN->getPointerInfo().getWithOffset(Idx * Stride),
                              ScalarMemVT, MinAlign(Alignment, Idx * Stride),
                              LN->getMemOperand()->getFlags(), LN->getAAInfo());
      else
        Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr,
                           LN->getPointerInfo().getWithOffset(Idx * Stride),
                           MinAlign(Alignment, Idx * Stride),
                           LN->getMemOperand()->getFlags(), LN->getAAInfo());

      if (Idx == 0 && LN->isIndexed()) {
        assert(LN->getAddressingMode() == ISD::PRE_INC &&
               "Unknown addressing mode on vector load");
        Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(),
                                  LN->getAddressingMode());
      }

      Vals[Idx] = Load;
      LoadChains[Idx] = Load.getValue(1);

      BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                            DAG.getConstant(Stride, dl,
                                            BasePtr.getValueType()));
    }

    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
    SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals);

    if (LN->isIndexed()) {
      SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
      return DAG.getMergeValues(RetOps, dl);
    }

    SDValue RetOps[] = { Value, TF };
    return DAG.getMergeValues(RetOps, dl);
  }

  assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower");
  assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported");

  // To lower v4i1 from a byte array, we load the byte elements of the
  // vector and then reuse the BUILD_VECTOR logic.

  SDValue VectElmts[4], VectElmtChains[4];
  for (unsigned i = 0; i < 4; ++i) {
    SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);

    VectElmts[i] = DAG.getExtLoad(
        ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx,
        LN->getPointerInfo().getWithOffset(i), MVT::i8,
        /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo());
    VectElmtChains[i] = VectElmts[i].getValue(1);
  }

  LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains);
  SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts);

  SDValue RVals[] = { Value, LoadChain };
  return DAG.getMergeValues(RVals, dl);
}

/// Lowering for QPX v4i1 stores
SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
  SDValue StoreChain = SN->getChain();
  SDValue BasePtr = SN->getBasePtr();
  SDValue Value = SN->getValue();

  if (Value.getValueType() == MVT::v4f64 ||
      Value.getValueType() == MVT::v4f32) {
    EVT MemVT = SN->getMemoryVT();
    unsigned Alignment = SN->getAlignment();

    // If this store is properly aligned, then it is legal.
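    // (Mirror of the load case above: a fully aligned store is left alone,
    // while an underaligned one is scalarized element by element.)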
    if (Alignment >= MemVT.getStoreSize())
      return Op;

    EVT ScalarVT = Value.getValueType().getScalarType(),
        ScalarMemVT = MemVT.getScalarType();
    unsigned Stride = ScalarMemVT.getStoreSize();

    SDValue Stores[4];
    for (unsigned Idx = 0; Idx < 4; ++Idx) {
      SDValue Ex = DAG.getNode(
          ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value,
          DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Store;
      if (ScalarVT != ScalarMemVT)
        Store =
            DAG.getTruncStore(StoreChain, dl, Ex, BasePtr,
                              SN->getPointerInfo().getWithOffset(Idx * Stride),
                              ScalarMemVT, MinAlign(Alignment, Idx * Stride),
                              SN->getMemOperand()->getFlags(), SN->getAAInfo());
      else
        Store = DAG.getStore(StoreChain, dl, Ex, BasePtr,
                             SN->getPointerInfo().getWithOffset(Idx * Stride),
                             MinAlign(Alignment, Idx * Stride),
                             SN->getMemOperand()->getFlags(), SN->getAAInfo());

      if (Idx == 0 && SN->isIndexed()) {
        assert(SN->getAddressingMode() == ISD::PRE_INC &&
               "Unknown addressing mode on vector store");
        Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(),
                                    SN->getAddressingMode());
      }

      BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                            DAG.getConstant(Stride, dl,
                                            BasePtr.getValueType()));
      Stores[Idx] = Store;
    }

    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

    if (SN->isIndexed()) {
      SDValue RetOps[] = { TF, Stores[0].getValue(1) };
      return DAG.getMergeValues(RetOps, dl);
    }

    return TF;
  }

  assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported");
  assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower");

  // The values are now known to be -1 (false) or 1 (true). To convert this
  // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
  // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
  Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

  // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
  // understand how to form the extending load.
  SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);

  Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

  // Now convert to an integer and store.
  Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
                      DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
                      Value);

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(16, 16, false);
  MachinePointerInfo PtrInfo =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  SDValue Ops[] = {StoreChain,
                   DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
                   Value, FIdx};
  SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);

  StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
                                       dl, VTs, Ops, MVT::v4i32, PtrInfo);

  // Move data into the byte array.
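  // The qvstfiw above wrote the four converted words to the stack slot; each
  // word is now reloaded and its low byte truncate-stored to the actual
  // destination.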
  SDValue Loads[4], LoadChains[4];
  for (unsigned i = 0; i < 4; ++i) {
    unsigned Offset = 4*i;
    SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

    Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
                           PtrInfo.getWithOffset(Offset));
    LoadChains[i] = Loads[i].getValue(1);
  }

  StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);

  SDValue Stores[4];
  for (unsigned i = 0; i < 4; ++i) {
    SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);

    Stores[i] = DAG.getTruncStore(
        StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i),
        MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(),
        SN->getAAInfo());
  }

  StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

  return StoreChain;
}

SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  if (Op.getValueType() == MVT::v4i32) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDValue Zero  = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl);
    SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl); // +16 as shift amt.

    SDValue RHSSwap =   // = vrlw RHS, 16
      BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);

    // Shrinkify inputs to v8i16.
    LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
    RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
    RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);

    // Low parts multiplied together, generating 32-bit results (we ignore the
    // top parts).
    SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
                                      LHS, RHS, DAG, dl, MVT::v4i32);

    SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
                                      LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
    // Shift the high parts up 16 bits.
    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
                              Neg16, DAG, dl);
    return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
  } else if (Op.getValueType() == MVT::v8i16) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);

    return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
                            LHS, RHS, Zero, DAG, dl);
  } else if (Op.getValueType() == MVT::v16i8) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
    bool isLittleEndian = Subtarget.isLittleEndian();

    // Multiply the even 8-bit parts, producing 16-bit sums.
    SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
                                         LHS, RHS, DAG, dl, MVT::v8i16);
    EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);

    // Multiply the odd 8-bit parts, producing 16-bit sums.
    SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
                                        LHS, RHS, DAG, dl, MVT::v8i16);
    OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);

    // Merge the results together.  Because vmuleub and vmuloub are
    // instructions with a big-endian bias, we must reverse the
    // element numbering and reverse the meaning of "odd" and "even"
    // when generating little endian code.
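    // For example, the merge mask built below is
    //   <0,16, 2,18, 4,20, ...>  on little endian (low bytes of OddParts
    //                            interleaved with those of EvenParts), and
    //   <1,17, 3,19, 5,21, ...>  on big endian (low bytes of EvenParts
    //                            interleaved with those of OddParts).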
    int Ops[16];
    for (unsigned i = 0; i != 8; ++i) {
      if (isLittleEndian) {
        Ops[i*2  ] = 2*i;
        Ops[i*2+1] = 2*i+16;
      } else {
        Ops[i*2  ] = 2*i+1;
        Ops[i*2+1] = 2*i+1+16;
      }
    }
    if (isLittleEndian)
      return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
    else
      return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
  } else {
    llvm_unreachable("Unknown mul to lower!");
  }
}

/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Wasn't expecting to be able to lower this!");
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
  case ISD::SETCC:              return LowerSETCC(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);

  // Variable argument lowering.
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::VACOPY:             return LowerVACOPY(Op, DAG);

  case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::GET_DYNAMIC_AREA_OFFSET:
    return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);

  // Exception handling lowering.
  case ISD::EH_DWARF_CFA:       return LowerEH_DWARF_CFA(Op, DAG);
  case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
  case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);

  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
  case ISD::FP_TO_UINT:
  case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
  case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);

  // Lower 64-bit shifts.
  case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
  case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
  case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);

  // Vector-related lowering.
  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::MUL:                return LowerMUL(Op, DAG);

  // For counter-based loop handling.
  case ISD::INTRINSIC_W_CHAIN:  return SDValue();

  case ISD::BITCAST:            return LowerBITCAST(Op, DAG);

  // Frame & Return address.
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);

  case ISD::INTRINSIC_VOID:
    return LowerINTRINSIC_VOID(Op, DAG);
  case ISD::SREM:
  case ISD::UREM:
    return LowerREM(Op, DAG);
  case ISD::BSWAP:
    return LowerBSWAP(Op, DAG);
  case ISD::ATOMIC_CMP_SWAP:
    return LowerATOMIC_CMP_SWAP(Op, DAG);
  }
}

void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");
  case ISD::READCYCLECOUNTER: {
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
    SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));

    Results.push_back(RTB);
    Results.push_back(RTB.getValue(1));
    Results.push_back(RTB.getValue(2));
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
        Intrinsic::ppc_is_decremented_ctr_nonzero)
      break;

    assert(N->getValueType(0) == MVT::i1 &&
           "Unexpected result type for CTR decrement intrinsic");
    EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                 N->getValueType(0));
    SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
    SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
                                 N->getOperand(1));

    Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
    Results.push_back(NewInt.getValue(1));
    break;
  }
  case ISD::VAARG: {
    if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
      return;

    EVT VT = N->getValueType(0);

    if (VT == MVT::i64) {
      SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);

      Results.push_back(NewNode);
      Results.push_back(NewNode.getValue(1));
    }
    return;
  }
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // LowerFP_TO_INT() can only handle f32 and f64.
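    // ppcf128 (IBM double-double) falls through here and is left for the
    // generic legalizer to expand.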
    if (N->getOperand(0).getValueType() == MVT::ppcf128)
      return;
    Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
    return;
  }
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *Func = Intrinsic::getDeclaration(M, Id);
  return Builder.CreateCall(Func, {});
}

// The mappings for emitLeading/TrailingFence are taken from
// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                 Instruction *Inst,
                                                 AtomicOrdering Ord) const {
  if (Ord == AtomicOrdering::SequentiallyConsistent)
    return callIntrinsic(Builder, Intrinsic::ppc_sync);
  if (isReleaseOrStronger(Ord))
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  return nullptr;
}

Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                  Instruction *Inst,
                                                  AtomicOrdering Ord) const {
  if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
    // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
    // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
    // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
    if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
      return Builder.CreateCall(
          Intrinsic::getDeclaration(
              Builder.GetInsertBlock()->getParent()->getParent(),
              Intrinsic::ppc_cfence, {Inst->getType()}),
          {Inst});
    // FIXME: Can use isync for RMW operations.
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  }
  return nullptr;
}

MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
                                    unsigned AtomicSize,
                                    unsigned BinOpcode,
                                    unsigned CmpOpcode,
                                    unsigned CmpPred) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  auto LoadMnemonic = PPC::LDARX;
  auto StoreMnemonic = PPC::STDCX;
  switch (AtomicSize) {
  default:
    llvm_unreachable("Unexpected size of atomic entity");
  case 1:
    LoadMnemonic = PPC::LBARX;
    StoreMnemonic = PPC::STBCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "Sub-word atomics require partword atomic support");
    break;
  case 2:
    LoadMnemonic = PPC::LHARX;
    StoreMnemonic = PPC::STHCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "Sub-word atomics require partword atomic support");
    break;
  case 4:
    LoadMnemonic = PPC::LWARX;
    StoreMnemonic = PPC::STWCX;
    break;
  case 8:
    LoadMnemonic = PPC::LDARX;
    StoreMnemonic = PPC::STDCX;
    break;
  }

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = ++BB->getIterator();

  unsigned dest = MI.getOperand(0).getReg();
  unsigned ptrA = MI.getOperand(1).getReg();
  unsigned ptrB = MI.getOperand(2).getReg();
  unsigned incr = MI.getOperand(3).getReg();
  DebugLoc dl = MI.getDebugLoc();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB =
    CmpOpcode ?
                F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  if (CmpOpcode)
    F->insert(It, loop2MBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  unsigned TmpReg = (!BinOpcode) ? incr :
    RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
                                                   : &PPC::GPRCRegClass);

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  //  loopMBB:
  //   l[wd]arx dest, ptr
  //   add r0, dest, incr
  //   st[wd]cx. r0, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB

  // For max/min...
  //  loopMBB:
  //   l[wd]arx dest, ptr
  //   cmpl?[wd] incr, dest
  //   bgt exitMBB
  //  loop2MBB:
  //   st[wd]cx. dest, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB

  BB = loopMBB;
  BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
    .addReg(ptrA).addReg(ptrB);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
  if (CmpOpcode) {
    // Signed comparisons of byte or halfword values must be sign-extended.
    if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
      unsigned ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
      BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
              ExtReg).addReg(dest);
      BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
        .addReg(incr).addReg(ExtReg);
    } else
      BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
        .addReg(incr).addReg(dest);

    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(exitMBB);
    BB = loop2MBB;
  }
  BuildMI(BB, dl, TII->get(StoreMnemonic))
    .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
  BuildMI(BB, dl, TII->get(PPC::BCC))
    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;
  return BB;
}

MachineBasicBlock *
PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr &MI,
                                            MachineBasicBlock *BB,
                                            bool is8bit, // operation
                                            unsigned BinOpcode,
                                            unsigned CmpOpcode,
                                            unsigned CmpPred) const {
  // If we support part-word atomic mnemonics, just use them.
  if (Subtarget.hasPartwordAtomics())
    return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode,
                            CmpOpcode, CmpPred);

  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  // In 64 bit mode we have to use 64 bits for addresses, even though the
  // lwarx/stwcx are 32 bits.  With the 32-bit atomics we can use address
  // registers without caring whether they're 32 or 64, but here we're
  // doing actual arithmetic on the addresses.
  bool is64bit = Subtarget.isPPC64();
  bool isLittleEndian = Subtarget.isLittleEndian();
  unsigned ZeroReg = is64bit ?
                              PPC::ZERO8 : PPC::ZERO;

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = ++BB->getIterator();

  unsigned dest = MI.getOperand(0).getReg();
  unsigned ptrA = MI.getOperand(1).getReg();
  unsigned ptrB = MI.getOperand(2).getReg();
  unsigned incr = MI.getOperand(3).getReg();
  DebugLoc dl = MI.getDebugLoc();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB =
    CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  if (CmpOpcode)
    F->insert(It, loop2MBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass
                                          : &PPC::GPRCRegClass;
  unsigned PtrReg = RegInfo.createVirtualRegister(RC);
  unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
  unsigned ShiftReg =
    isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC);
  unsigned Incr2Reg = RegInfo.createVirtualRegister(RC);
  unsigned MaskReg = RegInfo.createVirtualRegister(RC);
  unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
  unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
  unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
  unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC);
  unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
  unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
  unsigned Ptr1Reg;
  unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC);

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  // The 4-byte load must be aligned, while a char or short may be
  // anywhere in the word.  Hence all this nasty bookkeeping code.
  //   add ptr1, ptrA, ptrB [copy if ptrA==0]
  //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
  //   xori shift, shift1, 24 [16]
  //   rlwinm ptr, ptr1, 0, 0, 29
  //   slw incr2, incr, shift
  //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
  //   slw mask, mask2, shift
  //  loopMBB:
  //   lwarx tmpDest, ptr
  //   add tmp, tmpDest, incr2
  //   andc tmp2, tmpDest, mask
  //   and tmp3, tmp, mask
  //   or tmp4, tmp3, tmp2
  //   stwcx. tmp4, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB
  //   srw dest, tmpDest, shift
  if (ptrA != ZeroReg) {
    Ptr1Reg = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
      .addReg(ptrA).addReg(ptrB);
  } else {
    Ptr1Reg = ptrB;
  }
  BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
    .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
  if (!isLittleEndian)
    BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
      .addReg(Shift1Reg).addImm(is8bit ?
                                         24 : 16);
  if (is64bit)
    BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
      .addReg(Ptr1Reg).addImm(0).addImm(61);
  else
    BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
      .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
  BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg)
    .addReg(incr).addReg(ShiftReg);
  if (is8bit)
    BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
  else {
    BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
    BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg).addReg(Mask3Reg).addImm(65535);
  }
  BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
    .addReg(Mask2Reg).addReg(ShiftReg);

  BB = loopMBB;
  BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
    .addReg(ZeroReg).addReg(PtrReg);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
      .addReg(Incr2Reg).addReg(TmpDestReg);
  BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg)
    .addReg(TmpDestReg).addReg(MaskReg);
  BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg)
    .addReg(TmpReg).addReg(MaskReg);
  if (CmpOpcode) {
    // For unsigned comparisons, we can directly compare the shifted values.
    // For signed comparisons we shift and sign extend.
    unsigned SReg = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), SReg)
      .addReg(TmpDestReg).addReg(MaskReg);
    unsigned ValueReg = SReg;
    unsigned CmpReg = Incr2Reg;
    if (CmpOpcode == PPC::CMPW) {
      ValueReg = RegInfo.createVirtualRegister(RC);
      BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
        .addReg(SReg).addReg(ShiftReg);
      unsigned ValueSReg = RegInfo.createVirtualRegister(RC);
      BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
        .addReg(ValueReg);
      ValueReg = ValueSReg;
      CmpReg = incr;
    }
    BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
      .addReg(CmpReg).addReg(ValueReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(exitMBB);
    BB = loop2MBB;
  }
  BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg)
    .addReg(Tmp3Reg).addReg(Tmp2Reg);
  BuildMI(BB, dl, TII->get(PPC::STWCX))
    .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg);
  BuildMI(BB, dl, TII->get(PPC::BCC))
    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
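  // The srw below shifts the just-loaded word right so the updated
  // byte/halfword lands back in the low bits of dest.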
  BB = exitMBB;
  BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg)
    .addReg(ShiftReg);
  return BB;
}

llvm::MachineBasicBlock *
PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
                                    MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = ++MBB->getIterator();

  unsigned DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
  unsigned mainDstReg = MRI.createVirtualRegister(RC);
  unsigned restoreDstReg = MRI.createVirtualRegister(RC);

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");
  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  SjLjSetup mainMBB
  //  bl mainMBB
  //  v_restore = 1
  //  b sinkMBB
  //
  // mainMBB:
  //  buf[LabelOffset] = LR
  //  v_main = 0
  //
  // sinkMBB:
  //  v = phi(main, restore)
  //

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, sinkMBB);

  MachineInstrBuilder MIB;

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // Note that the structure of the jmp_buf used here is not compatible
  // with that used by libc, and is not designed to be. Specifically, it
  // stores only those 'reserved' registers that LLVM does not otherwise
  // understand how to spill. Also, by convention, by the time this
  // intrinsic is called, Clang has already stored the frame address in the
  // first slot of the buffer and stack address in the third. Following the
  // X86 target code, we'll store the jump address in the second slot. We also
  // need to save the TOC pointer (R2) to handle jumps between shared
  // libraries, and that will be stored in the fourth slot. The thread
  // identifier (R13) is not affected.

  // thisMBB:
  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t TOCOffset   = 3 * PVT.getStoreSize();
  const int64_t BPOffset    = 4 * PVT.getStoreSize();

  // Prepare the IP (the address of the jump-back label) in a register.
  const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
  unsigned LabelReg = MRI.createVirtualRegister(PtrRC);
  unsigned BufReg = MI.getOperand(1).getReg();

  if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) {
    setUsesTOCBasePtr(*MBB->getParent());
    MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
            .addReg(PPC::X2)
            .addImm(TOCOffset)
            .addReg(BufReg)
            .cloneMemRefs(MI);
  }

  // Naked functions never have a base pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned BaseReg;
  if (MF->getFunction().hasFnAttribute(Attribute::Naked))
    BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
  else
    BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;

  MIB = BuildMI(*thisMBB, MI, DL,
                TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
          .addReg(BaseReg)
          .addImm(BPOffset)
          .addReg(BufReg)
          .cloneMemRefs(MI);

  // Setup
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
  MIB.addRegMask(TRI->getNoPreservedMask());

  BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);

  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
          .addMBB(mainMBB);
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);

  thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
  thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());

  // mainMBB:
  //  mainDstReg = 0
  MIB =
      BuildMI(mainMBB, DL,
              TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);

  // Store IP
  if (Subtarget.isPPC64()) {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
            .addReg(LabelReg)
            .addImm(LabelOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
            .addReg(LabelReg)
            .addImm(LabelOffset)
            .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
  mainMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(PPC::PHI), DstReg)
    .addReg(mainDstReg).addMBB(mainMBB)
    .addReg(restoreDstReg).addMBB(thisMBB);

  MI.eraseFromParent();
  return sinkMBB;
}

MachineBasicBlock *
PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
                                     MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  const TargetRegisterClass *RC =
    (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
  unsigned Tmp = MRI.createVirtualRegister(RC);
  // Since FP is only updated here but NOT referenced, it's treated as GPR.
  unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
  unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
  unsigned BP =
      (PVT == MVT::i64)
          ? PPC::X30
          : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
                                                              : PPC::R30);

  MachineInstrBuilder MIB;

  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t SPOffset    = 2 * PVT.getStoreSize();
  const int64_t TOCOffset   = 3 * PVT.getStoreSize();
  const int64_t BPOffset    = 4 * PVT.getStoreSize();

  unsigned BufReg = MI.getOperand(0).getReg();

  // Reload FP (the jumped-to function may not have had a
  // frame pointer, and if so, then its r31 will be restored
  // as necessary).
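  // jmp_buf slot layout used by this pair of intrinsics, in units of
  // PVT.getStoreSize(): 0 = FP, 1 = IP (jump label), 2 = SP, 3 = TOC (X2),
  // 4 = BP.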
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
            .addImm(0)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
            .addImm(0)
            .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  // Reload IP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
            .addImm(LabelOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
            .addImm(LabelOffset)
            .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  // Reload SP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
            .addImm(SPOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
            .addImm(SPOffset)
            .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  // Reload BP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
            .addImm(BPOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
            .addImm(BPOffset)
            .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  // Reload TOC
  if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
    setUsesTOCBasePtr(*MBB->getParent());
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
            .addImm(TOCOffset)
            .addReg(BufReg)
            .cloneMemRefs(MI);
  }

  // Jump
  BuildMI(*MBB, MI, DL,
          TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
  BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));

  MI.eraseFromParent();
  return MBB;
}

MachineBasicBlock *
PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {
  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT) {
    if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() &&
        MI.getOpcode() == TargetOpcode::PATCHPOINT) {
      // Call lowering should have added an r2 operand to indicate a dependence
      // on the TOC base pointer value. It can't however, because there is no
      // way to mark the dependence as implicit there, and so the stackmap code
      // will confuse it with a regular operand. Instead, add the dependence
      // here.
      setUsesTOCBasePtr(*BB->getParent());
      MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
    }

    return emitPatchPoint(MI, BB);
  }

  if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
      MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
    return emitEHSjLjSetJmp(MI, BB);
  } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
             MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
    return emitEHSjLjLongJmp(MI, BB);
  }

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  // To "insert" these instructions we actually have to insert their
  // control-flow patterns.
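  // Each pseudo handled below is expanded into a small diamond/triangle of
  // basic blocks; the new blocks are placed immediately after BB in the
  // function's block list (via the iterator taken just below).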
10243 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 10244 MachineFunction::iterator It = ++BB->getIterator(); 10245 10246 MachineFunction *F = BB->getParent(); 10247 10248 if (MI.getOpcode() == PPC::SELECT_CC_I4 || 10249 MI.getOpcode() == PPC::SELECT_CC_I8 || 10250 MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8) { 10251 SmallVector<MachineOperand, 2> Cond; 10252 if (MI.getOpcode() == PPC::SELECT_CC_I4 || 10253 MI.getOpcode() == PPC::SELECT_CC_I8) 10254 Cond.push_back(MI.getOperand(4)); 10255 else 10256 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET)); 10257 Cond.push_back(MI.getOperand(1)); 10258 10259 DebugLoc dl = MI.getDebugLoc(); 10260 TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond, 10261 MI.getOperand(2).getReg(), MI.getOperand(3).getReg()); 10262 } else if (MI.getOpcode() == PPC::SELECT_CC_I4 || 10263 MI.getOpcode() == PPC::SELECT_CC_I8 || 10264 MI.getOpcode() == PPC::SELECT_CC_F4 || 10265 MI.getOpcode() == PPC::SELECT_CC_F8 || 10266 MI.getOpcode() == PPC::SELECT_CC_F16 || 10267 MI.getOpcode() == PPC::SELECT_CC_QFRC || 10268 MI.getOpcode() == PPC::SELECT_CC_QSRC || 10269 MI.getOpcode() == PPC::SELECT_CC_QBRC || 10270 MI.getOpcode() == PPC::SELECT_CC_VRRC || 10271 MI.getOpcode() == PPC::SELECT_CC_VSFRC || 10272 MI.getOpcode() == PPC::SELECT_CC_VSSRC || 10273 MI.getOpcode() == PPC::SELECT_CC_VSRC || 10274 MI.getOpcode() == PPC::SELECT_CC_SPE4 || 10275 MI.getOpcode() == PPC::SELECT_CC_SPE || 10276 MI.getOpcode() == PPC::SELECT_I4 || 10277 MI.getOpcode() == PPC::SELECT_I8 || 10278 MI.getOpcode() == PPC::SELECT_F4 || 10279 MI.getOpcode() == PPC::SELECT_F8 || 10280 MI.getOpcode() == PPC::SELECT_F16 || 10281 MI.getOpcode() == PPC::SELECT_QFRC || 10282 MI.getOpcode() == PPC::SELECT_QSRC || 10283 MI.getOpcode() == PPC::SELECT_QBRC || 10284 MI.getOpcode() == PPC::SELECT_SPE || 10285 MI.getOpcode() == PPC::SELECT_SPE4 || 10286 MI.getOpcode() == PPC::SELECT_VRRC || 10287 MI.getOpcode() == PPC::SELECT_VSFRC || 10288 MI.getOpcode() == PPC::SELECT_VSSRC || 10289 MI.getOpcode() == PPC::SELECT_VSRC) { 10290 // The incoming instruction knows the destination vreg to set, the 10291 // condition code register to branch on, the true/false values to 10292 // select between, and a branch opcode to use. 10293 10294 // thisMBB: 10295 // ... 10296 // TrueVal = ... 10297 // cmpTY ccX, r1, r2 10298 // bCC copy1MBB 10299 // fallthrough --> copy0MBB 10300 MachineBasicBlock *thisMBB = BB; 10301 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 10302 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 10303 DebugLoc dl = MI.getDebugLoc(); 10304 F->insert(It, copy0MBB); 10305 F->insert(It, sinkMBB); 10306 10307 // Transfer the remainder of BB and its successor edges to sinkMBB. 10308 sinkMBB->splice(sinkMBB->begin(), BB, 10309 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10310 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 10311 10312 // Next, add the true and fallthrough blocks as its successors. 
10313 BB->addSuccessor(copy0MBB); 10314 BB->addSuccessor(sinkMBB); 10315 10316 if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 || 10317 MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 || 10318 MI.getOpcode() == PPC::SELECT_F16 || 10319 MI.getOpcode() == PPC::SELECT_SPE4 || 10320 MI.getOpcode() == PPC::SELECT_SPE || 10321 MI.getOpcode() == PPC::SELECT_QFRC || 10322 MI.getOpcode() == PPC::SELECT_QSRC || 10323 MI.getOpcode() == PPC::SELECT_QBRC || 10324 MI.getOpcode() == PPC::SELECT_VRRC || 10325 MI.getOpcode() == PPC::SELECT_VSFRC || 10326 MI.getOpcode() == PPC::SELECT_VSSRC || 10327 MI.getOpcode() == PPC::SELECT_VSRC) { 10328 BuildMI(BB, dl, TII->get(PPC::BC)) 10329 .addReg(MI.getOperand(1).getReg()) 10330 .addMBB(sinkMBB); 10331 } else { 10332 unsigned SelectPred = MI.getOperand(4).getImm(); 10333 BuildMI(BB, dl, TII->get(PPC::BCC)) 10334 .addImm(SelectPred) 10335 .addReg(MI.getOperand(1).getReg()) 10336 .addMBB(sinkMBB); 10337 } 10338 10339 // copy0MBB: 10340 // %FalseValue = ... 10341 // # fallthrough to sinkMBB 10342 BB = copy0MBB; 10343 10344 // Update machine-CFG edges 10345 BB->addSuccessor(sinkMBB); 10346 10347 // sinkMBB: 10348 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 10349 // ... 10350 BB = sinkMBB; 10351 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg()) 10352 .addReg(MI.getOperand(3).getReg()) 10353 .addMBB(copy0MBB) 10354 .addReg(MI.getOperand(2).getReg()) 10355 .addMBB(thisMBB); 10356 } else if (MI.getOpcode() == PPC::ReadTB) { 10357 // To read the 64-bit time-base register on a 32-bit target, we read the 10358 // two halves. Should the counter have wrapped while it was being read, we 10359 // need to try again. 10360 // ... 10361 // readLoop: 10362 // mfspr Rx,TBU # load from TBU 10363 // mfspr Ry,TB # load from TB 10364 // mfspr Rz,TBU # load from TBU 10365 // cmpw crX,Rx,Rz # check if 'old'='new' 10366 // bne readLoop # branch if they're not equal 10367 // ... 10368 10369 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB); 10370 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 10371 DebugLoc dl = MI.getDebugLoc(); 10372 F->insert(It, readMBB); 10373 F->insert(It, sinkMBB); 10374 10375 // Transfer the remainder of BB and its successor edges to sinkMBB. 
10376 sinkMBB->splice(sinkMBB->begin(), BB, 10377 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10378 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 10379 10380 BB->addSuccessor(readMBB); 10381 BB = readMBB; 10382 10383 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10384 unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 10385 unsigned LoReg = MI.getOperand(0).getReg(); 10386 unsigned HiReg = MI.getOperand(1).getReg(); 10387 10388 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269); 10389 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268); 10390 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269); 10391 10392 unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 10393 10394 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg) 10395 .addReg(HiReg).addReg(ReadAgainReg); 10396 BuildMI(BB, dl, TII->get(PPC::BCC)) 10397 .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB); 10398 10399 BB->addSuccessor(readMBB); 10400 BB->addSuccessor(sinkMBB); 10401 } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 10402 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 10403 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 10404 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 10405 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 10406 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4); 10407 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 10408 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8); 10409 10410 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 10411 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 10412 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 10413 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 10414 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 10415 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND); 10416 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 10417 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8); 10418 10419 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 10420 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 10421 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 10422 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 10423 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 10424 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR); 10425 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 10426 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8); 10427 10428 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 10429 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 10430 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 10431 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 10432 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 10433 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR); 10434 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 10435 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8); 10436 10437 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 10438 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND); 10439 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 10440 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND); 10441 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 10442 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND); 10443 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 10444 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8); 10445 10446 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 10447 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 10448 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 10449 BB = 
EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 10450 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32) 10451 BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF); 10452 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 10453 BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8); 10454 10455 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8) 10456 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE); 10457 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16) 10458 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE); 10459 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32) 10460 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE); 10461 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64) 10462 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE); 10463 10464 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8) 10465 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE); 10466 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16) 10467 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE); 10468 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32) 10469 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE); 10470 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64) 10471 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE); 10472 10473 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8) 10474 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE); 10475 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16) 10476 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE); 10477 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32) 10478 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE); 10479 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64) 10480 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE); 10481 10482 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8) 10483 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE); 10484 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16) 10485 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE); 10486 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32) 10487 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE); 10488 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64) 10489 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE); 10490 10491 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8) 10492 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 10493 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16) 10494 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 10495 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32) 10496 BB = EmitAtomicBinary(MI, BB, 4, 0); 10497 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64) 10498 BB = EmitAtomicBinary(MI, BB, 8, 0); 10499 else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 10500 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 || 10501 (Subtarget.hasPartwordAtomics() && 10502 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) || 10503 (Subtarget.hasPartwordAtomics() && 10504 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) { 10505 bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64; 10506 10507 auto LoadMnemonic = PPC::LDARX; 10508 auto StoreMnemonic = PPC::STDCX; 10509 switch (MI.getOpcode()) { 10510 default: 10511 llvm_unreachable("Compare and swap of unknown size"); 10512 case PPC::ATOMIC_CMP_SWAP_I8: 10513 LoadMnemonic = PPC::LBARX; 10514 StoreMnemonic = PPC::STBCX; 10515 assert(Subtarget.hasPartwordAtomics() 
&& "No support partword atomics."); 10516 break; 10517 case PPC::ATOMIC_CMP_SWAP_I16: 10518 LoadMnemonic = PPC::LHARX; 10519 StoreMnemonic = PPC::STHCX; 10520 assert(Subtarget.hasPartwordAtomics() && "No support partword atomics."); 10521 break; 10522 case PPC::ATOMIC_CMP_SWAP_I32: 10523 LoadMnemonic = PPC::LWARX; 10524 StoreMnemonic = PPC::STWCX; 10525 break; 10526 case PPC::ATOMIC_CMP_SWAP_I64: 10527 LoadMnemonic = PPC::LDARX; 10528 StoreMnemonic = PPC::STDCX; 10529 break; 10530 } 10531 unsigned dest = MI.getOperand(0).getReg(); 10532 unsigned ptrA = MI.getOperand(1).getReg(); 10533 unsigned ptrB = MI.getOperand(2).getReg(); 10534 unsigned oldval = MI.getOperand(3).getReg(); 10535 unsigned newval = MI.getOperand(4).getReg(); 10536 DebugLoc dl = MI.getDebugLoc(); 10537 10538 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 10539 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 10540 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 10541 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 10542 F->insert(It, loop1MBB); 10543 F->insert(It, loop2MBB); 10544 F->insert(It, midMBB); 10545 F->insert(It, exitMBB); 10546 exitMBB->splice(exitMBB->begin(), BB, 10547 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10548 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 10549 10550 // thisMBB: 10551 // ... 10552 // fallthrough --> loopMBB 10553 BB->addSuccessor(loop1MBB); 10554 10555 // loop1MBB: 10556 // l[bhwd]arx dest, ptr 10557 // cmp[wd] dest, oldval 10558 // bne- midMBB 10559 // loop2MBB: 10560 // st[bhwd]cx. newval, ptr 10561 // bne- loopMBB 10562 // b exitBB 10563 // midMBB: 10564 // st[bhwd]cx. dest, ptr 10565 // exitBB: 10566 BB = loop1MBB; 10567 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 10568 .addReg(ptrA).addReg(ptrB); 10569 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0) 10570 .addReg(oldval).addReg(dest); 10571 BuildMI(BB, dl, TII->get(PPC::BCC)) 10572 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 10573 BB->addSuccessor(loop2MBB); 10574 BB->addSuccessor(midMBB); 10575 10576 BB = loop2MBB; 10577 BuildMI(BB, dl, TII->get(StoreMnemonic)) 10578 .addReg(newval).addReg(ptrA).addReg(ptrB); 10579 BuildMI(BB, dl, TII->get(PPC::BCC)) 10580 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 10581 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 10582 BB->addSuccessor(loop1MBB); 10583 BB->addSuccessor(exitMBB); 10584 10585 BB = midMBB; 10586 BuildMI(BB, dl, TII->get(StoreMnemonic)) 10587 .addReg(dest).addReg(ptrA).addReg(ptrB); 10588 BB->addSuccessor(exitMBB); 10589 10590 // exitMBB: 10591 // ... 10592 BB = exitMBB; 10593 } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 10594 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 10595 // We must use 64-bit registers for addresses when targeting 64-bit, 10596 // since we're actually doing arithmetic on them. Other registers 10597 // can be 32-bit. 
10598     bool is64bit = Subtarget.isPPC64();
10599     bool isLittleEndian = Subtarget.isLittleEndian();
10600     bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
10601 
10602     unsigned dest = MI.getOperand(0).getReg();
10603     unsigned ptrA = MI.getOperand(1).getReg();
10604     unsigned ptrB = MI.getOperand(2).getReg();
10605     unsigned oldval = MI.getOperand(3).getReg();
10606     unsigned newval = MI.getOperand(4).getReg();
10607     DebugLoc dl = MI.getDebugLoc();
10608 
10609     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
10610     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
10611     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
10612     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10613     F->insert(It, loop1MBB);
10614     F->insert(It, loop2MBB);
10615     F->insert(It, midMBB);
10616     F->insert(It, exitMBB);
10617     exitMBB->splice(exitMBB->begin(), BB,
10618                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
10619     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10620 
10621     MachineRegisterInfo &RegInfo = F->getRegInfo();
10622     const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass
10623                                             : &PPC::GPRCRegClass;
10624     unsigned PtrReg = RegInfo.createVirtualRegister(RC);
10625     unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
10626     unsigned ShiftReg =
10627         isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC);
10628     unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC);
10629     unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC);
10630     unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC);
10631     unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC);
10632     unsigned MaskReg = RegInfo.createVirtualRegister(RC);
10633     unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
10634     unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
10635     unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
10636     unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
10637     unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
10638     unsigned Ptr1Reg;
10639     unsigned TmpReg = RegInfo.createVirtualRegister(RC);
10640     unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
10641     // thisMBB:
10642     //   ...
10643     //   fallthrough --> loop1MBB
10644     BB->addSuccessor(loop1MBB);
10645 
10646     // The 4-byte load must be aligned, while a char or short may be
10647     // anywhere in the word.  Hence all this nasty bookkeeping code.
10648     //   add ptr1, ptrA, ptrB [copy if ptrA==0]
10649     //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
10650     //   xori shift, shift1, 24 [16]
10651     //   rlwinm ptr, ptr1, 0, 0, 29
10652     //   slw newval2, newval, shift
10653     //   slw oldval2, oldval, shift
10654     //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
10655     //   slw mask, mask2, shift
10656     //   and newval3, newval2, mask
10657     //   and oldval3, oldval2, mask
10658     // loop1MBB:
10659     //   lwarx tmpDest, ptr
10660     //   and tmp, tmpDest, mask
10661     //   cmpw tmp, oldval3
10662     //   bne- midMBB
10663     // loop2MBB:
10664     //   andc tmp2, tmpDest, mask
10665     //   or tmp4, tmp2, newval3
10666     //   stwcx. tmp4, ptr
10667     //   bne- loop1MBB
10668     //   b exitMBB
10669     // midMBB:
10670     //   stwcx. tmpDest, ptr
10671     // exitMBB:
10672     //   srw dest, tmpDest, shift
10673     if (ptrA != ZeroReg) {
10674       Ptr1Reg = RegInfo.createVirtualRegister(RC);
10675       BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
10676           .addReg(ptrA).addReg(ptrB);
10677     } else {
10678       Ptr1Reg = ptrB;
10679     }
10680     BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
10681         .addImm(3).addImm(27).addImm(is8bit ?
28 : 27); 10682 if (!isLittleEndian) 10683 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 10684 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 10685 if (is64bit) 10686 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 10687 .addReg(Ptr1Reg).addImm(0).addImm(61); 10688 else 10689 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 10690 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 10691 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 10692 .addReg(newval).addReg(ShiftReg); 10693 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 10694 .addReg(oldval).addReg(ShiftReg); 10695 if (is8bit) 10696 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 10697 else { 10698 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 10699 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 10700 .addReg(Mask3Reg).addImm(65535); 10701 } 10702 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 10703 .addReg(Mask2Reg).addReg(ShiftReg); 10704 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 10705 .addReg(NewVal2Reg).addReg(MaskReg); 10706 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 10707 .addReg(OldVal2Reg).addReg(MaskReg); 10708 10709 BB = loop1MBB; 10710 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 10711 .addReg(ZeroReg).addReg(PtrReg); 10712 BuildMI(BB, dl, TII->get(PPC::AND),TmpReg) 10713 .addReg(TmpDestReg).addReg(MaskReg); 10714 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 10715 .addReg(TmpReg).addReg(OldVal3Reg); 10716 BuildMI(BB, dl, TII->get(PPC::BCC)) 10717 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 10718 BB->addSuccessor(loop2MBB); 10719 BB->addSuccessor(midMBB); 10720 10721 BB = loop2MBB; 10722 BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg) 10723 .addReg(TmpDestReg).addReg(MaskReg); 10724 BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg) 10725 .addReg(Tmp2Reg).addReg(NewVal3Reg); 10726 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg) 10727 .addReg(ZeroReg).addReg(PtrReg); 10728 BuildMI(BB, dl, TII->get(PPC::BCC)) 10729 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 10730 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 10731 BB->addSuccessor(loop1MBB); 10732 BB->addSuccessor(exitMBB); 10733 10734 BB = midMBB; 10735 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg) 10736 .addReg(ZeroReg).addReg(PtrReg); 10737 BB->addSuccessor(exitMBB); 10738 10739 // exitMBB: 10740 // ... 10741 BB = exitMBB; 10742 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg) 10743 .addReg(ShiftReg); 10744 } else if (MI.getOpcode() == PPC::FADDrtz) { 10745 // This pseudo performs an FADD with rounding mode temporarily forced 10746 // to round-to-zero. We emit this via custom inserter since the FPSCR 10747 // is not modeled at the SelectionDAG level. 10748 unsigned Dest = MI.getOperand(0).getReg(); 10749 unsigned Src1 = MI.getOperand(1).getReg(); 10750 unsigned Src2 = MI.getOperand(2).getReg(); 10751 DebugLoc dl = MI.getDebugLoc(); 10752 10753 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10754 unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 10755 10756 // Save FPSCR value. 10757 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg); 10758 10759 // Set rounding mode to round-to-zero. 10760 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31); 10761 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30); 10762 10763 // Perform addition. 10764 BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2); 10765 10766 // Restore FPSCR value. 
10767 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg); 10768 } else if (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT || 10769 MI.getOpcode() == PPC::ANDIo_1_GT_BIT || 10770 MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 10771 MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) { 10772 unsigned Opcode = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 10773 MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) 10774 ? PPC::ANDIo8 10775 : PPC::ANDIo; 10776 bool isEQ = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT || 10777 MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8); 10778 10779 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10780 unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ? 10781 &PPC::GPRCRegClass : 10782 &PPC::G8RCRegClass); 10783 10784 DebugLoc dl = MI.getDebugLoc(); 10785 BuildMI(*BB, MI, dl, TII->get(Opcode), Dest) 10786 .addReg(MI.getOperand(1).getReg()) 10787 .addImm(1); 10788 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), 10789 MI.getOperand(0).getReg()) 10790 .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT); 10791 } else if (MI.getOpcode() == PPC::TCHECK_RET) { 10792 DebugLoc Dl = MI.getDebugLoc(); 10793 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10794 unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 10795 BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg); 10796 return BB; 10797 } else { 10798 llvm_unreachable("Unexpected instr type to insert"); 10799 } 10800 10801 MI.eraseFromParent(); // The pseudo instruction is gone now. 10802 return BB; 10803 } 10804 10805 //===----------------------------------------------------------------------===// 10806 // Target Optimization Hooks 10807 //===----------------------------------------------------------------------===// 10808 10809 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) { 10810 // For the estimates, convergence is quadratic, so we essentially double the 10811 // number of digits correct after every iteration. For both FRE and FRSQRTE, 10812 // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(), 10813 // this is 2^-14. IEEE float has 23 digits and double has 52 digits. 10814 int RefinementSteps = Subtarget.hasRecipPrec() ? 
1 : 3;
10815   if (VT.getScalarType() == MVT::f64)
10816     RefinementSteps++;
10817   return RefinementSteps;
10818 }
10819 
10820 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
10821                                            int Enabled, int &RefinementSteps,
10822                                            bool &UseOneConstNR,
10823                                            bool Reciprocal) const {
10824   EVT VT = Operand.getValueType();
10825   if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
10826       (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
10827       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
10828       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
10829       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
10830       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
10831     if (RefinementSteps == ReciprocalEstimate::Unspecified)
10832       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
10833 
10834     UseOneConstNR = true;
10835     return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
10836   }
10837   return SDValue();
10838 }
10839 
10840 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
10841                                             int Enabled,
10842                                             int &RefinementSteps) const {
10843   EVT VT = Operand.getValueType();
10844   if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
10845       (VT == MVT::f64 && Subtarget.hasFRE()) ||
10846       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
10847       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
10848       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
10849       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
10850     if (RefinementSteps == ReciprocalEstimate::Unspecified)
10851       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
10852     return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
10853   }
10854   return SDValue();
10855 }
10856 
10857 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
10858   // Note: This functionality is used only when unsafe-fp-math is enabled, and
10859   // on cores with reciprocal estimates (which are used when unsafe-fp-math is
10860   // enabled for division), this functionality is redundant with the default
10861   // combiner logic (once the division -> reciprocal/multiply transformation
10862   // has taken place). As a result, this matters more for older cores than for
10863   // newer ones.
10864 
10865   // Combine multiple FDIVs with the same divisor into multiple FMULs by the
10866   // reciprocal if there are two or more FDIVs (for embedded cores with only
10867   // one FP pipeline) or three or more FDIVs (for generic OOO cores).
10868   switch (Subtarget.getDarwinDirective()) {
10869   default:
10870     return 3;
10871   case PPC::DIR_440:
10872   case PPC::DIR_A2:
10873   case PPC::DIR_E500:
10874   case PPC::DIR_E500mc:
10875   case PPC::DIR_E5500:
10876     return 2;
10877   }
10878 }
10879 
10880 // isConsecutiveLSLoc needs to work even if all adds have not yet been
10881 // collapsed, and so we need to look through chains of them.
10882 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
10883                                       int64_t& Offset, SelectionDAG &DAG) {
10884   if (DAG.isBaseWithConstantOffset(Loc)) {
10885     Base = Loc.getOperand(0);
10886     Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
10887 
10888     // The base might itself be a base plus an offset, and if so, accumulate
10889     // that as well.
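    // For example, (add (add X, 8), 16) accumulates to Base = X, Offset = 24.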
10890 getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG); 10891 } 10892 } 10893 10894 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, 10895 unsigned Bytes, int Dist, 10896 SelectionDAG &DAG) { 10897 if (VT.getSizeInBits() / 8 != Bytes) 10898 return false; 10899 10900 SDValue BaseLoc = Base->getBasePtr(); 10901 if (Loc.getOpcode() == ISD::FrameIndex) { 10902 if (BaseLoc.getOpcode() != ISD::FrameIndex) 10903 return false; 10904 const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 10905 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 10906 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 10907 int FS = MFI.getObjectSize(FI); 10908 int BFS = MFI.getObjectSize(BFI); 10909 if (FS != BFS || FS != (int)Bytes) return false; 10910 return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes); 10911 } 10912 10913 SDValue Base1 = Loc, Base2 = BaseLoc; 10914 int64_t Offset1 = 0, Offset2 = 0; 10915 getBaseWithConstantOffset(Loc, Base1, Offset1, DAG); 10916 getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG); 10917 if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes)) 10918 return true; 10919 10920 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 10921 const GlobalValue *GV1 = nullptr; 10922 const GlobalValue *GV2 = nullptr; 10923 Offset1 = 0; 10924 Offset2 = 0; 10925 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1); 10926 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2); 10927 if (isGA1 && isGA2 && GV1 == GV2) 10928 return Offset1 == (Offset2 + Dist*Bytes); 10929 return false; 10930 } 10931 10932 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does 10933 // not enforce equality of the chain operands. 10934 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base, 10935 unsigned Bytes, int Dist, 10936 SelectionDAG &DAG) { 10937 if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) { 10938 EVT VT = LS->getMemoryVT(); 10939 SDValue Loc = LS->getBasePtr(); 10940 return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG); 10941 } 10942 10943 if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) { 10944 EVT VT; 10945 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10946 default: return false; 10947 case Intrinsic::ppc_qpx_qvlfd: 10948 case Intrinsic::ppc_qpx_qvlfda: 10949 VT = MVT::v4f64; 10950 break; 10951 case Intrinsic::ppc_qpx_qvlfs: 10952 case Intrinsic::ppc_qpx_qvlfsa: 10953 VT = MVT::v4f32; 10954 break; 10955 case Intrinsic::ppc_qpx_qvlfcd: 10956 case Intrinsic::ppc_qpx_qvlfcda: 10957 VT = MVT::v2f64; 10958 break; 10959 case Intrinsic::ppc_qpx_qvlfcs: 10960 case Intrinsic::ppc_qpx_qvlfcsa: 10961 VT = MVT::v2f32; 10962 break; 10963 case Intrinsic::ppc_qpx_qvlfiwa: 10964 case Intrinsic::ppc_qpx_qvlfiwz: 10965 case Intrinsic::ppc_altivec_lvx: 10966 case Intrinsic::ppc_altivec_lvxl: 10967 case Intrinsic::ppc_vsx_lxvw4x: 10968 case Intrinsic::ppc_vsx_lxvw4x_be: 10969 VT = MVT::v4i32; 10970 break; 10971 case Intrinsic::ppc_vsx_lxvd2x: 10972 case Intrinsic::ppc_vsx_lxvd2x_be: 10973 VT = MVT::v2f64; 10974 break; 10975 case Intrinsic::ppc_altivec_lvebx: 10976 VT = MVT::i8; 10977 break; 10978 case Intrinsic::ppc_altivec_lvehx: 10979 VT = MVT::i16; 10980 break; 10981 case Intrinsic::ppc_altivec_lvewx: 10982 VT = MVT::i32; 10983 break; 10984 } 10985 10986 return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG); 10987 } 10988 10989 if (N->getOpcode() == ISD::INTRINSIC_VOID) { 10990 EVT VT; 10991 switch 
(cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
10992     default: return false;
10993     case Intrinsic::ppc_qpx_qvstfd:
10994     case Intrinsic::ppc_qpx_qvstfda:
10995       VT = MVT::v4f64;
10996       break;
10997     case Intrinsic::ppc_qpx_qvstfs:
10998     case Intrinsic::ppc_qpx_qvstfsa:
10999       VT = MVT::v4f32;
11000       break;
11001     case Intrinsic::ppc_qpx_qvstfcd:
11002     case Intrinsic::ppc_qpx_qvstfcda:
11003       VT = MVT::v2f64;
11004       break;
11005     case Intrinsic::ppc_qpx_qvstfcs:
11006     case Intrinsic::ppc_qpx_qvstfcsa:
11007       VT = MVT::v2f32;
11008       break;
11009     case Intrinsic::ppc_qpx_qvstfiw:
11010     case Intrinsic::ppc_qpx_qvstfiwa:
11011     case Intrinsic::ppc_altivec_stvx:
11012     case Intrinsic::ppc_altivec_stvxl:
11013     case Intrinsic::ppc_vsx_stxvw4x:
11014       VT = MVT::v4i32;
11015       break;
11016     case Intrinsic::ppc_vsx_stxvd2x:
11017       VT = MVT::v2f64;
11018       break;
11019     case Intrinsic::ppc_vsx_stxvw4x_be:
11020       VT = MVT::v4i32;
11021       break;
11022     case Intrinsic::ppc_vsx_stxvd2x_be:
11023       VT = MVT::v2f64;
11024       break;
11025     case Intrinsic::ppc_altivec_stvebx:
11026       VT = MVT::i8;
11027       break;
11028     case Intrinsic::ppc_altivec_stvehx:
11029       VT = MVT::i16;
11030       break;
11031     case Intrinsic::ppc_altivec_stvewx:
11032       VT = MVT::i32;
11033       break;
11034     }
11035 
11036     return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
11037   }
11038 
11039   return false;
11040 }
11041 
11042 // Return true if there is a nearby consecutive load to the one provided
11043 // (regardless of alignment). We search up and down the chain, looking through
11044 // token factors and other loads (but nothing else). As a result, a true result
11045 // indicates that it is safe to create a new consecutive load adjacent to the
11046 // load provided.
11047 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
11048   SDValue Chain = LD->getChain();
11049   EVT VT = LD->getMemoryVT();
11050 
11051   SmallSet<SDNode *, 16> LoadRoots;
11052   SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
11053   SmallSet<SDNode *, 16> Visited;
11054 
11055   // First, search up the chain, branching to follow all token-factor operands.
11056   // If we find a consecutive load, then we're done, otherwise, record all
11057   // nodes just above the top-level loads and token factors.
11058   while (!Queue.empty()) {
11059     SDNode *ChainNext = Queue.pop_back_val();
11060     if (!Visited.insert(ChainNext).second)
11061       continue;
11062 
11063     if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
11064       if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
11065         return true;
11066 
11067       if (!Visited.count(ChainLD->getChain().getNode()))
11068         Queue.push_back(ChainLD->getChain().getNode());
11069     } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
11070       for (const SDUse &O : ChainNext->ops())
11071         if (!Visited.count(O.getNode()))
11072           Queue.push_back(O.getNode());
11073     } else
11074       LoadRoots.insert(ChainNext);
11075   }
11076 
11077   // Second, search down the chain, starting from the top-level nodes recorded
11078   // in the first phase. These top-level nodes are the nodes just above all
11079   // loads and token factors. Starting with their uses, recursively look through
11080   // all loads (just the chain uses) and token factors to find a consecutive
11081   // load.
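  // (Phase one only ever walks upward along chain operands and phase two only
  // downward along chain uses; the Visited sets keep either walk from
  // revisiting a node, so the search terminates.)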
11082 Visited.clear(); 11083 Queue.clear(); 11084 11085 for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(), 11086 IE = LoadRoots.end(); I != IE; ++I) { 11087 Queue.push_back(*I); 11088 11089 while (!Queue.empty()) { 11090 SDNode *LoadRoot = Queue.pop_back_val(); 11091 if (!Visited.insert(LoadRoot).second) 11092 continue; 11093 11094 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot)) 11095 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG)) 11096 return true; 11097 11098 for (SDNode::use_iterator UI = LoadRoot->use_begin(), 11099 UE = LoadRoot->use_end(); UI != UE; ++UI) 11100 if (((isa<MemSDNode>(*UI) && 11101 cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) || 11102 UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI)) 11103 Queue.push_back(*UI); 11104 } 11105 } 11106 11107 return false; 11108 } 11109 11110 /// This function is called when we have proved that a SETCC node can be replaced 11111 /// by subtraction (and other supporting instructions) so that the result of 11112 /// comparison is kept in a GPR instead of CR. This function is purely for 11113 /// codegen purposes and has some flags to guide the codegen process. 11114 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement, 11115 bool Swap, SDLoc &DL, SelectionDAG &DAG) { 11116 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected."); 11117 11118 // Zero extend the operands to the largest legal integer. Originally, they 11119 // must be of a strictly smaller size. 11120 auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0), 11121 DAG.getConstant(Size, DL, MVT::i32)); 11122 auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1), 11123 DAG.getConstant(Size, DL, MVT::i32)); 11124 11125 // Swap if needed. Depends on the condition code. 11126 if (Swap) 11127 std::swap(Op0, Op1); 11128 11129 // Subtract extended integers. 11130 auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1); 11131 11132 // Move the sign bit to the least significant position and zero out the rest. 11133 // Now the least significant bit carries the result of original comparison. 11134 auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode, 11135 DAG.getConstant(Size - 1, DL, MVT::i32)); 11136 auto Final = Shifted; 11137 11138 // Complement the result if needed. Based on the condition code. 11139 if (Complement) 11140 Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted, 11141 DAG.getConstant(1, DL, MVT::i64)); 11142 11143 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final); 11144 } 11145 11146 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N, 11147 DAGCombinerInfo &DCI) const { 11148 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected."); 11149 11150 SelectionDAG &DAG = DCI.DAG; 11151 SDLoc DL(N); 11152 11153 // Size of integers being compared has a critical role in the following 11154 // analysis, so we prefer to do this when all types are legal. 
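  // As an illustration, with 32-bit operands on a 64-bit target,
  //   (setult %a, %b)
  // becomes roughly
  //   (trunc (srl (sub (zext %a), (zext %b)), 63))
  // since the sign bit of the 64-bit difference is exactly the unsigned
  // less-than result.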
11155   if (!DCI.isAfterLegalizeDAG())
11156     return SDValue();
11157 
11158   // If all users of SETCC extend its value to a legal integer type,
11159   // then we replace SETCC with a subtraction.
11160   for (SDNode::use_iterator UI = N->use_begin(),
11161        UE = N->use_end(); UI != UE; ++UI) {
11162     if (UI->getOpcode() != ISD::ZERO_EXTEND)
11163       return SDValue();
11164   }
11165 
11166   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
11167   auto OpSize = N->getOperand(0).getValueSizeInBits();
11168 
11169   unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
11170 
11171   if (OpSize < Size) {
11172     switch (CC) {
11173     default: break;
11174     case ISD::SETULT:
11175       return generateEquivalentSub(N, Size, false, false, DL, DAG);
11176     case ISD::SETULE:
11177       return generateEquivalentSub(N, Size, true, true, DL, DAG);
11178     case ISD::SETUGT:
11179       return generateEquivalentSub(N, Size, false, true, DL, DAG);
11180     case ISD::SETUGE:
11181       return generateEquivalentSub(N, Size, true, false, DL, DAG);
11182     }
11183   }
11184 
11185   return SDValue();
11186 }
11187 
11188 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
11189                                                   DAGCombinerInfo &DCI) const {
11190   SelectionDAG &DAG = DCI.DAG;
11191   SDLoc dl(N);
11192 
11193   assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
11194   // If we're tracking CR bits, we need to be careful that we don't have:
11195   //   trunc(binary-ops(zext(x), zext(y)))
11196   // or
11197   //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
11198   // such that we're unnecessarily moving things into GPRs when it would be
11199   // better to keep them in CR bits.
11200 
11201   // Note that trunc here can be an actual i1 trunc, or can be the effective
11202   // truncation that comes from a setcc or select_cc.
11203   if (N->getOpcode() == ISD::TRUNCATE &&
11204       N->getValueType(0) != MVT::i1)
11205     return SDValue();
11206 
11207   if (N->getOperand(0).getValueType() != MVT::i32 &&
11208       N->getOperand(0).getValueType() != MVT::i64)
11209     return SDValue();
11210 
11211   if (N->getOpcode() == ISD::SETCC ||
11212       N->getOpcode() == ISD::SELECT_CC) {
11213     // If we're looking at a comparison, then we need to make sure that the
11214     // high bits (all except for the first) don't affect the result.
11215     ISD::CondCode CC =
11216       cast<CondCodeSDNode>(N->getOperand(
11217         N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
11218     unsigned OpBits = N->getOperand(0).getValueSizeInBits();
11219 
11220     if (ISD::isSignedIntSetCC(CC)) {
11221       if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
11222           DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
11223         return SDValue();
11224     } else if (ISD::isUnsignedIntSetCC(CC)) {
11225       if (!DAG.MaskedValueIsZero(N->getOperand(0),
11226                                  APInt::getHighBitsSet(OpBits, OpBits-1)) ||
11227           !DAG.MaskedValueIsZero(N->getOperand(1),
11228                                  APInt::getHighBitsSet(OpBits, OpBits-1)))
11229         return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
11230                                              : SDValue());
11231     } else {
11232       // This is neither a signed nor an unsigned comparison, just make sure
11233       // that the high bits are equal.
11234       KnownBits Op1Known, Op2Known;
11235       DAG.computeKnownBits(N->getOperand(0), Op1Known);
11236       DAG.computeKnownBits(N->getOperand(1), Op2Known);
11237 
11238       // We don't really care about what is known about the first bit (if
11239       // anything), so clear it in all masks prior to comparing them.
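      // (Bit 0 is the value actually being compared, so it is the one bit
      // that may legitimately differ between the two operands.)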
11240 Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0); 11241 Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0); 11242 11243 if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One) 11244 return SDValue(); 11245 } 11246 } 11247 11248 // We now know that the higher-order bits are irrelevant, we just need to 11249 // make sure that all of the intermediate operations are bit operations, and 11250 // all inputs are extensions. 11251 if (N->getOperand(0).getOpcode() != ISD::AND && 11252 N->getOperand(0).getOpcode() != ISD::OR && 11253 N->getOperand(0).getOpcode() != ISD::XOR && 11254 N->getOperand(0).getOpcode() != ISD::SELECT && 11255 N->getOperand(0).getOpcode() != ISD::SELECT_CC && 11256 N->getOperand(0).getOpcode() != ISD::TRUNCATE && 11257 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND && 11258 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND && 11259 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND) 11260 return SDValue(); 11261 11262 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) && 11263 N->getOperand(1).getOpcode() != ISD::AND && 11264 N->getOperand(1).getOpcode() != ISD::OR && 11265 N->getOperand(1).getOpcode() != ISD::XOR && 11266 N->getOperand(1).getOpcode() != ISD::SELECT && 11267 N->getOperand(1).getOpcode() != ISD::SELECT_CC && 11268 N->getOperand(1).getOpcode() != ISD::TRUNCATE && 11269 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND && 11270 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND && 11271 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND) 11272 return SDValue(); 11273 11274 SmallVector<SDValue, 4> Inputs; 11275 SmallVector<SDValue, 8> BinOps, PromOps; 11276 SmallPtrSet<SDNode *, 16> Visited; 11277 11278 for (unsigned i = 0; i < 2; ++i) { 11279 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 11280 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 11281 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 11282 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) || 11283 isa<ConstantSDNode>(N->getOperand(i))) 11284 Inputs.push_back(N->getOperand(i)); 11285 else 11286 BinOps.push_back(N->getOperand(i)); 11287 11288 if (N->getOpcode() == ISD::TRUNCATE) 11289 break; 11290 } 11291 11292 // Visit all inputs, collect all binary operations (and, or, xor and 11293 // select) that are all fed by extensions. 11294 while (!BinOps.empty()) { 11295 SDValue BinOp = BinOps.back(); 11296 BinOps.pop_back(); 11297 11298 if (!Visited.insert(BinOp.getNode()).second) 11299 continue; 11300 11301 PromOps.push_back(BinOp); 11302 11303 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 11304 // The condition of the select is not promoted. 
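      // (That is operand 0 of SELECT; for SELECT_CC, operands 0 and 1 are the
      // comparison inputs and only operands 2 and 3 are promoted.)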
11305 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 11306 continue; 11307 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 11308 continue; 11309 11310 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 11311 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 11312 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 11313 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) || 11314 isa<ConstantSDNode>(BinOp.getOperand(i))) { 11315 Inputs.push_back(BinOp.getOperand(i)); 11316 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 11317 BinOp.getOperand(i).getOpcode() == ISD::OR || 11318 BinOp.getOperand(i).getOpcode() == ISD::XOR || 11319 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 11320 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC || 11321 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 11322 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 11323 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 11324 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) { 11325 BinOps.push_back(BinOp.getOperand(i)); 11326 } else { 11327 // We have an input that is not an extension or another binary 11328 // operation; we'll abort this transformation. 11329 return SDValue(); 11330 } 11331 } 11332 } 11333 11334 // Make sure that this is a self-contained cluster of operations (which 11335 // is not quite the same thing as saying that everything has only one 11336 // use). 11337 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11338 if (isa<ConstantSDNode>(Inputs[i])) 11339 continue; 11340 11341 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 11342 UE = Inputs[i].getNode()->use_end(); 11343 UI != UE; ++UI) { 11344 SDNode *User = *UI; 11345 if (User != N && !Visited.count(User)) 11346 return SDValue(); 11347 11348 // Make sure that we're not going to promote the non-output-value 11349 // operand(s) or SELECT or SELECT_CC. 11350 // FIXME: Although we could sometimes handle this, and it does occur in 11351 // practice that one of the condition inputs to the select is also one of 11352 // the outputs, we currently can't deal with this. 11353 if (User->getOpcode() == ISD::SELECT) { 11354 if (User->getOperand(0) == Inputs[i]) 11355 return SDValue(); 11356 } else if (User->getOpcode() == ISD::SELECT_CC) { 11357 if (User->getOperand(0) == Inputs[i] || 11358 User->getOperand(1) == Inputs[i]) 11359 return SDValue(); 11360 } 11361 } 11362 } 11363 11364 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 11365 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 11366 UE = PromOps[i].getNode()->use_end(); 11367 UI != UE; ++UI) { 11368 SDNode *User = *UI; 11369 if (User != N && !Visited.count(User)) 11370 return SDValue(); 11371 11372 // Make sure that we're not going to promote the non-output-value 11373 // operand(s) or SELECT or SELECT_CC. 11374 // FIXME: Although we could sometimes handle this, and it does occur in 11375 // practice that one of the condition inputs to the select is also one of 11376 // the outputs, we currently can't deal with this. 11377 if (User->getOpcode() == ISD::SELECT) { 11378 if (User->getOperand(0) == PromOps[i]) 11379 return SDValue(); 11380 } else if (User->getOpcode() == ISD::SELECT_CC) { 11381 if (User->getOperand(0) == PromOps[i] || 11382 User->getOperand(1) == PromOps[i]) 11383 return SDValue(); 11384 } 11385 } 11386 } 11387 11388 // Replace all inputs with the extension operand. 
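  // That is, each (zext/sext/aext i1 X) collected in Inputs is RAUW'd with X
  // itself; the operations in PromOps are then rebuilt below directly on i1
  // values.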
11389 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11390 // Constants may have users outside the cluster of to-be-promoted nodes, 11391 // and so we need to replace those as we do the promotions. 11392 if (isa<ConstantSDNode>(Inputs[i])) 11393 continue; 11394 else 11395 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0)); 11396 } 11397 11398 std::list<HandleSDNode> PromOpHandles; 11399 for (auto &PromOp : PromOps) 11400 PromOpHandles.emplace_back(PromOp); 11401 11402 // Replace all operations (these are all the same, but have a different 11403 // (i1) return type). DAG.getNode will validate that the types of 11404 // a binary operator match, so go through the list in reverse so that 11405 // we've likely promoted both operands first. Any intermediate truncations or 11406 // extensions disappear. 11407 while (!PromOpHandles.empty()) { 11408 SDValue PromOp = PromOpHandles.back().getValue(); 11409 PromOpHandles.pop_back(); 11410 11411 if (PromOp.getOpcode() == ISD::TRUNCATE || 11412 PromOp.getOpcode() == ISD::SIGN_EXTEND || 11413 PromOp.getOpcode() == ISD::ZERO_EXTEND || 11414 PromOp.getOpcode() == ISD::ANY_EXTEND) { 11415 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) && 11416 PromOp.getOperand(0).getValueType() != MVT::i1) { 11417 // The operand is not yet ready (see comment below). 11418 PromOpHandles.emplace_front(PromOp); 11419 continue; 11420 } 11421 11422 SDValue RepValue = PromOp.getOperand(0); 11423 if (isa<ConstantSDNode>(RepValue)) 11424 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue); 11425 11426 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue); 11427 continue; 11428 } 11429 11430 unsigned C; 11431 switch (PromOp.getOpcode()) { 11432 default: C = 0; break; 11433 case ISD::SELECT: C = 1; break; 11434 case ISD::SELECT_CC: C = 2; break; 11435 } 11436 11437 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 11438 PromOp.getOperand(C).getValueType() != MVT::i1) || 11439 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 11440 PromOp.getOperand(C+1).getValueType() != MVT::i1)) { 11441 // The to-be-promoted operands of this node have not yet been 11442 // promoted (this should be rare because we're going through the 11443 // list backward, but if one of the operands has several users in 11444 // this cluster of to-be-promoted nodes, it is possible). 11445 PromOpHandles.emplace_front(PromOp); 11446 continue; 11447 } 11448 11449 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 11450 PromOp.getNode()->op_end()); 11451 11452 // If there are any constant inputs, make sure they're replaced now. 11453 for (unsigned i = 0; i < 2; ++i) 11454 if (isa<ConstantSDNode>(Ops[C+i])) 11455 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]); 11456 11457 DAG.ReplaceAllUsesOfValueWith(PromOp, 11458 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops)); 11459 } 11460 11461 // Now we're left with the initial truncation itself. 11462 if (N->getOpcode() == ISD::TRUNCATE) 11463 return N->getOperand(0); 11464 11465 // Otherwise, this is a comparison. The operands to be compared have just 11466 // changed type (to i1), but everything else is the same. 11467 return SDValue(N, 0); 11468 } 11469 11470 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N, 11471 DAGCombinerInfo &DCI) const { 11472 SelectionDAG &DAG = DCI.DAG; 11473 SDLoc dl(N); 11474 11475 // If we're tracking CR bits, we need to be careful that we don't have: 11476 // zext(binary-ops(trunc(x), trunc(y))) 11477 // or 11478 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...) 
11479 // such that we're unnecessarily moving things into CR bits that can more 11480 // efficiently stay in GPRs. Note that if we're not certain that the high 11481 // bits are set as required by the final extension, we still may need to do 11482 // some masking to get the proper behavior. 11483 11484 // This same functionality is important on PPC64 when dealing with 11485 // 32-to-64-bit extensions; these occur often when 32-bit values are used as 11486 // the return values of functions. Because it is so similar, it is handled 11487 // here as well. 11488 11489 if (N->getValueType(0) != MVT::i32 && 11490 N->getValueType(0) != MVT::i64) 11491 return SDValue(); 11492 11493 if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) || 11494 (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64()))) 11495 return SDValue(); 11496 11497 if (N->getOperand(0).getOpcode() != ISD::AND && 11498 N->getOperand(0).getOpcode() != ISD::OR && 11499 N->getOperand(0).getOpcode() != ISD::XOR && 11500 N->getOperand(0).getOpcode() != ISD::SELECT && 11501 N->getOperand(0).getOpcode() != ISD::SELECT_CC) 11502 return SDValue(); 11503 11504 SmallVector<SDValue, 4> Inputs; 11505 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps; 11506 SmallPtrSet<SDNode *, 16> Visited; 11507 11508 // Visit all inputs, collect all binary operations (and, or, xor and 11509 // select) that are all fed by truncations. 11510 while (!BinOps.empty()) { 11511 SDValue BinOp = BinOps.back(); 11512 BinOps.pop_back(); 11513 11514 if (!Visited.insert(BinOp.getNode()).second) 11515 continue; 11516 11517 PromOps.push_back(BinOp); 11518 11519 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 11520 // The condition of the select is not promoted. 11521 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 11522 continue; 11523 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 11524 continue; 11525 11526 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 11527 isa<ConstantSDNode>(BinOp.getOperand(i))) { 11528 Inputs.push_back(BinOp.getOperand(i)); 11529 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 11530 BinOp.getOperand(i).getOpcode() == ISD::OR || 11531 BinOp.getOperand(i).getOpcode() == ISD::XOR || 11532 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 11533 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) { 11534 BinOps.push_back(BinOp.getOperand(i)); 11535 } else { 11536 // We have an input that is not a truncation or another binary 11537 // operation; we'll abort this transformation. 11538 return SDValue(); 11539 } 11540 } 11541 } 11542 11543 // The operands of a select that must be truncated when the select is 11544 // promoted because the operand is actually part of the to-be-promoted set. 11545 DenseMap<SDNode *, EVT> SelectTruncOp[2]; 11546 11547 // Make sure that this is a self-contained cluster of operations (which 11548 // is not quite the same thing as saying that everything has only one 11549 // use). 11550 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11551 if (isa<ConstantSDNode>(Inputs[i])) 11552 continue; 11553 11554 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 11555 UE = Inputs[i].getNode()->use_end(); 11556 UI != UE; ++UI) { 11557 SDNode *User = *UI; 11558 if (User != N && !Visited.count(User)) 11559 return SDValue(); 11560 11561 // If we're going to promote the non-output-value operand(s) or SELECT or 11562 // SELECT_CC, record them for truncation. 
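      // (Unlike the i1 combine above, which simply gives up when a select
      // condition is among the to-be-promoted values, here we record the
      // original type so the condition can be truncated back when the select
      // is rebuilt.)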
11563 if (User->getOpcode() == ISD::SELECT) { 11564 if (User->getOperand(0) == Inputs[i]) 11565 SelectTruncOp[0].insert(std::make_pair(User, 11566 User->getOperand(0).getValueType())); 11567 } else if (User->getOpcode() == ISD::SELECT_CC) { 11568 if (User->getOperand(0) == Inputs[i]) 11569 SelectTruncOp[0].insert(std::make_pair(User, 11570 User->getOperand(0).getValueType())); 11571 if (User->getOperand(1) == Inputs[i]) 11572 SelectTruncOp[1].insert(std::make_pair(User, 11573 User->getOperand(1).getValueType())); 11574 } 11575 } 11576 } 11577 11578 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 11579 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 11580 UE = PromOps[i].getNode()->use_end(); 11581 UI != UE; ++UI) { 11582 SDNode *User = *UI; 11583 if (User != N && !Visited.count(User)) 11584 return SDValue(); 11585 11586 // If we're going to promote the non-output-value operand(s) or SELECT or 11587 // SELECT_CC, record them for truncation. 11588 if (User->getOpcode() == ISD::SELECT) { 11589 if (User->getOperand(0) == PromOps[i]) 11590 SelectTruncOp[0].insert(std::make_pair(User, 11591 User->getOperand(0).getValueType())); 11592 } else if (User->getOpcode() == ISD::SELECT_CC) { 11593 if (User->getOperand(0) == PromOps[i]) 11594 SelectTruncOp[0].insert(std::make_pair(User, 11595 User->getOperand(0).getValueType())); 11596 if (User->getOperand(1) == PromOps[i]) 11597 SelectTruncOp[1].insert(std::make_pair(User, 11598 User->getOperand(1).getValueType())); 11599 } 11600 } 11601 } 11602 11603 unsigned PromBits = N->getOperand(0).getValueSizeInBits(); 11604 bool ReallyNeedsExt = false; 11605 if (N->getOpcode() != ISD::ANY_EXTEND) { 11606 // If all of the inputs are not already sign/zero extended, then 11607 // we'll still need to do that at the end. 11608 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11609 if (isa<ConstantSDNode>(Inputs[i])) 11610 continue; 11611 11612 unsigned OpBits = 11613 Inputs[i].getOperand(0).getValueSizeInBits(); 11614 assert(PromBits < OpBits && "Truncation not to a smaller bit count?"); 11615 11616 if ((N->getOpcode() == ISD::ZERO_EXTEND && 11617 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0), 11618 APInt::getHighBitsSet(OpBits, 11619 OpBits-PromBits))) || 11620 (N->getOpcode() == ISD::SIGN_EXTEND && 11621 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) < 11622 (OpBits-(PromBits-1)))) { 11623 ReallyNeedsExt = true; 11624 break; 11625 } 11626 } 11627 } 11628 11629 // Replace all inputs, either with the truncation operand, or a 11630 // truncation or extension to the final output type. 11631 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11632 // Constant inputs need to be replaced with the to-be-promoted nodes that 11633 // use them because they might have users outside of the cluster of 11634 // promoted nodes. 
11635 if (isa<ConstantSDNode>(Inputs[i])) 11636 continue; 11637 11638 SDValue InSrc = Inputs[i].getOperand(0); 11639 if (Inputs[i].getValueType() == N->getValueType(0)) 11640 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc); 11641 else if (N->getOpcode() == ISD::SIGN_EXTEND) 11642 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 11643 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0))); 11644 else if (N->getOpcode() == ISD::ZERO_EXTEND) 11645 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 11646 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0))); 11647 else 11648 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 11649 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0))); 11650 } 11651 11652 std::list<HandleSDNode> PromOpHandles; 11653 for (auto &PromOp : PromOps) 11654 PromOpHandles.emplace_back(PromOp); 11655 11656 // Replace all operations (these are all the same, but have a different 11657 // (promoted) return type). DAG.getNode will validate that the types of 11658 // a binary operator match, so go through the list in reverse so that 11659 // we've likely promoted both operands first. 11660 while (!PromOpHandles.empty()) { 11661 SDValue PromOp = PromOpHandles.back().getValue(); 11662 PromOpHandles.pop_back(); 11663 11664 unsigned C; 11665 switch (PromOp.getOpcode()) { 11666 default: C = 0; break; 11667 case ISD::SELECT: C = 1; break; 11668 case ISD::SELECT_CC: C = 2; break; 11669 } 11670 11671 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 11672 PromOp.getOperand(C).getValueType() != N->getValueType(0)) || 11673 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 11674 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) { 11675 // The to-be-promoted operands of this node have not yet been 11676 // promoted (this should be rare because we're going through the 11677 // list backward, but if one of the operands has several users in 11678 // this cluster of to-be-promoted nodes, it is possible). 11679 PromOpHandles.emplace_front(PromOp); 11680 continue; 11681 } 11682 11683 // For SELECT and SELECT_CC nodes, we do a similar check for any 11684 // to-be-promoted comparison inputs. 11685 if (PromOp.getOpcode() == ISD::SELECT || 11686 PromOp.getOpcode() == ISD::SELECT_CC) { 11687 if ((SelectTruncOp[0].count(PromOp.getNode()) && 11688 PromOp.getOperand(0).getValueType() != N->getValueType(0)) || 11689 (SelectTruncOp[1].count(PromOp.getNode()) && 11690 PromOp.getOperand(1).getValueType() != N->getValueType(0))) { 11691 PromOpHandles.emplace_front(PromOp); 11692 continue; 11693 } 11694 } 11695 11696 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 11697 PromOp.getNode()->op_end()); 11698 11699 // If this node has constant inputs, then they'll need to be promoted here. 11700 for (unsigned i = 0; i < 2; ++i) { 11701 if (!isa<ConstantSDNode>(Ops[C+i])) 11702 continue; 11703 if (Ops[C+i].getValueType() == N->getValueType(0)) 11704 continue; 11705 11706 if (N->getOpcode() == ISD::SIGN_EXTEND) 11707 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 11708 else if (N->getOpcode() == ISD::ZERO_EXTEND) 11709 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 11710 else 11711 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 11712 } 11713 11714 // If we've promoted the comparison inputs of a SELECT or SELECT_CC, 11715 // truncate them again to the original value type. 
11716     if (PromOp.getOpcode() == ISD::SELECT ||
11717         PromOp.getOpcode() == ISD::SELECT_CC) {
11718       auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
11719       if (SI0 != SelectTruncOp[0].end())
11720         Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
11721       auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
11722       if (SI1 != SelectTruncOp[1].end())
11723         Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
11724     }
11725
11726     DAG.ReplaceAllUsesOfValueWith(PromOp,
11727       DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
11728   }
11729
11730   // Now we're left with the initial extension itself.
11731   if (!ReallyNeedsExt)
11732     return N->getOperand(0);
11733
11734   // To zero extend, just mask off everything but the low PromBits bits
11735   // (just the low bit in the i1 case).
11736   if (N->getOpcode() == ISD::ZERO_EXTEND)
11737     return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
11738                        DAG.getConstant(APInt::getLowBitsSet(
11739                                          N->getValueSizeInBits(0), PromBits),
11740                                        dl, N->getValueType(0)));
11741
11742   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
11743          "Invalid extension type");
11744   EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
11745   SDValue ShiftCst =
11746     DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
11747   return DAG.getNode(
11748       ISD::SRA, dl, N->getValueType(0),
11749       DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
11750       ShiftCst);
11751 }
11752
11753 // Is this an extending load from an f32 to an f64?
11754 static bool isFPExtLoad(SDValue Op) {
11755   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
11756     return LD->getExtensionType() == ISD::EXTLOAD &&
11757            Op.getValueType() == MVT::f64;
11758   return false;
11759 }
11760
11761 /// Reduces the number of fp-to-int conversions when building a vector.
11762 ///
11763 /// If this vector is built out of floating to integer conversions,
11764 /// transform it to a vector built out of floating point values followed by a
11765 /// single floating to integer conversion of the vector.
11766 /// Namely  (build_vector (fptosi $A), (fptosi $B), ...)
11767 /// becomes (fptosi (build_vector ($A, $B, ...)))
11768 SDValue PPCTargetLowering::
11769 combineElementTruncationToVectorTruncation(SDNode *N,
11770                                            DAGCombinerInfo &DCI) const {
11771   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
11772          "Should be called with a BUILD_VECTOR node");
11773
11774   SelectionDAG &DAG = DCI.DAG;
11775   SDLoc dl(N);
11776
11777   SDValue FirstInput = N->getOperand(0);
11778   assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
11779          "The input operand must be an fp-to-int conversion.");
11780
11781   // This combine happens after legalization so the fp_to_[su]i nodes are
11782   // already converted to PPCISD nodes.
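  // As an illustrative sketch of the rewrite performed below (the operand
  // names are placeholders), for a word-sized result we turn
  //   (build_vector (mfvsr (fctiwz $A)), (mfvsr (fctiwz $B)), ...)
  // into
  //   (fp_to_sint (build_vector $A, $B, ...))
  // so that a single vector conversion replaces the per-element scalar ones.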
11783 unsigned FirstConversion = FirstInput.getOperand(0).getOpcode(); 11784 if (FirstConversion == PPCISD::FCTIDZ || 11785 FirstConversion == PPCISD::FCTIDUZ || 11786 FirstConversion == PPCISD::FCTIWZ || 11787 FirstConversion == PPCISD::FCTIWUZ) { 11788 bool IsSplat = true; 11789 bool Is32Bit = FirstConversion == PPCISD::FCTIWZ || 11790 FirstConversion == PPCISD::FCTIWUZ; 11791 EVT SrcVT = FirstInput.getOperand(0).getValueType(); 11792 SmallVector<SDValue, 4> Ops; 11793 EVT TargetVT = N->getValueType(0); 11794 for (int i = 0, e = N->getNumOperands(); i < e; ++i) { 11795 SDValue NextOp = N->getOperand(i); 11796 if (NextOp.getOpcode() != PPCISD::MFVSR) 11797 return SDValue(); 11798 unsigned NextConversion = NextOp.getOperand(0).getOpcode(); 11799 if (NextConversion != FirstConversion) 11800 return SDValue(); 11801 // If we are converting to 32-bit integers, we need to add an FP_ROUND. 11802 // This is not valid if the input was originally double precision. It is 11803 // also not profitable to do unless this is an extending load in which 11804 // case doing this combine will allow us to combine consecutive loads. 11805 if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0))) 11806 return SDValue(); 11807 if (N->getOperand(i) != FirstInput) 11808 IsSplat = false; 11809 } 11810 11811 // If this is a splat, we leave it as-is since there will be only a single 11812 // fp-to-int conversion followed by a splat of the integer. This is better 11813 // for 32-bit and smaller ints and neutral for 64-bit ints. 11814 if (IsSplat) 11815 return SDValue(); 11816 11817 // Now that we know we have the right type of node, get its operands 11818 for (int i = 0, e = N->getNumOperands(); i < e; ++i) { 11819 SDValue In = N->getOperand(i).getOperand(0); 11820 if (Is32Bit) { 11821 // For 32-bit values, we need to add an FP_ROUND node (if we made it 11822 // here, we know that all inputs are extending loads so this is safe). 11823 if (In.isUndef()) 11824 Ops.push_back(DAG.getUNDEF(SrcVT)); 11825 else { 11826 SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl, 11827 MVT::f32, In.getOperand(0), 11828 DAG.getIntPtrConstant(1, dl)); 11829 Ops.push_back(Trunc); 11830 } 11831 } else 11832 Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0)); 11833 } 11834 11835 unsigned Opcode; 11836 if (FirstConversion == PPCISD::FCTIDZ || 11837 FirstConversion == PPCISD::FCTIWZ) 11838 Opcode = ISD::FP_TO_SINT; 11839 else 11840 Opcode = ISD::FP_TO_UINT; 11841 11842 EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32; 11843 SDValue BV = DAG.getBuildVector(NewVT, dl, Ops); 11844 return DAG.getNode(Opcode, dl, TargetVT, BV); 11845 } 11846 return SDValue(); 11847 } 11848 11849 /// Reduce the number of loads when building a vector. 11850 /// 11851 /// Building a vector out of multiple loads can be converted to a load 11852 /// of the vector type if the loads are consecutive. If the loads are 11853 /// consecutive but in descending order, a shuffle is added at the end 11854 /// to reorder the vector. 
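/// For example (a sketch; the loads are assumed adjacent in memory):
///   (build_vector (load [X]), (load [X+4]), (load [X+8]), (load [X+12]))
/// becomes a single (load v4i32 [X]), while the descending variant
///   (build_vector (load [X+12]), (load [X+8]), (load [X+4]), (load [X]))
/// becomes (vector_shuffle<3,2,1,0> (load v4i32 [X]), undef).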
11855 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) { 11856 assert(N->getOpcode() == ISD::BUILD_VECTOR && 11857 "Should be called with a BUILD_VECTOR node"); 11858 11859 SDLoc dl(N); 11860 bool InputsAreConsecutiveLoads = true; 11861 bool InputsAreReverseConsecutive = true; 11862 unsigned ElemSize = N->getValueType(0).getScalarSizeInBits() / 8; 11863 SDValue FirstInput = N->getOperand(0); 11864 bool IsRoundOfExtLoad = false; 11865 11866 if (FirstInput.getOpcode() == ISD::FP_ROUND && 11867 FirstInput.getOperand(0).getOpcode() == ISD::LOAD) { 11868 LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0)); 11869 IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD; 11870 } 11871 // Not a build vector of (possibly fp_rounded) loads. 11872 if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) || 11873 N->getNumOperands() == 1) 11874 return SDValue(); 11875 11876 for (int i = 1, e = N->getNumOperands(); i < e; ++i) { 11877 // If any inputs are fp_round(extload), they all must be. 11878 if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND) 11879 return SDValue(); 11880 11881 SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) : 11882 N->getOperand(i); 11883 if (NextInput.getOpcode() != ISD::LOAD) 11884 return SDValue(); 11885 11886 SDValue PreviousInput = 11887 IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1); 11888 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput); 11889 LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput); 11890 11891 // If any inputs are fp_round(extload), they all must be. 11892 if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD) 11893 return SDValue(); 11894 11895 if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG)) 11896 InputsAreConsecutiveLoads = false; 11897 if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG)) 11898 InputsAreReverseConsecutive = false; 11899 11900 // Exit early if the loads are neither consecutive nor reverse consecutive. 11901 if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive) 11902 return SDValue(); 11903 } 11904 11905 assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) && 11906 "The loads cannot be both consecutive and reverse consecutive."); 11907 11908 SDValue FirstLoadOp = 11909 IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput; 11910 SDValue LastLoadOp = 11911 IsRoundOfExtLoad ? 
                       N->getOperand(N->getNumOperands()-1).getOperand(0) :
11912                  N->getOperand(N->getNumOperands()-1);
11913
11914   LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
11915   LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
11916   if (InputsAreConsecutiveLoads) {
11917     assert(LD1 && "Input needs to be a LoadSDNode.");
11918     return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
11919                        LD1->getBasePtr(), LD1->getPointerInfo(),
11920                        LD1->getAlignment());
11921   }
11922   if (InputsAreReverseConsecutive) {
11923     assert(LDL && "Input needs to be a LoadSDNode.");
11924     SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
11925                                LDL->getBasePtr(), LDL->getPointerInfo(),
11926                                LDL->getAlignment());
11927     SmallVector<int, 16> Ops;
11928     for (int i = N->getNumOperands() - 1; i >= 0; i--)
11929       Ops.push_back(i);
11930
11931     return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
11932                                 DAG.getUNDEF(N->getValueType(0)), Ops);
11933   }
11934   return SDValue();
11935 }
11936
11937 // This function adds the vector_shuffle needed to get the elements of the
11938 // vector extracts into the correct position, as specified by the
11939 // CorrectElems encoding.
11940 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
11941                                       SDValue Input, uint64_t Elems,
11942                                       uint64_t CorrectElems) {
11943   SDLoc dl(N);
11944
11945   unsigned NumElems = Input.getValueType().getVectorNumElements();
11946   SmallVector<int, 16> ShuffleMask(NumElems, -1);
11947
11948   // Knowing the element indices being extracted from the original
11949   // vector and the order in which they're being inserted, just put
11950   // them at element indices required for the instruction.
11951   for (unsigned i = 0; i < N->getNumOperands(); i++) {
11952     if (DAG.getDataLayout().isLittleEndian())
11953       ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
11954     else
11955       ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
11956     CorrectElems = CorrectElems >> 8;
11957     Elems = Elems >> 8;
11958   }
11959
11960   SDValue Shuffle =
11961       DAG.getVectorShuffle(Input.getValueType(), dl, Input,
11962                            DAG.getUNDEF(Input.getValueType()), ShuffleMask);
11963
11964   EVT Ty = N->getValueType(0);
11965   SDValue BV = DAG.getNode(PPCISD::SExtVElems, dl, Ty, Shuffle);
11966   return BV;
11967 }
11968
11969 // Look for build vector patterns where input operands come from sign
11970 // extended vector_extract elements of specific indices. If the correct indices
11971 // aren't used, add a vector shuffle to fix up the indices and create a new
11972 // PPCISD::SExtVElems node which selects the vector sign extend instructions
11973 // during instruction selection.
11974 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
11975   // This array encodes the indices that the vector sign extend instructions
11976   // extract from when extending from one type to another for both BE and LE.
11977   // The right nibble of each byte corresponds to the LE indices,
11978   // and the left nibble of each byte corresponds to the BE indices.
11979   // For example: 0x3074B8FC  byte->word
11980   //  For LE: the allowed indices are: 0x0,0x4,0x8,0xC
11981   //  For BE: the allowed indices are: 0x3,0x7,0xB,0xF
11982   // For example: 0x000070F8  byte->double word
11983   //  For LE: the allowed indices are: 0x0,0x8
11984   //  For BE: the allowed indices are: 0x7,0xF
11985   uint64_t TargetElems[] = {
11986       0x3074B8FC, // b->w
11987       0x000070F8, // b->d
11988       0x10325476, // h->w
11989       0x00003074, // h->d
11990       0x00001032, // w->d
11991   };
11992
11993   uint64_t Elems = 0;
11994   int Index;
11995   SDValue Input;
11996
11997   auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
11998     if (!Op)
11999       return false;
12000     if (Op.getOpcode() != ISD::SIGN_EXTEND &&
12001         Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
12002       return false;
12003
12004     // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
12005     // of the right width.
12006     SDValue Extract = Op.getOperand(0);
12007     if (Extract.getOpcode() == ISD::ANY_EXTEND)
12008       Extract = Extract.getOperand(0);
12009     if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
12010       return false;
12011
12012     ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
12013     if (!ExtOp)
12014       return false;
12015
12016     Index = ExtOp->getZExtValue();
12017     if (Input && Input != Extract.getOperand(0))
12018       return false;
12019
12020     if (!Input)
12021       Input = Extract.getOperand(0);
12022
12023     Elems = Elems << 8;
12024     Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
12025     Elems |= Index;
12026
12027     return true;
12028   };
12029
12030   // If the build vector operands aren't sign-extended vector extracts
12031   // of the same input vector, then return.
12032   for (unsigned i = 0; i < N->getNumOperands(); i++) {
12033     if (!isSExtOfVecExtract(N->getOperand(i))) {
12034       return SDValue();
12035     }
12036   }
12037
12038   // If the vector extract indices are not correct, add the appropriate
12039   // vector_shuffle.
12040   int TgtElemArrayIdx;
12041   int InputSize = Input.getValueType().getScalarSizeInBits();
12042   int OutputSize = N->getValueType(0).getScalarSizeInBits();
12043   if (InputSize + OutputSize == 40)
12044     TgtElemArrayIdx = 0;
12045   else if (InputSize + OutputSize == 72)
12046     TgtElemArrayIdx = 1;
12047   else if (InputSize + OutputSize == 48)
12048     TgtElemArrayIdx = 2;
12049   else if (InputSize + OutputSize == 80)
12050     TgtElemArrayIdx = 3;
12051   else if (InputSize + OutputSize == 96)
12052     TgtElemArrayIdx = 4;
12053   else
12054     return SDValue();
12055
12056   uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
12057   CorrectElems = DAG.getDataLayout().isLittleEndian()
12058                      ? CorrectElems & 0x0F0F0F0F0F0F0F0F
12059                      : CorrectElems & 0xF0F0F0F0F0F0F0F0;
12060   if (Elems != CorrectElems) {
12061     return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
12062   }
12063
12064   // Regular lowering will catch cases where a shuffle is not needed.
12065   return SDValue();
12066 }
12067
12068 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
12069                                                  DAGCombinerInfo &DCI) const {
12070   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
12071          "Should be called with a BUILD_VECTOR node");
12072
12073   SelectionDAG &DAG = DCI.DAG;
12074   SDLoc dl(N);
12075
12076   if (!Subtarget.hasVSX())
12077     return SDValue();
12078
12079   // The target independent DAG combiner will leave a build_vector of
12080   // float-to-int conversions intact. We can generate MUCH better code for
12081   // a float-to-int conversion of a vector of floats.
12082   SDValue FirstInput = N->getOperand(0);
12083   if (FirstInput.getOpcode() == PPCISD::MFVSR) {
12084     SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
12085     if (Reduced)
12086       return Reduced;
12087   }
12088
12089   // If we're building a vector out of consecutive loads, just load that
12090   // vector type.
12091   SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
12092   if (Reduced)
12093     return Reduced;
12094
12095   // If we're building a vector out of extended elements from another vector
12096   // we have P9 vector integer extend instructions. The code assumes legal
12097   // input types (i.e. it can't handle things like v4i16) so do not run before
12098   // legalization.
12099   if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
12100     Reduced = combineBVOfVecSExt(N, DAG);
12101     if (Reduced)
12102       return Reduced;
12103   }
12104
12105
12106   if (N->getValueType(0) != MVT::v2f64)
12107     return SDValue();
12108
12109   // Looking for:
12110   // (build_vector ([su]int_to_fp (extractelt 0)), ([su]int_to_fp (extractelt 1)))
12111   if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
12112       FirstInput.getOpcode() != ISD::UINT_TO_FP)
12113     return SDValue();
12114   if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
12115       N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
12116     return SDValue();
12117   if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
12118     return SDValue();
12119
12120   SDValue Ext1 = FirstInput.getOperand(0);
12121   SDValue Ext2 = N->getOperand(1).getOperand(0);
12122   if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
12123       Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
12124     return SDValue();
12125
12126   ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
12127   ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
12128   if (!Ext1Op || !Ext2Op)
12129     return SDValue();
12130   if (Ext1.getValueType() != MVT::i32 ||
12131       Ext2.getValueType() != MVT::i32 ||
12132       Ext1.getOperand(0) != Ext2.getOperand(0))
12133     return SDValue();
12134
12135   int FirstElem = Ext1Op->getZExtValue();
12136   int SecondElem = Ext2Op->getZExtValue();
12137   int SubvecIdx;
12138   if (FirstElem == 0 && SecondElem == 1)
12139     SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
12140   else if (FirstElem == 2 && SecondElem == 3)
12141     SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
12142   else
12143     return SDValue();
12144
12145   SDValue SrcVec = Ext1.getOperand(0);
12146   auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
12147                   PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
12148   return DAG.getNode(NodeType, dl, MVT::v2f64,
12149                      SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
12150 }
12151
12152 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
12153                                               DAGCombinerInfo &DCI) const {
12154   assert((N->getOpcode() == ISD::SINT_TO_FP ||
12155           N->getOpcode() == ISD::UINT_TO_FP) &&
12156          "Need an int -> FP conversion node here");
12157
12158   if (useSoftFloat() || !Subtarget.has64BitSupport())
12159     return SDValue();
12160
12161   SelectionDAG &DAG = DCI.DAG;
12162   SDLoc dl(N);
12163   SDValue Op(N, 0);
12164
12165   // Don't handle ppc_fp128 here, or integer source types that the hardware
12166   // conversion instructions cannot handle directly.
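  // Concretely (summarizing the two checks below): the FP result must be f32
  // or f64, and the integer source must be wider than i1 and at most i64.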
12167   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
12168     return SDValue();
12169   if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
12170       Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
12171     return SDValue();
12172
12173   SDValue FirstOperand(Op.getOperand(0));
12174   bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
12175     (FirstOperand.getValueType() == MVT::i8 ||
12176      FirstOperand.getValueType() == MVT::i16);
12177   if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
12178     bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
12179     bool DstDouble = Op.getValueType() == MVT::f64;
12180     unsigned ConvOp = Signed ?
12181       (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
12182       (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
12183     SDValue WidthConst =
12184       DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
12185                             dl, false);
12186     LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
12187     SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
12188     SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
12189                                          DAG.getVTList(MVT::f64, MVT::Other),
12190                                          Ops, MVT::i8, LDN->getMemOperand());
12191
12192     // For signed conversion, we need to sign-extend the value in the VSR.
12193     if (Signed) {
12194       SDValue ExtOps[] = { Ld, WidthConst };
12195       SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
12196       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
12197     } else
12198       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
12199   }
12200
12201
12202   // For i32 intermediate values, unfortunately, the conversion functions
12203   // leave the upper 32 bits of the value undefined. Within the set of
12204   // scalar instructions, we have no way of zero- or sign-extending the
12205   // value. Thus, we cannot handle i32 intermediate values here.
12206   if (Op.getOperand(0).getValueType() == MVT::i32)
12207     return SDValue();
12208
12209   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
12210          "UINT_TO_FP is supported only with FPCVT");
12211
12212   // If we have FCFIDS, then use it when converting to single-precision.
12213   // Otherwise, convert to double-precision and then round.
12214   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
12215                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
12216                                                             : PPCISD::FCFIDS)
12217                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
12218                                                             : PPCISD::FCFID);
12219   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
12220                   ? MVT::f32
12221                   : MVT::f64;
12222
12223   // If we're converting from a float to an int and back to a float again,
12224   // then we don't need the store/load pair at all.
12225   if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
12226        Subtarget.hasFPCVT()) ||
12227       (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
12228     SDValue Src = Op.getOperand(0).getOperand(0);
12229     if (Src.getValueType() == MVT::f32) {
12230       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
12231       DCI.AddToWorklist(Src.getNode());
12232     } else if (Src.getValueType() != MVT::f64) {
12233       // Make sure that we don't pick up a ppc_fp128 source value.
12234       return SDValue();
12235     }
12236
12237     unsigned FCTOp =
12238       Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ?
PPCISD::FCTIDZ : 12239 PPCISD::FCTIDUZ; 12240 12241 SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src); 12242 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp); 12243 12244 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) { 12245 FP = DAG.getNode(ISD::FP_ROUND, dl, 12246 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 12247 DCI.AddToWorklist(FP.getNode()); 12248 } 12249 12250 return FP; 12251 } 12252 12253 return SDValue(); 12254 } 12255 12256 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for 12257 // builtins) into loads with swaps. 12258 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N, 12259 DAGCombinerInfo &DCI) const { 12260 SelectionDAG &DAG = DCI.DAG; 12261 SDLoc dl(N); 12262 SDValue Chain; 12263 SDValue Base; 12264 MachineMemOperand *MMO; 12265 12266 switch (N->getOpcode()) { 12267 default: 12268 llvm_unreachable("Unexpected opcode for little endian VSX load"); 12269 case ISD::LOAD: { 12270 LoadSDNode *LD = cast<LoadSDNode>(N); 12271 Chain = LD->getChain(); 12272 Base = LD->getBasePtr(); 12273 MMO = LD->getMemOperand(); 12274 // If the MMO suggests this isn't a load of a full vector, leave 12275 // things alone. For a built-in, we have to make the change for 12276 // correctness, so if there is a size problem that will be a bug. 12277 if (MMO->getSize() < 16) 12278 return SDValue(); 12279 break; 12280 } 12281 case ISD::INTRINSIC_W_CHAIN: { 12282 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 12283 Chain = Intrin->getChain(); 12284 // Similarly to the store case below, Intrin->getBasePtr() doesn't get 12285 // us what we want. Get operand 2 instead. 12286 Base = Intrin->getOperand(2); 12287 MMO = Intrin->getMemOperand(); 12288 break; 12289 } 12290 } 12291 12292 MVT VecTy = N->getValueType(0).getSimpleVT(); 12293 12294 // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is 12295 // aligned and the type is a vector with elements up to 4 bytes 12296 if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16) 12297 && VecTy.getScalarSizeInBits() <= 32 ) { 12298 return SDValue(); 12299 } 12300 12301 SDValue LoadOps[] = { Chain, Base }; 12302 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl, 12303 DAG.getVTList(MVT::v2f64, MVT::Other), 12304 LoadOps, MVT::v2f64, MMO); 12305 12306 DCI.AddToWorklist(Load.getNode()); 12307 Chain = Load.getValue(1); 12308 SDValue Swap = DAG.getNode( 12309 PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load); 12310 DCI.AddToWorklist(Swap.getNode()); 12311 12312 // Add a bitcast if the resulting load type doesn't match v2f64. 12313 if (VecTy != MVT::v2f64) { 12314 SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap); 12315 DCI.AddToWorklist(N.getNode()); 12316 // Package {bitcast value, swap's chain} to match Load's shape. 12317 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other), 12318 N, Swap.getValue(1)); 12319 } 12320 12321 return Swap; 12322 } 12323 12324 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for 12325 // builtins) into stores with swaps. 
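// For example (an illustrative sketch): on a little-endian subtarget that
// needs swaps, (store v2f64:$src, $ptr) becomes
//   (stxvd2x (xxswapd $src), $ptr)
// so that the doubleword order in memory matches big-endian element numbering.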
12326 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
12327                                                DAGCombinerInfo &DCI) const {
12328   SelectionDAG &DAG = DCI.DAG;
12329   SDLoc dl(N);
12330   SDValue Chain;
12331   SDValue Base;
12332   unsigned SrcOpnd;
12333   MachineMemOperand *MMO;
12334
12335   switch (N->getOpcode()) {
12336   default:
12337     llvm_unreachable("Unexpected opcode for little endian VSX store");
12338   case ISD::STORE: {
12339     StoreSDNode *ST = cast<StoreSDNode>(N);
12340     Chain = ST->getChain();
12341     Base = ST->getBasePtr();
12342     MMO = ST->getMemOperand();
12343     SrcOpnd = 1;
12344     // If the MMO suggests this isn't a store of a full vector, leave
12345     // things alone. For a built-in, we have to make the change for
12346     // correctness, so if there is a size problem that will be a bug.
12347     if (MMO->getSize() < 16)
12348       return SDValue();
12349     break;
12350   }
12351   case ISD::INTRINSIC_VOID: {
12352     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
12353     Chain = Intrin->getChain();
12354     // Intrin->getBasePtr() oddly does not give us what we want.
12355     Base = Intrin->getOperand(3);
12356     MMO = Intrin->getMemOperand();
12357     SrcOpnd = 2;
12358     break;
12359   }
12360   }
12361
12362   SDValue Src = N->getOperand(SrcOpnd);
12363   MVT VecTy = Src.getValueType().getSimpleVT();
12364
12365   // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
12366   // aligned and the type is a vector with elements up to 4 bytes.
12367   if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment() % 16) &&
12368       VecTy.getScalarSizeInBits() <= 32) {
12369     return SDValue();
12370   }
12371
12372   // All stores are done as v2f64, with a bitcast added if needed.
12373   if (VecTy != MVT::v2f64) {
12374     Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
12375     DCI.AddToWorklist(Src.getNode());
12376   }
12377
12378   SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
12379                              DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
12380   DCI.AddToWorklist(Swap.getNode());
12381   Chain = Swap.getValue(1);
12382   SDValue StoreOps[] = { Chain, Swap, Base };
12383   SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
12384                                           DAG.getVTList(MVT::Other),
12385                                           StoreOps, VecTy, MMO);
12386   DCI.AddToWorklist(Store.getNode());
12387   return Store;
12388 }
12389
12390 // Handle DAG combine for STORE (FP_TO_INT F).
12391 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
12392                                                DAGCombinerInfo &DCI) const {
12393
12394   SelectionDAG &DAG = DCI.DAG;
12395   SDLoc dl(N);
12396   unsigned Opcode = N->getOperand(1).getOpcode();
12397
12398   assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
12399          && "Not a FP_TO_INT Instruction!");
12400
12401   SDValue Val = N->getOperand(1).getOperand(0);
12402   EVT Op1VT = N->getOperand(1).getValueType();
12403   EVT ResVT = Val.getValueType();
12404
12405   // Floating point types smaller than 32 bits are not legal on Power.
12406   if (ResVT.getScalarSizeInBits() < 32)
12407     return SDValue();
12408
12409   // Only perform the combine for conversions to i64/i32, or i16/i8 on Power9.
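  // For example (illustrative): (store (fp_to_sint f64:$x), $ptr) with an i32
  // memory type can keep the converted value in a VSR and store it directly,
  // avoiding a round trip through a GPR.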
12410 bool ValidTypeForStoreFltAsInt = 12411 (Op1VT == MVT::i32 || Op1VT == MVT::i64 || 12412 (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8))); 12413 12414 if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Altivec() || 12415 cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt) 12416 return SDValue(); 12417 12418 // Extend f32 values to f64 12419 if (ResVT.getScalarSizeInBits() == 32) { 12420 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 12421 DCI.AddToWorklist(Val.getNode()); 12422 } 12423 12424 // Set signed or unsigned conversion opcode. 12425 unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ? 12426 PPCISD::FP_TO_SINT_IN_VSR : 12427 PPCISD::FP_TO_UINT_IN_VSR; 12428 12429 Val = DAG.getNode(ConvOpcode, 12430 dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val); 12431 DCI.AddToWorklist(Val.getNode()); 12432 12433 // Set number of bytes being converted. 12434 unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8; 12435 SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2), 12436 DAG.getIntPtrConstant(ByteSize, dl, false), 12437 DAG.getValueType(Op1VT) }; 12438 12439 Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl, 12440 DAG.getVTList(MVT::Other), Ops, 12441 cast<StoreSDNode>(N)->getMemoryVT(), 12442 cast<StoreSDNode>(N)->getMemOperand()); 12443 12444 DCI.AddToWorklist(Val.getNode()); 12445 return Val; 12446 } 12447 12448 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, 12449 DAGCombinerInfo &DCI) const { 12450 SelectionDAG &DAG = DCI.DAG; 12451 SDLoc dl(N); 12452 switch (N->getOpcode()) { 12453 default: break; 12454 case ISD::ADD: 12455 return combineADD(N, DCI); 12456 case ISD::SHL: 12457 return combineSHL(N, DCI); 12458 case ISD::SRA: 12459 return combineSRA(N, DCI); 12460 case ISD::SRL: 12461 return combineSRL(N, DCI); 12462 case PPCISD::SHL: 12463 if (isNullConstant(N->getOperand(0))) // 0 << V -> 0. 12464 return N->getOperand(0); 12465 break; 12466 case PPCISD::SRL: 12467 if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0. 12468 return N->getOperand(0); 12469 break; 12470 case PPCISD::SRA: 12471 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 12472 if (C->isNullValue() || // 0 >>s V -> 0. 12473 C->isAllOnesValue()) // -1 >>s V -> -1. 12474 return N->getOperand(0); 12475 } 12476 break; 12477 case ISD::SIGN_EXTEND: 12478 case ISD::ZERO_EXTEND: 12479 case ISD::ANY_EXTEND: 12480 return DAGCombineExtBoolTrunc(N, DCI); 12481 case ISD::TRUNCATE: 12482 case ISD::SETCC: 12483 case ISD::SELECT_CC: 12484 return DAGCombineTruncBoolExt(N, DCI); 12485 case ISD::SINT_TO_FP: 12486 case ISD::UINT_TO_FP: 12487 return combineFPToIntToFP(N, DCI); 12488 case ISD::STORE: { 12489 12490 EVT Op1VT = N->getOperand(1).getValueType(); 12491 unsigned Opcode = N->getOperand(1).getOpcode(); 12492 12493 if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) { 12494 SDValue Val= combineStoreFPToInt(N, DCI); 12495 if (Val) 12496 return Val; 12497 } 12498 12499 // Turn STORE (BSWAP) -> sthbrx/stwbrx. 12500 if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP && 12501 N->getOperand(1).getNode()->hasOneUse() && 12502 (Op1VT == MVT::i32 || Op1VT == MVT::i16 || 12503 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) { 12504 12505 // STBRX can only handle simple types. 12506 EVT mVT = cast<StoreSDNode>(N)->getMemoryVT(); 12507 if (mVT.isExtended()) 12508 break; 12509 12510 SDValue BSwapOp = N->getOperand(1).getOperand(0); 12511 // Do an any-extend to 32-bits if this is a half-word input. 
12512       if (BSwapOp.getValueType() == MVT::i16)
12513         BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
12514
12515       // If the type of the BSWAP operand is wider than the stored memory
12516       // width, it needs to be shifted right before the STBRX.
12517       if (Op1VT.bitsGT(mVT)) {
12518         int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
12519         BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
12520                               DAG.getConstant(Shift, dl, MVT::i32));
12521         // Need to truncate if this is a bswap of i64 stored as i32/i16.
12522         if (Op1VT == MVT::i64)
12523           BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
12524       }
12525
12526       SDValue Ops[] = {
12527         N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
12528       };
12529       return
12530         DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
12531                                 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
12532                                 cast<StoreSDNode>(N)->getMemOperand());
12533     }
12534
12535     // STORE Constant:i32<0> -> STORE<trunc to i32> Constant:i64<0>
12536     // This increases the chance of CSE'ing the constant construction.
12537     if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
12538         isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
12539       // Need to sign-extend to 64 bits to handle negative values.
12540       EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
12541       uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
12542                                     MemVT.getSizeInBits());
12543       SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
12544
12545       // DAG.getTruncStore() can't be used here because it doesn't accept
12546       // the general (base + offset) addressing mode.
12547       // So we use UpdateNodeOperands and setTruncatingStore instead.
12548       DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
12549                              N->getOperand(3));
12550       cast<StoreSDNode>(N)->setTruncatingStore(true);
12551       return SDValue(N, 0);
12552     }
12553
12554     // For little endian, VSX stores require generating xxswapd/stxvd2x.
12555     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
12556     if (Op1VT.isSimple()) {
12557       MVT StoreVT = Op1VT.getSimpleVT();
12558       if (Subtarget.needsSwapsForVSXMemOps() &&
12559           (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
12560            StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
12561         return expandVSXStoreForLE(N, DCI);
12562     }
12563     break;
12564   }
12565   case ISD::LOAD: {
12566     LoadSDNode *LD = cast<LoadSDNode>(N);
12567     EVT VT = LD->getValueType(0);
12568
12569     // For little endian, VSX loads require generating lxvd2x/xxswapd.
12570     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
12571     if (VT.isSimple()) {
12572       MVT LoadVT = VT.getSimpleVT();
12573       if (Subtarget.needsSwapsForVSXMemOps() &&
12574           (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
12575            LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
12576         return expandVSXLoadForLE(N, DCI);
12577     }
12578
12579     // We sometimes end up with a 64-bit integer load, from which we extract
12580     // two single-precision floating-point numbers. This happens with
12581     // std::complex<float>, and other similar structures, because of the way we
12582     // canonicalize structure copies. However, if we lack direct moves,
12583     // then the final bitcasts from the extracted integer values to the
12584     // floating-point numbers turn into store/load pairs. Even with direct moves,
12585     // just loading the two floating-point numbers is likely better.
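    // A sketch of the replacement performed below: the i64 load feeding the
    // truncate/shift/bitcast pattern is rewritten as two f32 loads, one at
    // the base pointer and one at base + 4, and the two f32 bitcasts are
    // replaced with those loads directly.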
12586 auto ReplaceTwoFloatLoad = [&]() { 12587 if (VT != MVT::i64) 12588 return false; 12589 12590 if (LD->getExtensionType() != ISD::NON_EXTLOAD || 12591 LD->isVolatile()) 12592 return false; 12593 12594 // We're looking for a sequence like this: 12595 // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64 12596 // t16: i64 = srl t13, Constant:i32<32> 12597 // t17: i32 = truncate t16 12598 // t18: f32 = bitcast t17 12599 // t19: i32 = truncate t13 12600 // t20: f32 = bitcast t19 12601 12602 if (!LD->hasNUsesOfValue(2, 0)) 12603 return false; 12604 12605 auto UI = LD->use_begin(); 12606 while (UI.getUse().getResNo() != 0) ++UI; 12607 SDNode *Trunc = *UI++; 12608 while (UI.getUse().getResNo() != 0) ++UI; 12609 SDNode *RightShift = *UI; 12610 if (Trunc->getOpcode() != ISD::TRUNCATE) 12611 std::swap(Trunc, RightShift); 12612 12613 if (Trunc->getOpcode() != ISD::TRUNCATE || 12614 Trunc->getValueType(0) != MVT::i32 || 12615 !Trunc->hasOneUse()) 12616 return false; 12617 if (RightShift->getOpcode() != ISD::SRL || 12618 !isa<ConstantSDNode>(RightShift->getOperand(1)) || 12619 RightShift->getConstantOperandVal(1) != 32 || 12620 !RightShift->hasOneUse()) 12621 return false; 12622 12623 SDNode *Trunc2 = *RightShift->use_begin(); 12624 if (Trunc2->getOpcode() != ISD::TRUNCATE || 12625 Trunc2->getValueType(0) != MVT::i32 || 12626 !Trunc2->hasOneUse()) 12627 return false; 12628 12629 SDNode *Bitcast = *Trunc->use_begin(); 12630 SDNode *Bitcast2 = *Trunc2->use_begin(); 12631 12632 if (Bitcast->getOpcode() != ISD::BITCAST || 12633 Bitcast->getValueType(0) != MVT::f32) 12634 return false; 12635 if (Bitcast2->getOpcode() != ISD::BITCAST || 12636 Bitcast2->getValueType(0) != MVT::f32) 12637 return false; 12638 12639 if (Subtarget.isLittleEndian()) 12640 std::swap(Bitcast, Bitcast2); 12641 12642 // Bitcast has the second float (in memory-layout order) and Bitcast2 12643 // has the first one. 12644 12645 SDValue BasePtr = LD->getBasePtr(); 12646 if (LD->isIndexed()) { 12647 assert(LD->getAddressingMode() == ISD::PRE_INC && 12648 "Non-pre-inc AM on PPC?"); 12649 BasePtr = 12650 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 12651 LD->getOffset()); 12652 } 12653 12654 auto MMOFlags = 12655 LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile; 12656 SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr, 12657 LD->getPointerInfo(), LD->getAlignment(), 12658 MMOFlags, LD->getAAInfo()); 12659 SDValue AddPtr = 12660 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), 12661 BasePtr, DAG.getIntPtrConstant(4, dl)); 12662 SDValue FloatLoad2 = DAG.getLoad( 12663 MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr, 12664 LD->getPointerInfo().getWithOffset(4), 12665 MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo()); 12666 12667 if (LD->isIndexed()) { 12668 // Note that DAGCombine should re-form any pre-increment load(s) from 12669 // what is produced here if that makes sense. 12670 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr); 12671 } 12672 12673 DCI.CombineTo(Bitcast2, FloatLoad); 12674 DCI.CombineTo(Bitcast, FloatLoad2); 12675 12676 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 
2 : 1), 12677 SDValue(FloatLoad2.getNode(), 1)); 12678 return true; 12679 }; 12680 12681 if (ReplaceTwoFloatLoad()) 12682 return SDValue(N, 0); 12683 12684 EVT MemVT = LD->getMemoryVT(); 12685 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext()); 12686 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty); 12687 Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext()); 12688 unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy); 12689 if (LD->isUnindexed() && VT.isVector() && 12690 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) && 12691 // P8 and later hardware should just use LOAD. 12692 !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 || 12693 VT == MVT::v4i32 || VT == MVT::v4f32)) || 12694 (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) && 12695 LD->getAlignment() >= ScalarABIAlignment)) && 12696 LD->getAlignment() < ABIAlignment) { 12697 // This is a type-legal unaligned Altivec or QPX load. 12698 SDValue Chain = LD->getChain(); 12699 SDValue Ptr = LD->getBasePtr(); 12700 bool isLittleEndian = Subtarget.isLittleEndian(); 12701 12702 // This implements the loading of unaligned vectors as described in 12703 // the venerable Apple Velocity Engine overview. Specifically: 12704 // https://developer.apple.com/hardwaredrivers/ve/alignment.html 12705 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html 12706 // 12707 // The general idea is to expand a sequence of one or more unaligned 12708 // loads into an alignment-based permutation-control instruction (lvsl 12709 // or lvsr), a series of regular vector loads (which always truncate 12710 // their input address to an aligned address), and a series of 12711 // permutations. The results of these permutations are the requested 12712 // loaded values. The trick is that the last "extra" load is not taken 12713 // from the address you might suspect (sizeof(vector) bytes after the 12714 // last requested load), but rather sizeof(vector) - 1 bytes after the 12715 // last requested vector. The point of this is to avoid a page fault if 12716 // the base address happened to be aligned. This works because if the 12717 // base address is aligned, then adding less than a full vector length 12718 // will cause the last vector in the sequence to be (re)loaded. 12719 // Otherwise, the next vector will be fetched as you might suspect was 12720 // necessary. 12721 12722 // We might be able to reuse the permutation generation from 12723 // a different base address offset from this one by an aligned amount. 12724 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this 12725 // optimization later. 12726 Intrinsic::ID Intr, IntrLD, IntrPerm; 12727 MVT PermCntlTy, PermTy, LDTy; 12728 if (Subtarget.hasAltivec()) { 12729 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr : 12730 Intrinsic::ppc_altivec_lvsl; 12731 IntrLD = Intrinsic::ppc_altivec_lvx; 12732 IntrPerm = Intrinsic::ppc_altivec_vperm; 12733 PermCntlTy = MVT::v16i8; 12734 PermTy = MVT::v4i32; 12735 LDTy = MVT::v4i32; 12736 } else { 12737 Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld : 12738 Intrinsic::ppc_qpx_qvlpcls; 12739 IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd : 12740 Intrinsic::ppc_qpx_qvlfs; 12741 IntrPerm = Intrinsic::ppc_qpx_qvfperm; 12742 PermCntlTy = MVT::v4f64; 12743 PermTy = MVT::v4f64; 12744 LDTy = MemVT.getSimpleVT(); 12745 } 12746 12747 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy); 12748 12749 // Create the new MMO for the new base load. 
It is like the original MMO, 12750 // but represents an area in memory almost twice the vector size centered 12751 // on the original address. If the address is unaligned, we might start 12752 // reading up to (sizeof(vector)-1) bytes below the address of the 12753 // original unaligned load. 12754 MachineFunction &MF = DAG.getMachineFunction(); 12755 MachineMemOperand *BaseMMO = 12756 MF.getMachineMemOperand(LD->getMemOperand(), 12757 -(long)MemVT.getStoreSize()+1, 12758 2*MemVT.getStoreSize()-1); 12759 12760 // Create the new base load. 12761 SDValue LDXIntID = 12762 DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout())); 12763 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr }; 12764 SDValue BaseLoad = 12765 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 12766 DAG.getVTList(PermTy, MVT::Other), 12767 BaseLoadOps, LDTy, BaseMMO); 12768 12769 // Note that the value of IncOffset (which is provided to the next 12770 // load's pointer info offset value, and thus used to calculate the 12771 // alignment), and the value of IncValue (which is actually used to 12772 // increment the pointer value) are different! This is because we 12773 // require the next load to appear to be aligned, even though it 12774 // is actually offset from the base pointer by a lesser amount. 12775 int IncOffset = VT.getSizeInBits() / 8; 12776 int IncValue = IncOffset; 12777 12778 // Walk (both up and down) the chain looking for another load at the real 12779 // (aligned) offset (the alignment of the other load does not matter in 12780 // this case). If found, then do not use the offset reduction trick, as 12781 // that will prevent the loads from being later combined (as they would 12782 // otherwise be duplicates). 12783 if (!findConsecutiveLoad(LD, DAG)) 12784 --IncValue; 12785 12786 SDValue Increment = 12787 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout())); 12788 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 12789 12790 MachineMemOperand *ExtraMMO = 12791 MF.getMachineMemOperand(LD->getMemOperand(), 12792 1, 2*MemVT.getStoreSize()-1); 12793 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr }; 12794 SDValue ExtraLoad = 12795 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 12796 DAG.getVTList(PermTy, MVT::Other), 12797 ExtraLoadOps, LDTy, ExtraMMO); 12798 12799 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 12800 BaseLoad.getValue(1), ExtraLoad.getValue(1)); 12801 12802 // Because vperm has a big-endian bias, we must reverse the order 12803 // of the input vectors and complement the permute control vector 12804 // when generating little endian code. We have already handled the 12805 // latter by using lvsr instead of lvsl, so just reverse BaseLoad 12806 // and ExtraLoad here. 12807 SDValue Perm; 12808 if (isLittleEndian) 12809 Perm = BuildIntrinsicOp(IntrPerm, 12810 ExtraLoad, BaseLoad, PermCntl, DAG, dl); 12811 else 12812 Perm = BuildIntrinsicOp(IntrPerm, 12813 BaseLoad, ExtraLoad, PermCntl, DAG, dl); 12814 12815 if (VT != PermTy) 12816 Perm = Subtarget.hasAltivec() ? 12817 DAG.getNode(ISD::BITCAST, dl, VT, Perm) : 12818 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX 12819 DAG.getTargetConstant(1, dl, MVT::i64)); 12820 // second argument is 1 because this rounding 12821 // is always exact. 12822 12823 // The output of the permutation is our loaded result, the TokenFactor is 12824 // our new chain. 
12825 DCI.CombineTo(N, Perm, TF); 12826 return SDValue(N, 0); 12827 } 12828 } 12829 break; 12830 case ISD::INTRINSIC_WO_CHAIN: { 12831 bool isLittleEndian = Subtarget.isLittleEndian(); 12832 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 12833 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr 12834 : Intrinsic::ppc_altivec_lvsl); 12835 if ((IID == Intr || 12836 IID == Intrinsic::ppc_qpx_qvlpcld || 12837 IID == Intrinsic::ppc_qpx_qvlpcls) && 12838 N->getOperand(1)->getOpcode() == ISD::ADD) { 12839 SDValue Add = N->getOperand(1); 12840 12841 int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ? 12842 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */; 12843 12844 if (DAG.MaskedValueIsZero(Add->getOperand(1), 12845 APInt::getAllOnesValue(Bits /* alignment */) 12846 .zext(Add.getScalarValueSizeInBits()))) { 12847 SDNode *BasePtr = Add->getOperand(0).getNode(); 12848 for (SDNode::use_iterator UI = BasePtr->use_begin(), 12849 UE = BasePtr->use_end(); 12850 UI != UE; ++UI) { 12851 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 12852 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) { 12853 // We've found another LVSL/LVSR, and this address is an aligned 12854 // multiple of that one. The results will be the same, so use the 12855 // one we've just found instead. 12856 12857 return SDValue(*UI, 0); 12858 } 12859 } 12860 } 12861 12862 if (isa<ConstantSDNode>(Add->getOperand(1))) { 12863 SDNode *BasePtr = Add->getOperand(0).getNode(); 12864 for (SDNode::use_iterator UI = BasePtr->use_begin(), 12865 UE = BasePtr->use_end(); UI != UE; ++UI) { 12866 if (UI->getOpcode() == ISD::ADD && 12867 isa<ConstantSDNode>(UI->getOperand(1)) && 12868 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() - 12869 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) % 12870 (1ULL << Bits) == 0) { 12871 SDNode *OtherAdd = *UI; 12872 for (SDNode::use_iterator VI = OtherAdd->use_begin(), 12873 VE = OtherAdd->use_end(); VI != VE; ++VI) { 12874 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 12875 cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) { 12876 return SDValue(*VI, 0); 12877 } 12878 } 12879 } 12880 } 12881 } 12882 } 12883 } 12884 12885 break; 12886 case ISD::INTRINSIC_W_CHAIN: 12887 // For little endian, VSX loads require generating lxvd2x/xxswapd. 12888 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load. 12889 if (Subtarget.needsSwapsForVSXMemOps()) { 12890 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 12891 default: 12892 break; 12893 case Intrinsic::ppc_vsx_lxvw4x: 12894 case Intrinsic::ppc_vsx_lxvd2x: 12895 return expandVSXLoadForLE(N, DCI); 12896 } 12897 } 12898 break; 12899 case ISD::INTRINSIC_VOID: 12900 // For little endian, VSX stores require generating xxswapd/stxvd2x. 12901 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store. 12902 if (Subtarget.needsSwapsForVSXMemOps()) { 12903 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 12904 default: 12905 break; 12906 case Intrinsic::ppc_vsx_stxvw4x: 12907 case Intrinsic::ppc_vsx_stxvd2x: 12908 return expandVSXStoreForLE(N, DCI); 12909 } 12910 } 12911 break; 12912 case ISD::BSWAP: 12913 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 
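    // For example (illustrative): (i32 (bswap (i32 load [X]))) becomes a
    // single byte-reversed load, lwbrx [X], instead of a plain load followed
    // by an in-register byte swap.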
12914     if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
12915         N->getOperand(0).hasOneUse() &&
12916         (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
12917          (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
12918           N->getValueType(0) == MVT::i64))) {
12919       SDValue Load = N->getOperand(0);
12920       LoadSDNode *LD = cast<LoadSDNode>(Load);
12921       // Create the byte-swapping load.
12922       SDValue Ops[] = {
12923         LD->getChain(),    // Chain
12924         LD->getBasePtr(),  // Ptr
12925         DAG.getValueType(N->getValueType(0)) // VT
12926       };
12927       SDValue BSLoad =
12928         DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
12929                                 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
12930                                               MVT::i64 : MVT::i32, MVT::Other),
12931                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
12932
12933       // If this is an i16 load, insert the truncate.
12934       SDValue ResVal = BSLoad;
12935       if (N->getValueType(0) == MVT::i16)
12936         ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
12937
12938       // First, combine the bswap away. This makes the value produced by the
12939       // load dead.
12940       DCI.CombineTo(N, ResVal);
12941
12942       // Next, combine the load away; we give it a bogus result value but a
12943       // real chain result. The result value is dead because the bswap is dead.
12944       DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
12945
12946       // Return N so it doesn't get rechecked!
12947       return SDValue(N, 0);
12948     }
12949     break;
12950   case PPCISD::VCMP:
12951     // If a VCMPo node already exists with exactly the same operands as this
12952     // node, use its result instead of this node (VCMPo computes both a CR6 and
12953     // a normal output).
12954     //
12955     if (!N->getOperand(0).hasOneUse() &&
12956         !N->getOperand(1).hasOneUse() &&
12957         !N->getOperand(2).hasOneUse()) {
12958
12959       // Scan all of the users of the LHS, looking for VCMPo's that match.
12960       SDNode *VCMPoNode = nullptr;
12961
12962       SDNode *LHSN = N->getOperand(0).getNode();
12963       for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
12964            UI != E; ++UI)
12965         if (UI->getOpcode() == PPCISD::VCMPo &&
12966             UI->getOperand(1) == N->getOperand(1) &&
12967             UI->getOperand(2) == N->getOperand(2) &&
12968             UI->getOperand(0) == N->getOperand(0)) {
12969           VCMPoNode = *UI;
12970           break;
12971         }
12972
12973       // If there is no VCMPo node, or if its flag value is unused, don't
12974       // transform this.
12975       if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
12976         break;
12977
12978       // Look at the (necessarily single) use of the flag value. If it has a
12979       // chain, this transformation is more complex. Note that multiple things
12980       // could use the value result, which we should ignore.
12981       SDNode *FlagUser = nullptr;
12982       for (SDNode::use_iterator UI = VCMPoNode->use_begin();
12983            FlagUser == nullptr; ++UI) {
12984         assert(UI != VCMPoNode->use_end() && "Didn't find user!");
12985         SDNode *User = *UI;
12986         for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
12987           if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
12988             FlagUser = User;
12989             break;
12990           }
12991         }
12992       }
12993
12994       // If the user is an MFOCRF instruction, we know this is safe.
12995       // Otherwise we give up for right now.
12996 if (FlagUser->getOpcode() == PPCISD::MFOCRF) 12997 return SDValue(VCMPoNode, 0); 12998 } 12999 break; 13000 case ISD::BRCOND: { 13001 SDValue Cond = N->getOperand(1); 13002 SDValue Target = N->getOperand(2); 13003 13004 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN && 13005 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() == 13006 Intrinsic::ppc_is_decremented_ctr_nonzero) { 13007 13008 // We now need to make the intrinsic dead (it cannot be instruction 13009 // selected). 13010 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0)); 13011 assert(Cond.getNode()->hasOneUse() && 13012 "Counter decrement has more than one use"); 13013 13014 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other, 13015 N->getOperand(0), Target); 13016 } 13017 } 13018 break; 13019 case ISD::BR_CC: { 13020 // If this is a branch on an altivec predicate comparison, lower this so 13021 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This 13022 // lowering is done pre-legalize, because the legalizer lowers the predicate 13023 // compare down to code that is difficult to reassemble. 13024 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 13025 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 13026 13027 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero 13028 // value. If so, pass-through the AND to get to the intrinsic. 13029 if (LHS.getOpcode() == ISD::AND && 13030 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN && 13031 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() == 13032 Intrinsic::ppc_is_decremented_ctr_nonzero && 13033 isa<ConstantSDNode>(LHS.getOperand(1)) && 13034 !isNullConstant(LHS.getOperand(1))) 13035 LHS = LHS.getOperand(0); 13036 13037 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN && 13038 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 13039 Intrinsic::ppc_is_decremented_ctr_nonzero && 13040 isa<ConstantSDNode>(RHS)) { 13041 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && 13042 "Counter decrement comparison is not EQ or NE"); 13043 13044 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 13045 bool isBDNZ = (CC == ISD::SETEQ && Val) || 13046 (CC == ISD::SETNE && !Val); 13047 13048 // We now need to make the intrinsic dead (it cannot be instruction 13049 // selected). 13050 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0)); 13051 assert(LHS.getNode()->hasOneUse() && 13052 "Counter decrement has more than one use"); 13053 13054 return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other, 13055 N->getOperand(0), N->getOperand(4)); 13056 } 13057 13058 int CompareOpc; 13059 bool isDot; 13060 13061 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 13062 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 13063 getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) { 13064 assert(isDot && "Can't compare against a vector result!"); 13065 13066 // If this is a comparison against something other than 0/1, then we know 13067 // that the condition is never/always true. 13068 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 13069 if (Val != 0 && Val != 1) { 13070 if (CC == ISD::SETEQ) // Cond never true, remove branch. 13071 return N->getOperand(0); 13072 // Always !=, turn it into an unconditional branch. 
13073 return DAG.getNode(ISD::BR, dl, MVT::Other, 13074 N->getOperand(0), N->getOperand(4)); 13075 } 13076 13077 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 13078 13079 // Create the PPCISD altivec 'dot' comparison node. 13080 SDValue Ops[] = { 13081 LHS.getOperand(2), // LHS of compare 13082 LHS.getOperand(3), // RHS of compare 13083 DAG.getConstant(CompareOpc, dl, MVT::i32) 13084 }; 13085 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue }; 13086 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 13087 13088 // Unpack the result based on how the target uses it. 13089 PPC::Predicate CompOpc; 13090 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 13091 default: // Can't happen, don't crash on invalid number though. 13092 case 0: // Branch on the value of the EQ bit of CR6. 13093 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 13094 break; 13095 case 1: // Branch on the inverted value of the EQ bit of CR6. 13096 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 13097 break; 13098 case 2: // Branch on the value of the LT bit of CR6. 13099 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 13100 break; 13101 case 3: // Branch on the inverted value of the LT bit of CR6. 13102 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT; 13103 break; 13104 } 13105 13106 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 13107 DAG.getConstant(CompOpc, dl, MVT::i32), 13108 DAG.getRegister(PPC::CR6, MVT::i32), 13109 N->getOperand(4), CompNode.getValue(1)); 13110 } 13111 break; 13112 } 13113 case ISD::BUILD_VECTOR: 13114 return DAGCombineBuildVector(N, DCI); 13115 } 13116 13117 return SDValue(); 13118 } 13119 13120 SDValue 13121 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 13122 SelectionDAG &DAG, 13123 SmallVectorImpl<SDNode *> &Created) const { 13124 // fold (sdiv X, pow2) 13125 EVT VT = N->getValueType(0); 13126 if (VT == MVT::i64 && !Subtarget.isPPC64()) 13127 return SDValue(); 13128 if ((VT != MVT::i32 && VT != MVT::i64) || 13129 !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2())) 13130 return SDValue(); 13131 13132 SDLoc DL(N); 13133 SDValue N0 = N->getOperand(0); 13134 13135 bool IsNegPow2 = (-Divisor).isPowerOf2(); 13136 unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros(); 13137 SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT); 13138 13139 SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt); 13140 Created.push_back(Op.getNode()); 13141 13142 if (IsNegPow2) { 13143 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op); 13144 Created.push_back(Op.getNode()); 13145 } 13146 13147 return Op; 13148 } 13149 13150 //===----------------------------------------------------------------------===// 13151 // Inline Assembly Support 13152 //===----------------------------------------------------------------------===// 13153 13154 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 13155 KnownBits &Known, 13156 const APInt &DemandedElts, 13157 const SelectionDAG &DAG, 13158 unsigned Depth) const { 13159 Known.resetAll(); 13160 switch (Op.getOpcode()) { 13161 default: break; 13162 case PPCISD::LBRX: { 13163 // lhbrx is known to have the top bits cleared out. 
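    // e.g. a 16-bit byte-reversed load fills only the low 16 bits of the
    // result, so bits 16-31 are known zero (hence the 0xFFFF0000 mask below).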
    if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
      Known.Zero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpequd_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtsd_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
    case Intrinsic::ppc_altivec_vcmpgtud_p:
      Known.Zero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}

unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9: {
    if (!ML)
      break;

    const PPCInstrInfo *TII = Subtarget.getInstrInfo();

    // For small loops (between 5 and 8 instructions), align to a 32-byte
    // boundary so that the entire loop fits in one instruction-cache line.
    uint64_t LoopSize = 0;
    for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
      for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
        LoopSize += TII->getInstSizeInBytes(*J);
        if (LoopSize > 32)
          break;
      }

    if (LoopSize > 16 && LoopSize <= 32)
      return 5; // Alignment here is a log2 value, so 5 means 2^5 = 32 bytes.

    break;
  }
  }

  return TargetLowering::getPrefLoopAlignment(ML);
}

/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'd':
    case 'v':
    case 'y':
      return C_RegisterClass;
    case 'Z':
      // FIXME: While Z does indicate a memory constraint, it specifically
      // indicates an r+r address (used in conjunction with the 'y' modifier
      // in the replacement string). Currently, we're forcing the base
      // register to be r0 in the asm printer (which is interpreted as zero)
      // and forming the complete address in the second register. This is
      // suboptimal.
      return C_Memory;
    }
  } else if (Constraint == "wc") { // individual CR bits.
    return C_RegisterClass;
  } else if (Constraint == "wa" || Constraint == "wd" ||
             Constraint == "wf" || Constraint == "ws") {
    return C_RegisterClass; // VSX registers.
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
PPCTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();

  // Look at the constraint type.
  if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
    return CW_Register; // an individual CR bit.
  else if ((StringRef(constraint) == "wa" ||
            StringRef(constraint) == "wd" ||
            StringRef(constraint) == "wf") &&
           type->isVectorTy())
    return CW_Register;
  else if (StringRef(constraint) == "ws" && type->isDoubleTy())
    return CW_Register;

  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'b':
    if (type->isIntegerTy())
      weight = CW_Register;
    break;
  case 'f':
    if (type->isFloatTy())
      weight = CW_Register;
    break;
  case 'd':
    if (type->isDoubleTy())
      weight = CW_Register;
    break;
  case 'v':
    if (type->isVectorTy())
      weight = CW_Register;
    break;
  case 'y':
    weight = CW_Register;
    break;
  case 'Z':
    weight = CW_Memory;
    break;
  }
  return weight;
}

std::pair<unsigned, const TargetRegisterClass *>
PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
      return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
    case 'r':   // R0-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RCRegClass);
      return std::make_pair(0U, &PPC::GPRCRegClass);
    // 'd' and 'f' constraints are both defined to be "the floating point
    // registers", where one is for 32-bit and the other for 64-bit. We don't
    // really care overly much here so just give them all the same reg classes.
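    // Note: on SPE subtargets, f32/f64 values live in GPR-based SPE register
    // classes rather than in the FPRs, so 'd' and 'f' map to those classes
    // in the SPE path below.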
    case 'd':
    case 'f':
      if (Subtarget.hasSPE()) {
        if (VT == MVT::f32 || VT == MVT::i32)
          return std::make_pair(0U, &PPC::SPE4RCRegClass);
        if (VT == MVT::f64 || VT == MVT::i64)
          return std::make_pair(0U, &PPC::SPERCRegClass);
      } else {
        if (VT == MVT::f32 || VT == MVT::i32)
          return std::make_pair(0U, &PPC::F4RCRegClass);
        if (VT == MVT::f64 || VT == MVT::i64)
          return std::make_pair(0U, &PPC::F8RCRegClass);
        if (VT == MVT::v4f64 && Subtarget.hasQPX())
          return std::make_pair(0U, &PPC::QFRCRegClass);
        if (VT == MVT::v4f32 && Subtarget.hasQPX())
          return std::make_pair(0U, &PPC::QSRCRegClass);
      }
      break;
    case 'v':
      if (VT == MVT::v4f64 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QFRCRegClass);
      if (VT == MVT::v4f32 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QSRCRegClass);
      if (Subtarget.hasAltivec())
        return std::make_pair(0U, &PPC::VRRCRegClass);
      break;
    case 'y':   // crrc
      return std::make_pair(0U, &PPC::CRRCRegClass);
    }
  } else if (Constraint == "wc" && Subtarget.useCRBits()) {
    // An individual CR bit.
    return std::make_pair(0U, &PPC::CRBITRCRegClass);
  } else if ((Constraint == "wa" || Constraint == "wd" ||
             Constraint == "wf") && Subtarget.hasVSX()) {
    return std::make_pair(0U, &PPC::VSRCRegClass);
  } else if (Constraint == "ws" && Subtarget.hasVSX()) {
    if (VT == MVT::f32 && Subtarget.hasP8Vector())
      return std::make_pair(0U, &PPC::VSSRCRegClass);
    else
      return std::make_pair(0U, &PPC::VSFRCRegClass);
  }

  std::pair<unsigned, const TargetRegisterClass *> R =
      TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
  // (which we call X[0-9]+). If a 64-bit value has been requested, and a
  // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
  // register.
  // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
  // the AsmName field from *RegisterInfo.td, then this would not be necessary.
  if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
      PPC::GPRCRegClass.contains(R.first))
    return std::make_pair(TRI->getMatchingSuperReg(R.first,
                            PPC::sub_32, &PPC::G8RCRegClass),
                          &PPC::G8RCRegClass);

  // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
  if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
    R.first = PPC::CR0;
    R.second = &PPC::CRRCRegClass;
  }

  return R;
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints.
  if (Constraint.length() > 1) return;

  char Letter = Constraint[0];
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
    if (!CST) return; // Must be an immediate to match.
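    // Illustrative (hypothetical) example: for a use such as
    //   asm("addi %0,%1,%2" : "=r"(d) : "r"(s), "I"(42));
    // Op is the constant 42 here, and it must pass the signed-16-bit 'I'
    // check below before it is accepted as an immediate operand.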
    SDLoc dl(Op);
    int64_t Value = CST->getSExtValue();
    EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
                         // numbers are printed as such.
    switch (Letter) {
    default: llvm_unreachable("Unknown constraint letter!");
    case 'I':  // "I" is a signed 16-bit constant.
      if (isInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
      if (isShiftedUInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
      if (isShiftedInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
      if (isUInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'M':  // "M" is a constant that is greater than 31.
      if (Value > 31)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'N':  // "N" is a positive constant that is an exact power of two.
      if (Value > 0 && isPowerOf2_64(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'O':  // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      if (isInt<16>(-Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  // PPC does not allow r+i addressing modes for vectors!
  if (Ty->isVectorTy() && AM.BaseOffs != 0)
    return false;

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r addressing:
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = Subtarget.isPPC64();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
        DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
                        isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Just load the return address off the stack.
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
                     MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;

  // Naked functions never have a frame pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned FrameReg;
  if (MF.getFunction().hasFnAttribute(Attribute::Naked))
    FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
  else
    FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;

  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
                                         PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo());
  return FrameAddr;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
unsigned PPCTargetLowering::getRegisterByName(const char* RegName, EVT VT,
                                              SelectionDAG &DAG) const {
  bool isPPC64 = Subtarget.isPPC64();
  bool isDarwinABI = Subtarget.isDarwinABI();

  if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) ||
      (!isPPC64 && VT != MVT::i32))
    report_fatal_error("Invalid register global variable type");

  bool is64Bit = isPPC64 && VT == MVT::i64;
  unsigned Reg = StringSwitch<unsigned>(RegName)
                   .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
                   .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2)
                   .Case("r13", (!isPPC64 && isDarwinABI) ? 0 :
                                  (is64Bit ? PPC::X13 : PPC::R13))
                   .Default(0);

  if (Reg)
    return Reg;
  report_fatal_error("Invalid register name global variable");
}

bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
  return false;
}

bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::ppc_qpx_qvlfd:
  case Intrinsic::ppc_qpx_qvlfs:
  case Intrinsic::ppc_qpx_qvlfcd:
  case Intrinsic::ppc_qpx_qvlfcs:
  case Intrinsic::ppc_qpx_qvlfiwa:
  case Intrinsic::ppc_qpx_qvlfiwz:
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = 1;
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::ppc_qpx_qvlfda:
  case Intrinsic::ppc_qpx_qvlfsa:
  case Intrinsic::ppc_qpx_qvlfcda:
  case Intrinsic::ppc_qpx_qvlfcsa:
  case Intrinsic::ppc_qpx_qvlfiwaa:
  case Intrinsic::ppc_qpx_qvlfiwza: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvlfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = 1;
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfd:
  case Intrinsic::ppc_qpx_qvstfs:
  case Intrinsic::ppc_qpx_qvstfcd:
  case Intrinsic::ppc_qpx_qvstfcs:
  case Intrinsic::ppc_qpx_qvstfiw:
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = 1;
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfda:
  case Intrinsic::ppc_qpx_qvstfsa:
  case Intrinsic::ppc_qpx_qvstfcda:
  case Intrinsic::ppc_qpx_qvstfcsa:
  case Intrinsic::ppc_qpx_qvstfiwa: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = 1;
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  default:
    break;
  }

  return false;
}

/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero, the destination alignment can satisfy any
/// constraint. Similarly, if SrcAlign is zero there is no need to check it
/// against the alignment requirement, probably because the source does not
/// need to be loaded. If 'IsMemset' is true, that means it's expanding a
/// memset. If 'ZeroMemset' is true, that means it's a memset of zero.
/// 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
/// not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
                                           unsigned DstAlign, unsigned SrcAlign,
                                           bool IsMemset, bool ZeroMemset,
                                           bool MemcpyStrSrc,
                                           MachineFunction &MF) const {
  if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
    const Function &F = MF.getFunction();
    // When expanding a memset, require at least two QPX instructions to cover
    // the cost of loading the value to be stored from the constant pool.
    if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
        (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
        !F.hasFnAttribute(Attribute::NoImplicitFloat)) {
      return MVT::v4f64;
    }

    // We should use Altivec/VSX loads and stores when available. For unaligned
    // addresses, unaligned VSX loads are only fast starting with the P8.
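    // (AltiVec lvx/stvx ignore the low four address bits, so they only work
    // for 16-byte-aligned accesses; VSX loads and stores accept arbitrary
    // alignment, which is what the VSX/P8Vector conditions below rely on.)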
    if (Subtarget.hasAltivec() && Size >= 16 &&
        (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
         ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
      return MVT::v4i32;
  }

  if (Subtarget.isPPC64()) {
    return MVT::i64;
  }

  return MVT::i32;
}

/// Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  return !(BitSize == 0 || BitSize > 64);
}

bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Generally speaking, zexts are not free, but they are free when they can be
  // folded with other operations.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  // FIXME: Add other cases...
  //  - 32-bit shifts with a zext to i64
  //  - zext after ctlz, bswap, etc.
  //  - zext after and by a constant mask

  return TargetLowering::isZExtFree(Val, VT2);
}

bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
  assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
         "invalid fpext types");
  // Extending to float128 is not free.
  if (DestVT == MVT::f128)
    return false;
  return true;
}

bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                       unsigned,
                                                       unsigned,
                                                       bool *Fast) const {
  if (DisablePPCUnaligned)
    return false;

  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual expansion,
  // and generally only traps for software emulation when crossing page
  // boundaries.
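  // (For example, a misaligned i32 load can still be emitted as a single
  // lwz rather than being expanded into several narrower loads and shifts.)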

  if (!VT.isSimple())
    return false;

  if (VT.getSimpleVT().isVector()) {
    if (Subtarget.hasVSX()) {
      if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
          VT != MVT::v4f32 && VT != MVT::v4i32)
        return false;
    } else {
      return false;
    }
  }

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return (EnableQuadPrecision && Subtarget.hasP9Vector());
  default:
    break;
  }

  return false;
}

const MCPhysReg *
PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
  // LR is a callee-save register, but we must treat it as clobbered by any call
  // site. Hence we include LR in the scratch registers, which are in turn added
  // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
  // to CTR, which is used by any indirect call.
  static const MCPhysReg ScratchRegs[] = {
    PPC::X12, PPC::LR8, PPC::CTR8, 0
  };

  return ScratchRegs;
}

unsigned PPCTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
}

unsigned PPCTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
}

bool
PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
  EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves

  if (Subtarget.hasVSX() || Subtarget.hasQPX())
    return true;

  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref || Subtarget.enableMachineScheduler())
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}

// Create a fast isel object.
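// (Fast-isel performs quick, unoptimized instruction selection, typically at
// -O0; the PPC implementation lives in PPCFastISel.cpp.)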
FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) const {
  return PPC::createFastISel(FuncInfo, LibInfo);
}

void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  if (Subtarget.isDarwinABI()) return;
  if (!Subtarget.isPPC64()) return;

  // Update IsSplitCSR in PPCFunctionInfo
  PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>();
  PFI->setIsSplitCSR(true);
}

void PPCTargetLowering::insertCopiesSplitCSR(
  MachineBasicBlock *Entry,
  const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (PPC::G8RCRegClass.contains(*I))
      RC = &PPC::G8RCRegClass;
    else if (PPC::F8RCRegClass.contains(*I))
      RC = &PPC::F8RCRegClass;
    else if (PPC::CRRCRegClass.contains(*I))
      RC = &PPC::CRRCRegClass;
    else if (PPC::VRRCRegClass.contains(*I))
      RC = &PPC::VRRCRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    unsigned NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions, it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction().hasFnAttribute(
             Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool PPCTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}

bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  if (!VT.isSimple() || !Subtarget.hasVSX())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  default:
    // For FP types that are currently not supported by the PPC backend, return
    // false. Examples: f16, f80.
    return false;
  case MVT::f32:
  case MVT::f64:
  case MVT::ppcf128:
    return Imm.isPosZero();
  }
}

// For vector shift operation op, fold
// (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
                                  SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarSizeInBits();
  unsigned Opcode = N->getOpcode();
  unsigned TargetOpcode;

  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected shift operation");
  case ISD::SHL:
    TargetOpcode = PPCISD::SHL;
    break;
  case ISD::SRL:
    TargetOpcode = PPCISD::SRL;
    break;
  case ISD::SRA:
    TargetOpcode = PPCISD::SRA;
    break;
  }

  if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
      N1->getOpcode() == ISD::AND)
    if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
      if (Mask->getZExtValue() == OpSizeInBits - 1)
        return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));

  return SDValue();
}

SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  SDValue N0 = N->getOperand(0);
  ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!Subtarget.isISA3_0() ||
      N0.getOpcode() != ISD::SIGN_EXTEND ||
      N0.getOperand(0).getValueType() != MVT::i32 ||
      CN1 == nullptr || N->getValueType(0) != MVT::i64)
    return SDValue();

  // We can't save an operation here if the value is already extended, and
  // the existing shift is easier to combine.
  SDValue ExtsSrc = N0.getOperand(0);
  if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
      ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
    return SDValue();

  SDLoc DL(N0);
  SDValue ShiftBy = SDValue(CN1, 0);
  // We want the shift amount to be i32 on the extswsli, but the shift could
  // have an i64.
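  // (For example, on ISA 3.0, (shl (sext i32 X to i64), 4) becomes a single
  // extswsli X, 4 instead of an extsw followed by an sldi.)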
  if (ShiftBy.getValueType() == MVT::i64)
    ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);

  return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
                         ShiftBy);
}

SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

// Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
// Transform (add X, (zext(sete  Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
// When C is zero, the equation (addi Z, -C) can be simplified to Z.
// Requirement: -C in [-32768, 32767], and X and Z are MVT::i64 types.
static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
                                 const PPCSubtarget &Subtarget) {
  if (!Subtarget.isPPC64())
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  auto isZextOfCompareWithConstant = [](SDValue Op) {
    if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
        Op.getValueType() != MVT::i64)
      return false;

    SDValue Cmp = Op.getOperand(0);
    if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
        Cmp.getOperand(0).getValueType() != MVT::i64)
      return false;

    if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
      int64_t NegConstant = 0 - Constant->getSExtValue();
      // Due to the limitations of the addi instruction,
      // -C is required to be [-32768, 32767].
      return isInt<16>(NegConstant);
    }

    return false;
  };

  bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
  bool RHSHasPattern = isZextOfCompareWithConstant(RHS);

  // If there is a pattern, canonicalize a zext operand to the RHS.
  if (LHSHasPattern && !RHSHasPattern)
    std::swap(LHS, RHS);
  else if (!LHSHasPattern && !RHSHasPattern)
    return SDValue();

  SDLoc DL(N);
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i64);
  SDValue Cmp = RHS.getOperand(0);
  SDValue Z = Cmp.getOperand(0);
  auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));

  assert(Constant && "Constant should not be a null pointer.");
  int64_t NegConstant = 0 - Constant->getSExtValue();

  switch(cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
  default: break;
  case ISD::SETNE: {
    //                                 when C == 0
    //                             --> addze X, (addic Z, -1).carry
    //                            /
    // add X, (zext(setne Z, C))--
    //                            \    when -32768 <= -C <= 32767 && C != 0
    //                             --> addze X, (addic (addi Z, -C), -1).carry
    SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
                              DAG.getConstant(NegConstant, DL, MVT::i64));
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
                               AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
    return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
                       SDValue(Addc.getNode(), 1));
  }
  case ISD::SETEQ: {
    //                                 when C == 0
    //                             --> addze X, (subfic Z, 0).carry
    //                            /
    // add X, (zext(sete  Z, C))--
    //                            \    when -32768 <= -C <= 32767 && C != 0
    //                             --> addze X, (subfic (addi Z, -C), 0).carry
    SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
                              DAG.getConstant(NegConstant, DL, MVT::i64));
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
                               DAG.getConstant(0, DL, MVT::i64), AddOrZ);
    return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
                       SDValue(Subc.getNode(), 1));
  }
  }

  return SDValue();
}

SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
    return Value;

  return SDValue();
}

bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Only duplicate to increase tail-calls for the 64-bit SysV ABIs.
  if (!Subtarget.isSVR4ABI() || !Subtarget.isPPC64())
    return false;

  // If not a tail call then no need to proceed.
  if (!CI->isTailCall())
    return false;

  // If tail calls are disabled for the caller then we are done.
  const Function *Caller = CI->getParent()->getParent();
  auto Attr = Caller->getFnAttribute("disable-tail-calls");
  if (Attr.getValueAsString() == "true")
    return false;

  // If sibling calls have been disabled and tail-calls aren't guaranteed
  // there is no reason to duplicate.
  auto &TM = getTargetMachine();
  if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
    return false;

  // Can't tail call a function called indirectly, or if it has variadic args.
  const Function *Callee = CI->getCalledFunction();
  if (!Callee || Callee->isVarArg())
    return false;

  // Make sure the callee and caller calling conventions are eligible for tco.
  if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
                                           CI->getCallingConv()))
    return false;

  // If the function is local then we have a good chance at tail-calling it.
  return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
}

bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  if (!Subtarget.hasVSX())
    return false;
  if (Subtarget.hasP9Vector() && VT == MVT::f128)
    return true;
  return VT == MVT::f32 || VT == MVT::f64 ||
         VT == MVT::v4f32 || VT == MVT::v2f64;
}

bool PPCTargetLowering::
isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
  const Value *Mask = AndI.getOperand(1);
  // If the mask is suitable for andi. or andis. we should sink the and.
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
    // Can't handle constants wider than 64 bits.
    if (CI->getBitWidth() > 64)
      return false;
    int64_t ConstVal = CI->getZExtValue();
    return isUInt<16>(ConstVal) ||
           (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
  }

  // For non-constant masks, we can always use the record-form and.
  return true;
}