//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"
static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

static cl::opt<bool> EnableQuadPrecision("enable-ppc-quad-precision",
cl::desc("enable quad precision float support on ppc"), cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");

static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!useSoftFloat()) {
    addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
    addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
  }

  // Match BITREVERSE to customized fast code sequence in the td file.
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);

  // Sub-word ATOMIC_CMP_SWAP needs to ensure that the input is zero-extended.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
  // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9.
  // On P9 we may use a hardware instruction to compute the remainder.
  // The instructions are not legalized directly because in the cases where the
  // result of both the remainder and the division is required it is more
  // efficient to compute the remainder from the result of the division rather
  // than use the remainder instruction.
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
    setOperationAction(ISD::SREM, MVT::i64, Custom);
    setOperationAction(ISD::UREM, MVT::i64, Custom);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }

  if (Subtarget.hasP9Vector()) {
    setOperationAction(ISD::ABS, MVT::v4i32, Legal);
    setOperationAction(ISD::ABS, MVT::v8i16, Legal);
    setOperationAction(ISD::ABS, MVT::v16i8, Legal);
  }
  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FMA  , MVT::f64, Legal);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);
  setOperationAction(ISD::FMA  , MVT::f32, Legal);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }
  // PowerPC does not have BSWAP, but we can use the vector BSWAP instruction
  // xxbrd to speed up scalar BSWAP64.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::BSWAP, MVT::i64, Custom);
    setOperationAction(ISD::CTTZ , MVT::i32, Legal);
    setOperationAction(ISD::CTTZ , MVT::i64, Legal);
  } else {
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);
    setOperationAction(ISD::CTTZ , MVT::i32, Expand);
    setOperationAction(ISD::CTTZ , MVT::i64, Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have SELECT.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP as supported here are NOT intended to
  // support SjLj exception handling; they are a lightweight setjmp/longjmp
  // replacement to support continuations, user-level threading, etc. As a
  // result, no other SjLj exception interfaces are implemented; please don't
  // build your own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  if (Subtarget.isSVR4ABI() && !isPPC64)
    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  // Comparisons that require checking two conditions.
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // 64-bit-capable subtargets also have instructions for converting between
    // i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::vector_valuetypes()) {
      // add/sub are legal for all supported vector VTs.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // Vector instructions introduced in P8.
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9.
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType(ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
      setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128-bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However, due to direct move costs, it's not
        // worth doing.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Vector operation legalization checks the result type of
      // SIGN_EXTEND_INREG; overall legalization checks the inner type.
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128-bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);

      if (EnableQuadPrecision) {
        addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
        setOperationAction(ISD::FADD, MVT::f128, Legal);
        setOperationAction(ISD::FSUB, MVT::f128, Legal);
        setOperationAction(ISD::FDIV, MVT::f128, Legal);
        setOperationAction(ISD::FMUL, MVT::f128, Legal);
        setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
        setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
        setOperationAction(ISD::FMA, MVT::f128, Legal);
      }
    }

    if (Subtarget.hasP9Altivec()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f64, Custom);
    setOperationAction(ISD::STORE, MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_ROUND_INREG, MVT::v4f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG, MVT::v4f64, Legal);
    setOperationAction(ISD::FABS, MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f32, Custom);
    setOperationAction(ISD::STORE, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND, MVT::v4i1, Legal);
    setOperationAction(ISD::OR, MVT::v4i1, Legal);
    setOperationAction(ISD::XOR, MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
    setOperationAction(ISD::STORE, MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(2);
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(4);

  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
    setPrefFunctionAlignment(4);
    setPrefLoopAlignment(4);
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of a function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
    MaxLoadsPerMemcmp = 128;
  } else {
    MaxLoadsPerMemcmp = 8;
    MaxLoadsPerMemcmpOptSize = 4;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
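/// For example, on a 64-bit (non-Darwin) Altivec target, a struct containing
/// a <4 x i32> member is aligned to 16 bytes, while a struct of plain i64
/// members keeps the default 8-byte alignment.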
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary. Everything
  // else is aligned to 8 bytes on PPC64 and 4 bytes on PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
  return Align;
}

bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER: break;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
  case PPCISD::FP_TO_UINT_IN_VSR:
    return "PPCISD::FP_TO_UINT_IN_VSR";
  case PPCISD::FP_TO_SINT_IN_VSR:
    return "PPCISD::FP_TO_SINT_IN_VSR";
  case PPCISD::FRE: return "PPCISD::FRE";
  case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::XXSPLT: return "PPCISD::XXSPLT";
  case PPCISD::VECINSERT: return "PPCISD::VECINSERT";
  case PPCISD::XXREVERSE: return "PPCISD::XXREVERSE";
  case PPCISD::XXPERMDI: return "PPCISD::XXPERMDI";
  case PPCISD::VECSHL: return "PPCISD::VECSHL";
  case PPCISD::CMPB: return "PPCISD::CMPB";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
  case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
  case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
  case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
  case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL: return "PPCISD::SRL";
  case PPCISD::SRA: return "PPCISD::SRA";
  case PPCISD::SHL: return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL: return "PPCISD::CALL";
  case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR: return "PPCISD::MTCTR";
  case PPCISD::BCTRL: return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
  case PPCISD::MFVSR: return "PPCISD::MFVSR";
  case PPCISD::MTVSRA: return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ";
  case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP";
  case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP";
  case PPCISD::ANDIo_1_EQ_BIT: return "PPCISD::ANDIo_1_EQ_BIT";
  case PPCISD::ANDIo_1_GT_BIT: return "PPCISD::ANDIo_1_GT_BIT";
  case PPCISD::VCMP: return "PPCISD::VCMP";
  case PPCISD::VCMPo: return "PPCISD::VCMPo";
  case PPCISD::LBRX: return "PPCISD::LBRX";
  case PPCISD::STBRX: return "PPCISD::STBRX";
  case PPCISD::LFIWAX: return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX: return "PPCISD::LFIWZX";
  case PPCISD::LXSIZX: return "PPCISD::LXSIZX";
  case PPCISD::STXSIX: return "PPCISD::STXSIX";
  case PPCISD::VEXTS: return "PPCISD::VEXTS";
  case PPCISD::SExtVElems: return "PPCISD::SExtVElems";
  case PPCISD::LXVD2X: return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X: return "PPCISD::STXVD2X";
  case PPCISD::ST_VSR_SCAL_INT:
    return "PPCISD::ST_VSR_SCAL_INT";
  case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ: return "PPCISD::BDNZ";
  case PPCISD::BDZ: return "PPCISD::BDZ";
  case PPCISD::MFFS: return "PPCISD::MFFS";
  case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET: return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
  case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT";
  case PPCISD::SC: return "PPCISD::SC";
  case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB";
  case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE";
  case PPCISD::RFEBB: return "PPCISD::RFEBB";
  case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD";
  case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN";
  case PPCISD::QVFPERM: return "PPCISD::QVFPERM";
  case PPCISD::QVGPCI: return "PPCISD::QVGPCI";
  case PPCISD::QVALIGNI: return "PPCISD::QVALIGNI";
  case PPCISD::QVESPLATI: return "PPCISD::QVESPLATI";
  case PPCISD::QBFLT: return "PPCISD::QBFLT";
  case PPCISD::QVLFSb: return "PPCISD::QVLFSb";
  }
  return nullptr;
}

EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
                                          EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

  if (Subtarget.hasQPX())
    return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());

  return VT.changeVectorElementTypeToInteger();
}

bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
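/// This also looks through extending and non-extending loads of a
/// constant-pool entry, so an FP zero that has already been legalized into
/// the constant pool is recognized as well.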
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
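/// For example, with ShuffleKind 0 (big-endian, two inputs) the mask must be
/// <1, 3, 5, ..., 31>: the odd-numbered bytes of the concatenated inputs,
/// i.e. the low-order byte of each halfword, which is what vpkuhum keeps.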
0 : 2; 1364 for (unsigned i = 0; i != 8; i += 2) 1365 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) || 1366 !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) || 1367 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) || 1368 !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1)) 1369 return false; 1370 } 1371 return true; 1372 } 1373 1374 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a 1375 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the 1376 /// current subtarget. 1377 /// 1378 /// The ShuffleKind distinguishes between big-endian operations with 1379 /// two different inputs (0), either-endian operations with two identical 1380 /// inputs (1), and little-endian operations with two different inputs (2). 1381 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 1382 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, 1383 SelectionDAG &DAG) { 1384 const PPCSubtarget& Subtarget = 1385 static_cast<const PPCSubtarget&>(DAG.getSubtarget()); 1386 if (!Subtarget.hasP8Vector()) 1387 return false; 1388 1389 bool IsLE = DAG.getDataLayout().isLittleEndian(); 1390 if (ShuffleKind == 0) { 1391 if (IsLE) 1392 return false; 1393 for (unsigned i = 0; i != 16; i += 4) 1394 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+4) || 1395 !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) || 1396 !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) || 1397 !isConstantOrUndef(N->getMaskElt(i+3), i*2+7)) 1398 return false; 1399 } else if (ShuffleKind == 2) { 1400 if (!IsLE) 1401 return false; 1402 for (unsigned i = 0; i != 16; i += 4) 1403 if (!isConstantOrUndef(N->getMaskElt(i ), i*2) || 1404 !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) || 1405 !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) || 1406 !isConstantOrUndef(N->getMaskElt(i+3), i*2+3)) 1407 return false; 1408 } else if (ShuffleKind == 1) { 1409 unsigned j = IsLE ? 0 : 4; 1410 for (unsigned i = 0; i != 8; i += 4) 1411 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) || 1412 !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) || 1413 !isConstantOrUndef(N->getMaskElt(i+2), i*2+j+2) || 1414 !isConstantOrUndef(N->getMaskElt(i+3), i*2+j+3) || 1415 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) || 1416 !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1) || 1417 !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) || 1418 !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3)) 1419 return false; 1420 } 1421 return true; 1422 } 1423 1424 /// isVMerge - Common function, used to match vmrg* shuffles. 1425 /// 1426 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize, 1427 unsigned LHSStart, unsigned RHSStart) { 1428 if (N->getValueType(0) != MVT::v16i8) 1429 return false; 1430 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) && 1431 "Unsupported merge size!"); 1432 1433 for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units 1434 for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit 1435 if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j), 1436 LHSStart+j+i*UnitSize) || 1437 !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j), 1438 RHSStart+j+i*UnitSize)) 1439 return false; 1440 } 1441 return true; 1442 } 1443 1444 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for 1445 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes). 
1446 /// The ShuffleKind distinguishes between big-endian merges with two
1447 /// different inputs (0), either-endian merges with two identical inputs (1),
1448 /// and little-endian merges with two different inputs (2). For the latter,
1449 /// the input operands are swapped (see PPCInstrAltivec.td).
1450 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1451 unsigned ShuffleKind, SelectionDAG &DAG) {
1452 if (DAG.getDataLayout().isLittleEndian()) {
1453 if (ShuffleKind == 1) // unary
1454 return isVMerge(N, UnitSize, 0, 0);
1455 else if (ShuffleKind == 2) // swapped
1456 return isVMerge(N, UnitSize, 0, 16);
1457 else
1458 return false;
1459 } else {
1460 if (ShuffleKind == 1) // unary
1461 return isVMerge(N, UnitSize, 8, 8);
1462 else if (ShuffleKind == 0) // normal
1463 return isVMerge(N, UnitSize, 8, 24);
1464 else
1465 return false;
1466 }
1467 }
1468
1469 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
1470 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
1471 /// The ShuffleKind distinguishes between big-endian merges with two
1472 /// different inputs (0), either-endian merges with two identical inputs (1),
1473 /// and little-endian merges with two different inputs (2). For the latter,
1474 /// the input operands are swapped (see PPCInstrAltivec.td).
1475 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1476 unsigned ShuffleKind, SelectionDAG &DAG) {
1477 if (DAG.getDataLayout().isLittleEndian()) {
1478 if (ShuffleKind == 1) // unary
1479 return isVMerge(N, UnitSize, 8, 8);
1480 else if (ShuffleKind == 2) // swapped
1481 return isVMerge(N, UnitSize, 8, 24);
1482 else
1483 return false;
1484 } else {
1485 if (ShuffleKind == 1) // unary
1486 return isVMerge(N, UnitSize, 0, 0);
1487 else if (ShuffleKind == 0) // normal
1488 return isVMerge(N, UnitSize, 0, 16);
1489 else
1490 return false;
1491 }
1492 }
1493
1494 /**
1495 * Common function used to match vmrgew and vmrgow shuffles.
1496 *
1497 * The indexOffset determines whether to look for even or odd words in
1498 * the shuffle mask. This is based on the endianness of the target
1499 * machine.
1500 * - Little Endian:
1501 * - Use offset of 0 to check for odd elements
1502 * - Use offset of 4 to check for even elements
1503 * - Big Endian:
1504 * - Use offset of 0 to check for even elements
1505 * - Use offset of 4 to check for odd elements
1506 * A detailed description of the vector element ordering for little endian and
1507 * big endian can be found at
1508 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
1509 * Targeting your applications - what little endian and big endian IBM XL C/C++
1510 * compiler differences mean to you
1511 *
1512 * The mask to the shuffle vector instruction specifies the indices of the
1513 * elements from the two input vectors to place in the result. The elements are
1514 * numbered in array-access order, starting with the first vector. These vectors
1515 * are always of type v16i8, thus each vector will contain 16 elements of
1516 * 8 bits each. More info on the shufflevector instruction can be found in the
1517 * Language Reference:
1518 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
1519 *
1520 * The RHSStartValue indicates whether the same input vectors are used (unary)
1521 * or two different input vectors are used, based on the following:
1522 * - If the instruction uses the same vector for both inputs, the range of the
1523 * indices will be 0 to 15.
In this case, the RHSStart value passed should
1524 * be 0.
1525 * - If the instruction has two different vectors then the range of the
1526 * indices will be 0 to 31. In this case, the RHSStart value passed should
1527 * be 16 (indices 0-15 specify elements in the first vector while indices 16
1528 * to 31 specify elements in the second vector).
1529 *
1530 * \param[in] N The shuffle vector SD Node to analyze
1531 * \param[in] IndexOffset Specifies whether to look for even or odd elements
1532 * \param[in] RHSStartValue Specifies the starting index for the right-hand input
1533 * vector to the shuffle_vector instruction
1534 * \return true iff this shuffle vector represents an even or odd word merge
1535 */
1536 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
1537 unsigned RHSStartValue) {
1538 if (N->getValueType(0) != MVT::v16i8)
1539 return false;
1540
1541 for (unsigned i = 0; i < 2; ++i)
1542 for (unsigned j = 0; j < 4; ++j)
1543 if (!isConstantOrUndef(N->getMaskElt(i*4+j),
1544 i*RHSStartValue+j+IndexOffset) ||
1545 !isConstantOrUndef(N->getMaskElt(i*4+j+8),
1546 i*RHSStartValue+j+IndexOffset+8))
1547 return false;
1548 return true;
1549 }
1550
1551 /**
1552 * Determine if the specified shuffle mask is suitable for the vmrgew or
1553 * vmrgow instructions.
1554 *
1555 * \param[in] N The shuffle vector SD Node to analyze
1556 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
1557 * \param[in] ShuffleKind Identify the type of merge:
1558 * - 0 = big-endian merge with two different inputs;
1559 * - 1 = either-endian merge with two identical inputs;
1560 * - 2 = little-endian merge with two different inputs (inputs are swapped for
1561 * little-endian merges).
1562 * \param[in] DAG The current SelectionDAG
1563 * \return true iff this shuffle mask is suitable for a vmrgew or vmrgow instruction
1564 */
1565 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
1566 unsigned ShuffleKind, SelectionDAG &DAG) {
1567 if (DAG.getDataLayout().isLittleEndian()) {
1568 unsigned indexOffset = CheckEven ? 4 : 0;
1569 if (ShuffleKind == 1) // Unary
1570 return isVMerge(N, indexOffset, 0);
1571 else if (ShuffleKind == 2) // swapped
1572 return isVMerge(N, indexOffset, 16);
1573 else
1574 return false;
1575 }
1576 else {
1577 unsigned indexOffset = CheckEven ? 0 : 4;
1578 if (ShuffleKind == 1) // Unary
1579 return isVMerge(N, indexOffset, 0);
1580 else if (ShuffleKind == 0) // Normal
1581 return isVMerge(N, indexOffset, 16);
1582 else
1583 return false;
1584 }
1585
1586 }
1587
1588 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
1589 /// amount, otherwise return -1.
1590 /// The ShuffleKind distinguishes between big-endian operations with two
1591 /// different inputs (0), either-endian operations with two identical inputs
1592 /// (1), and little-endian operations with two different inputs (2). For the
1593 /// latter, the input operands are swapped (see PPCInstrAltivec.td).
1594 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
1595 SelectionDAG &DAG) {
1596 if (N->getValueType(0) != MVT::v16i8)
1597 return -1;
1598
1599 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1600
1601 // Find the first non-undef value in the shuffle mask.
1602 unsigned i;
1603 for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
1604 /*search*/;
1605
1606 if (i == 16) return -1; // all undef.
1607
1608 // Otherwise, check to see if the rest of the elements are consecutively
1609 // numbered from this value.
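// For example (illustrative): the two-input big-endian mask
// <3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18> is consecutive starting at 3,
// so it denotes a vsldoi by 3: byte i of the result is byte ShiftAmt+i of
// the two concatenated inputs.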
1610 unsigned ShiftAmt = SVOp->getMaskElt(i);
1611 if (ShiftAmt < i) return -1;
1612
1613 ShiftAmt -= i;
1614 bool isLE = DAG.getDataLayout().isLittleEndian();
1615
1616 if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
1617 // Check the rest of the elements to see if they are consecutive.
1618 for (++i; i != 16; ++i)
1619 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
1620 return -1;
1621 } else if (ShuffleKind == 1) {
1622 // Check the rest of the elements to see if they are consecutive.
1623 for (++i; i != 16; ++i)
1624 if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
1625 return -1;
1626 } else
1627 return -1;
1628
1629 if (isLE)
1630 ShiftAmt = 16 - ShiftAmt;
1631
1632 return ShiftAmt;
1633 }
1634
1635 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
1636 /// specifies a splat of a single element that is suitable for input to
1637 /// VSPLTB/VSPLTH/VSPLTW.
1638 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
1639 assert(N->getValueType(0) == MVT::v16i8 &&
1640 (EltSize == 1 || EltSize == 2 || EltSize == 4));
1641
1642 // The consecutive indices need to specify an element, not part of two
1643 // different elements. So abandon ship early if this isn't the case.
1644 if (N->getMaskElt(0) % EltSize != 0)
1645 return false;
1646
1647 // This is a splat operation if each element of the permute is the same, and
1648 // if the value doesn't reference the second vector.
1649 unsigned ElementBase = N->getMaskElt(0);
1650
1651 // FIXME: Handle UNDEF elements too!
1652 if (ElementBase >= 16)
1653 return false;
1654
1655 // Check that the indices are consecutive, in the case of a multi-byte element
1656 // splatted with a v16i8 mask.
1657 for (unsigned i = 1; i != EltSize; ++i)
1658 if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
1659 return false;
1660
1661 for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
1662 if (N->getMaskElt(i) < 0) continue;
1663 for (unsigned j = 0; j != EltSize; ++j)
1664 if (N->getMaskElt(i+j) != N->getMaskElt(j))
1665 return false;
1666 }
1667 return true;
1668 }
1669
1670 /// Check that the mask is shuffling N-byte elements. Within each N-byte
1671 /// element of the mask, the indices can be in either increasing or
1672 /// decreasing order, as long as they are consecutive.
1673 /// \param[in] N the shuffle vector SD Node to analyze
1674 /// \param[in] Width the element width in bytes, which can be 2/4/8/16 (HalfWord/
1675 /// Word/DoubleWord/QuadWord).
1676 /// \param[in] StepLen the step between adjacent indices within each element:
1677 /// 1 if the indices are increasing, -1 if they are decreasing.
1678 /// \return true iff the mask is shuffling N-byte elements.
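/// For example (a sketch of the accepted forms, with Width == 4): with
/// StepLen == 1 the mask <4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11> is
/// accepted, while with StepLen == -1 each word must be byte-reversed, as in
/// <3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12>.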
1679 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
1680 int StepLen) {
1681 assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
1682 "Unexpected element width.");
1683 assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");
1684
1685 unsigned NumOfElem = 16 / Width;
1686 unsigned MaskVal[16]; // Width is never greater than 16
1687 for (unsigned i = 0; i < NumOfElem; ++i) {
1688 MaskVal[0] = N->getMaskElt(i * Width);
1689 if ((StepLen == 1) && (MaskVal[0] % Width)) {
1690 return false;
1691 } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
1692 return false;
1693 }
1694
1695 for (unsigned int j = 1; j < Width; ++j) {
1696 MaskVal[j] = N->getMaskElt(i * Width + j);
1697 if (MaskVal[j] != MaskVal[j-1] + StepLen) {
1698 return false;
1699 }
1700 }
1701 }
1702
1703 return true;
1704 }
1705
1706 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
1707 unsigned &InsertAtByte, bool &Swap, bool IsLE) {
1708 if (!isNByteElemShuffleMask(N, 4, 1))
1709 return false;
1710
1711 // Now we look at mask elements 0,4,8,12
1712 unsigned M0 = N->getMaskElt(0) / 4;
1713 unsigned M1 = N->getMaskElt(4) / 4;
1714 unsigned M2 = N->getMaskElt(8) / 4;
1715 unsigned M3 = N->getMaskElt(12) / 4;
1716 unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
1717 unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
1718
1719 // Below, let H and L be arbitrary elements of the shuffle mask
1720 // where H is in the range [4,7] and L is in the range [0,3].
1721 // H, 1, 2, 3 or L, 5, 6, 7
1722 if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
1723 (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
1724 ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
1725 InsertAtByte = IsLE ? 12 : 0;
1726 Swap = M0 < 4;
1727 return true;
1728 }
1729 // 0, H, 2, 3 or 4, L, 6, 7
1730 if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
1731 (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
1732 ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
1733 InsertAtByte = IsLE ? 8 : 4;
1734 Swap = M1 < 4;
1735 return true;
1736 }
1737 // 0, 1, H, 3 or 4, 5, L, 7
1738 if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
1739 (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
1740 ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
1741 InsertAtByte = IsLE ? 4 : 8;
1742 Swap = M2 < 4;
1743 return true;
1744 }
1745 // 0, 1, 2, H or 4, 5, 6, L
1746 if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
1747 (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
1748 ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
1749 InsertAtByte = IsLE ? 0 : 12;
1750 Swap = M3 < 4;
1751 return true;
1752 }
1753
1754 // If both vector operands for the shuffle are the same vector, the mask will
1755 // contain only elements from the first one and the second one will be undef.
1756 if (N->getOperand(1).isUndef()) {
1757 ShiftElts = 0;
1758 Swap = true;
1759 unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
1760 if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
1761 InsertAtByte = IsLE ? 12 : 0;
1762 return true;
1763 }
1764 if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
1765 InsertAtByte = IsLE ? 8 : 4;
1766 return true;
1767 }
1768 if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
1769 InsertAtByte = IsLE ? 4 : 8;
1770 return true;
1771 }
1772 if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
1773 InsertAtByte = IsLE ?
0 : 12; 1774 return true; 1775 } 1776 } 1777 1778 return false; 1779 } 1780 1781 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, 1782 bool &Swap, bool IsLE) { 1783 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 1784 // Ensure each byte index of the word is consecutive. 1785 if (!isNByteElemShuffleMask(N, 4, 1)) 1786 return false; 1787 1788 // Now we look at mask elements 0,4,8,12, which are the beginning of words. 1789 unsigned M0 = N->getMaskElt(0) / 4; 1790 unsigned M1 = N->getMaskElt(4) / 4; 1791 unsigned M2 = N->getMaskElt(8) / 4; 1792 unsigned M3 = N->getMaskElt(12) / 4; 1793 1794 // If both vector operands for the shuffle are the same vector, the mask will 1795 // contain only elements from the first one and the second one will be undef. 1796 if (N->getOperand(1).isUndef()) { 1797 assert(M0 < 4 && "Indexing into an undef vector?"); 1798 if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4) 1799 return false; 1800 1801 ShiftElts = IsLE ? (4 - M0) % 4 : M0; 1802 Swap = false; 1803 return true; 1804 } 1805 1806 // Ensure each word index of the ShuffleVector Mask is consecutive. 1807 if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8) 1808 return false; 1809 1810 if (IsLE) { 1811 if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) { 1812 // Input vectors don't need to be swapped if the leading element 1813 // of the result is one of the 3 left elements of the second vector 1814 // (or if there is no shift to be done at all). 1815 Swap = false; 1816 ShiftElts = (8 - M0) % 8; 1817 } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) { 1818 // Input vectors need to be swapped if the leading element 1819 // of the result is one of the 3 left elements of the first vector 1820 // (or if we're shifting by 4 - thereby simply swapping the vectors). 1821 Swap = true; 1822 ShiftElts = (4 - M0) % 4; 1823 } 1824 1825 return true; 1826 } else { // BE 1827 if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) { 1828 // Input vectors don't need to be swapped if the leading element 1829 // of the result is one of the 4 elements of the first vector. 1830 Swap = false; 1831 ShiftElts = M0; 1832 } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) { 1833 // Input vectors need to be swapped if the leading element 1834 // of the result is one of the 4 elements of the right vector. 1835 Swap = true; 1836 ShiftElts = M0 - 4; 1837 } 1838 1839 return true; 1840 } 1841 } 1842 1843 bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) { 1844 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 1845 1846 if (!isNByteElemShuffleMask(N, Width, -1)) 1847 return false; 1848 1849 for (int i = 0; i < 16; i += Width) 1850 if (N->getMaskElt(i) != i + Width - 1) 1851 return false; 1852 1853 return true; 1854 } 1855 1856 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) { 1857 return isXXBRShuffleMaskHelper(N, 2); 1858 } 1859 1860 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) { 1861 return isXXBRShuffleMaskHelper(N, 4); 1862 } 1863 1864 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) { 1865 return isXXBRShuffleMaskHelper(N, 8); 1866 } 1867 1868 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) { 1869 return isXXBRShuffleMaskHelper(N, 16); 1870 } 1871 1872 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap 1873 /// if the inputs to the instruction should be swapped and set \p DM to the 1874 /// value for the immediate. 
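/// (XXPERMDI builds its result by taking one doubleword from each of its two
/// (possibly swapped) inputs, with the selection encoded in a two-bit
/// immediate.)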
1875 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
1876 /// AND element 0 of the result comes from the first input (LE) or second input
1877 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
1878 /// \return true iff the given mask of shuffle node \p N is an XXPERMDI shuffle
1879 /// mask.
1880 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
1881 bool &Swap, bool IsLE) {
1882 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
1883
1884 // Ensure each byte index of the double word is consecutive.
1885 if (!isNByteElemShuffleMask(N, 8, 1))
1886 return false;
1887
1888 unsigned M0 = N->getMaskElt(0) / 8;
1889 unsigned M1 = N->getMaskElt(8) / 8;
1890 assert(((M0 | M1) < 4) && "A mask element out of bounds?");
1891
1892 // If both vector operands for the shuffle are the same vector, the mask will
1893 // contain only elements from the first one and the second one will be undef.
1894 if (N->getOperand(1).isUndef()) {
1895 if ((M0 | M1) < 2) {
1896 DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
1897 Swap = false;
1898 return true;
1899 } else
1900 return false;
1901 }
1902
1903 if (IsLE) {
1904 if (M0 > 1 && M1 < 2) {
1905 Swap = false;
1906 } else if (M0 < 2 && M1 > 1) {
1907 M0 = (M0 + 2) % 4;
1908 M1 = (M1 + 2) % 4;
1909 Swap = true;
1910 } else
1911 return false;
1912
1913 // Note: if control flow reaches here, Swap has already been set above.
1914 DM = (((~M1) & 1) << 1) + ((~M0) & 1);
1915 return true;
1916 } else { // BE
1917 if (M0 < 2 && M1 > 1) {
1918 Swap = false;
1919 } else if (M0 > 1 && M1 < 2) {
1920 M0 = (M0 + 2) % 4;
1921 M1 = (M1 + 2) % 4;
1922 Swap = true;
1923 } else
1924 return false;
1925
1926 // Note: if control flow reaches here, Swap has already been set above.
1927 DM = (M0 << 1) + (M1 & 1);
1928 return true;
1929 }
1930 }
1931
1932
1933 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
1934 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
1935 unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
1936 SelectionDAG &DAG) {
1937 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1938 assert(isSplatShuffleMask(SVOp, EltSize));
1939 if (DAG.getDataLayout().isLittleEndian())
1940 return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
1941 else
1942 return SVOp->getMaskElt(0) / EltSize;
1943 }
1944
1945 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
1946 /// by using a vspltis[bhw] instruction of the specified element size, return
1947 /// the constant being splatted. The ByteSize field indicates the number of
1948 /// bytes of each element (1, 2, or 4 -> b, h, or w).
1949 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
1950 SDValue OpVal(nullptr, 0);
1951
1952 // If ByteSize of the splat is bigger than the element size of the
1953 // build_vector, then we have a case where we are checking for a splat where
1954 // multiple elements of the buildvector are folded together into a single
1955 // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
1956 unsigned EltSize = 16/N->getNumOperands();
1957 if (EltSize < ByteSize) {
1958 unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval.
1959 SDValue UniquedVals[4];
1960 assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
1961
1962 // See if all of the elements in the buildvector agree across each chunk.
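// For example (illustrative): probing a v8i16 build_vector with
// ByteSize == 4 (vspltisw) gives Multiple == 2, so the even-numbered and
// the odd-numbered operands must each be uniform (or undef).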
1963 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 1964 if (N->getOperand(i).isUndef()) continue; 1965 // If the element isn't a constant, bail fully out. 1966 if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue(); 1967 1968 if (!UniquedVals[i&(Multiple-1)].getNode()) 1969 UniquedVals[i&(Multiple-1)] = N->getOperand(i); 1970 else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i)) 1971 return SDValue(); // no match. 1972 } 1973 1974 // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains 1975 // either constant or undef values that are identical for each chunk. See 1976 // if these chunks can form into a larger vspltis*. 1977 1978 // Check to see if all of the leading entries are either 0 or -1. If 1979 // neither, then this won't fit into the immediate field. 1980 bool LeadingZero = true; 1981 bool LeadingOnes = true; 1982 for (unsigned i = 0; i != Multiple-1; ++i) { 1983 if (!UniquedVals[i].getNode()) continue; // Must have been undefs. 1984 1985 LeadingZero &= isNullConstant(UniquedVals[i]); 1986 LeadingOnes &= isAllOnesConstant(UniquedVals[i]); 1987 } 1988 // Finally, check the least significant entry. 1989 if (LeadingZero) { 1990 if (!UniquedVals[Multiple-1].getNode()) 1991 return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef 1992 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue(); 1993 if (Val < 16) // 0,0,0,4 -> vspltisw(4) 1994 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32); 1995 } 1996 if (LeadingOnes) { 1997 if (!UniquedVals[Multiple-1].getNode()) 1998 return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef 1999 int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue(); 2000 if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2) 2001 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32); 2002 } 2003 2004 return SDValue(); 2005 } 2006 2007 // Check to see if this buildvec has a single non-undef value in its elements. 2008 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 2009 if (N->getOperand(i).isUndef()) continue; 2010 if (!OpVal.getNode()) 2011 OpVal = N->getOperand(i); 2012 else if (OpVal != N->getOperand(i)) 2013 return SDValue(); 2014 } 2015 2016 if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def. 2017 2018 unsigned ValSizeInBytes = EltSize; 2019 uint64_t Value = 0; 2020 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { 2021 Value = CN->getZExtValue(); 2022 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { 2023 assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!"); 2024 Value = FloatToBits(CN->getValueAPF().convertToFloat()); 2025 } 2026 2027 // If the splat value is larger than the element value, then we can never do 2028 // this splat. The only case that we could fit the replicated bits into our 2029 // immediate field for would be zero, and we prefer to use vxor for it. 2030 if (ValSizeInBytes < ByteSize) return SDValue(); 2031 2032 // If the element value is larger than the splat value, check if it consists 2033 // of a repeated bit pattern of size ByteSize. 2034 if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8)) 2035 return SDValue(); 2036 2037 // Properly sign extend the value. 2038 int MaskVal = SignExtend32(Value, ByteSize * 8); 2039 2040 // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros. 
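// (An all-zeros build_vector is recognized separately and, as noted above,
// is better materialized with a vxor than with a vspltis*.)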
2041 if (MaskVal == 0) return SDValue();
2042
2043 // Finally, if this value fits in a 5-bit sext field, return it.
2044 if (SignExtend32<5>(MaskVal) == MaskVal)
2045 return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
2046 return SDValue();
2047 }
2048
2049 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
2050 /// amount, otherwise return -1.
2051 int PPC::isQVALIGNIShuffleMask(SDNode *N) {
2052 EVT VT = N->getValueType(0);
2053 if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
2054 return -1;
2055
2056 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2057
2058 // Find the first non-undef value in the shuffle mask.
2059 unsigned i;
2060 for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
2061 /*search*/;
2062
2063 if (i == 4) return -1; // all undef.
2064
2065 // Otherwise, check to see if the rest of the elements are consecutively
2066 // numbered from this value.
2067 unsigned ShiftAmt = SVOp->getMaskElt(i);
2068 if (ShiftAmt < i) return -1;
2069 ShiftAmt -= i;
2070
2071 // Check the rest of the elements to see if they are consecutive.
2072 for (++i; i != 4; ++i)
2073 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
2074 return -1;
2075
2076 return ShiftAmt;
2077 }
2078
2079 //===----------------------------------------------------------------------===//
2080 // Addressing Mode Selection
2081 //===----------------------------------------------------------------------===//
2082
2083 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit
2084 /// or 64-bit immediate, and if the value can be accurately represented as a
2085 /// sign extension from a 16-bit value. If so, this returns true and the
2086 /// immediate.
2087 bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
2088 if (!isa<ConstantSDNode>(N))
2089 return false;
2090
2091 Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
2092 if (N->getValueType(0) == MVT::i32)
2093 return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
2094 else
2095 return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2096 }
2097 bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
2098 return isIntS16Immediate(Op.getNode(), Imm);
2099 }
2100
2101 /// SelectAddressRegReg - Given the specified address, check to see if it
2102 /// can be represented as an indexed [r+r] operation. Returns false if it
2103 /// can be more efficiently represented with [r+imm].
2104 bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
2105 SDValue &Index,
2106 SelectionDAG &DAG) const {
2107 int16_t imm = 0;
2108 if (N.getOpcode() == ISD::ADD) {
2109 if (isIntS16Immediate(N.getOperand(1), imm))
2110 return false; // r+i
2111 if (N.getOperand(1).getOpcode() == PPCISD::Lo)
2112 return false; // r+i
2113
2114 Base = N.getOperand(0);
2115 Index = N.getOperand(1);
2116 return true;
2117 } else if (N.getOpcode() == ISD::OR) {
2118 if (isIntS16Immediate(N.getOperand(1), imm))
2119 return false; // r+i; [r+imm] selection can fold the constant.
2120
2121 // If this is an or of disjoint bitfields, we can codegen this as an add
2122 // (for better address arithmetic) if the LHS and RHS of the OR are provably
2123 // disjoint.
2124 KnownBits LHSKnown, RHSKnown;
2125 DAG.computeKnownBits(N.getOperand(0), LHSKnown);
2126
2127 if (LHSKnown.Zero.getBoolValue()) {
2128 DAG.computeKnownBits(N.getOperand(1), RHSKnown);
2129 // If all of the bits are known zero on the LHS or RHS, the add won't
2130 // carry.
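// That is, the following test requires every bit position to be known zero
// in at least one of the two operands, in which case OR and ADD compute the
// same value.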
2131 if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
2132 Base = N.getOperand(0);
2133 Index = N.getOperand(1);
2134 return true;
2135 }
2136 }
2137 }
2138
2139 return false;
2140 }
2141
2142 // If we happen to be doing an i64 load or store into a stack slot that has
2143 // less than a 4-byte alignment, then the frame-index elimination may need to
2144 // use an indexed load or store instruction (because the offset may not be a
2145 // multiple of 4). The extra register needed to hold the offset comes from the
2146 // register scavenger, and it is possible that the scavenger will need to use
2147 // an emergency spill slot. As a result, we need to make sure that a spill slot
2148 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
2149 // stack slot.
2150 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
2151 // FIXME: This does not handle the LWA case.
2152 if (VT != MVT::i64)
2153 return;
2154
2155 // NOTE: We'll exclude negative FIs here, which come from argument
2156 // lowering, because there are no known test cases triggering this problem
2157 // using packed structures (or similar). We can remove this exclusion if
2158 // we find such a test case. The reason why this is so test-case driven is
2159 // because this entire 'fixup' is only to prevent crashes (from the
2160 // register scavenger) on not-really-valid inputs. For example, if we have:
2161 // %a = alloca i1
2162 // %b = bitcast i1* %a to i64*
2163 // store i64 0, i64* %b
2164 // then the store should really be marked as 'align 1', but is not. If it
2165 // were marked as 'align 1' then the indexed form would have been
2166 // instruction-selected initially, and the problem this 'fixup' is preventing
2167 // won't happen regardless.
2168 if (FrameIdx < 0)
2169 return;
2170
2171 MachineFunction &MF = DAG.getMachineFunction();
2172 MachineFrameInfo &MFI = MF.getFrameInfo();
2173
2174 unsigned Align = MFI.getObjectAlignment(FrameIdx);
2175 if (Align >= 4)
2176 return;
2177
2178 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2179 FuncInfo->setHasNonRISpills();
2180 }
2181
2182 /// Returns true if the address N can be represented by a base register plus
2183 /// a signed 16-bit displacement [r+imm], and if it is not better
2184 /// represented as reg+reg. If \p Alignment is non-zero, only accept
2185 /// displacements that are multiples of that value.
2186 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
2187 SDValue &Base,
2188 SelectionDAG &DAG,
2189 unsigned Alignment) const {
2190 // FIXME: dl should come from the parent load or store, not from the address.
2191 SDLoc dl(N);
2192 // If this can be more profitably realized as r+r, fail.
2193 if (SelectAddressRegReg(N, Disp, Base, DAG))
2194 return false;
2195
2196 if (N.getOpcode() == ISD::ADD) {
2197 int16_t imm = 0;
2198 if (isIntS16Immediate(N.getOperand(1), imm) &&
2199 (!Alignment || (imm % Alignment) == 0)) {
2200 Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2201 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2202 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2203 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2204 } else {
2205 Base = N.getOperand(0);
2206 }
2207 return true; // [r+i]
2208 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
2209 // Match LOAD (ADD (X, Lo(G))).
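// The Lo(G) operand carries the low 16 bits of the symbol, which become the
// displacement of the final [r+i] access; X supplies the base register,
// typically set up by a matching Hi/addis node.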
2210 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
2211 && "Cannot handle constant offsets yet!");
2212 Disp = N.getOperand(1).getOperand(0); // The global address.
2213 assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2214 Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2215 Disp.getOpcode() == ISD::TargetConstantPool ||
2216 Disp.getOpcode() == ISD::TargetJumpTable);
2217 Base = N.getOperand(0);
2218 return true; // [&g+r]
2219 }
2220 } else if (N.getOpcode() == ISD::OR) {
2221 int16_t imm = 0;
2222 if (isIntS16Immediate(N.getOperand(1), imm) &&
2223 (!Alignment || (imm % Alignment) == 0)) {
2224 // If this is an or of disjoint bitfields, we can codegen this as an add
2225 // (for better address arithmetic) if the LHS and RHS of the OR are
2226 // provably disjoint.
2227 KnownBits LHSKnown;
2228 DAG.computeKnownBits(N.getOperand(0), LHSKnown);
2229
2230 if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
2231 // If all of the bits are known zero on the LHS or RHS, the add won't
2232 // carry.
2233 if (FrameIndexSDNode *FI =
2234 dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2235 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2236 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2237 } else {
2238 Base = N.getOperand(0);
2239 }
2240 Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2241 return true;
2242 }
2243 }
2244 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2245 // Loading from a constant address.
2246
2247 // If this address fits entirely in a 16-bit sext immediate field, codegen
2248 // this as "d, 0".
2249 int16_t Imm;
2250 if (isIntS16Immediate(CN, Imm) && (!Alignment || (Imm % Alignment) == 0)) {
2251 Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
2252 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2253 CN->getValueType(0));
2254 return true;
2255 }
2256
2257 // Handle 32-bit sext immediates with LIS + addr mode.
2258 if ((CN->getValueType(0) == MVT::i32 ||
2259 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
2260 (!Alignment || (CN->getZExtValue() % Alignment) == 0)) {
2261 int Addr = (int)CN->getZExtValue();
2262
2263 // Otherwise, break this down into an LIS + disp.
2264 Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
2265
2266 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
2267 MVT::i32);
2268 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
2269 Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
2270 return true;
2271 }
2272 }
2273
2274 Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2275 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2276 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2277 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2278 } else
2279 Base = N;
2280 return true; // [r+0]
2281 }
2282
2283 /// SelectAddressRegRegOnly - Given the specified address, force it to be
2284 /// represented as an indexed [r+r] operation.
2285 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
2286 SDValue &Index,
2287 SelectionDAG &DAG) const {
2288 // Check to see if we can easily represent this as an [r+r] address. This
2289 // will fail if it thinks that the address is more profitably represented as
2290 // reg+imm, e.g. where imm = 0.
2291 if (SelectAddressRegReg(N, Base, Index, DAG))
2292 return true;
2293
2294 // If the address is the result of an add, we will utilize the fact that the
2295 // address calculation includes an implicit add. However, we can reduce
2296 // register pressure if we do not materialize a constant just for use as the
2297 // index register. So we fold the add into the [r+r] form unless it adds a
2298 // 16-bit signed constant and both of its operands have a single use (then the
// addi itself is cheaper and becomes the index below).
2299 int16_t imm = 0;
2300 if (N.getOpcode() == ISD::ADD &&
2301 (!isIntS16Immediate(N.getOperand(1), imm) ||
2302 !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
2303 Base = N.getOperand(0);
2304 Index = N.getOperand(1);
2305 return true;
2306 }
2307
2308 // Otherwise, do it the hard way, using R0 as the base register.
2309 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2310 N.getValueType());
2311 Index = N;
2312 return true;
2313 }
2314
2315 /// getPreIndexedAddressParts - Returns true (by value), and sets the base
2316 /// pointer, the offset, and the addressing mode (by reference) if the node's
2317 /// address can be legally represented as a pre-indexed load/store address.
2318 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
2319 SDValue &Offset,
2320 ISD::MemIndexedMode &AM,
2321 SelectionDAG &DAG) const {
2322 if (DisablePPCPreinc) return false;
2323
2324 bool isLoad = true;
2325 SDValue Ptr;
2326 EVT VT;
2327 unsigned Alignment;
2328 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2329 Ptr = LD->getBasePtr();
2330 VT = LD->getMemoryVT();
2331 Alignment = LD->getAlignment();
2332 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
2333 Ptr = ST->getBasePtr();
2334 VT = ST->getMemoryVT();
2335 Alignment = ST->getAlignment();
2336 isLoad = false;
2337 } else
2338 return false;
2339
2340 // PowerPC doesn't have preinc load/store instructions for vectors (except
2341 // for QPX, which does have preinc r+r forms).
2342 if (VT.isVector()) {
2343 if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
2344 return false;
2345 } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
2346 AM = ISD::PRE_INC;
2347 return true;
2348 }
2349 }
2350
2351 if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
2352 // Common code will reject creating a pre-inc form if the base pointer
2353 // is a frame index, or if N is a store and the base pointer is either
2354 // the same as or a predecessor of the value being stored. Check for
2355 // those situations here, and try with swapped Base/Offset instead.
2356 bool Swap = false;
2357
2358 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
2359 Swap = true;
2360 else if (!isLoad) {
2361 SDValue Val = cast<StoreSDNode>(N)->getValue();
2362 if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
2363 Swap = true;
2364 }
2365
2366 if (Swap)
2367 std::swap(Base, Offset);
2368
2369 AM = ISD::PRE_INC;
2370 return true;
2371 }
2372
2373 // LDU/STU can only handle immediates that are a multiple of 4.
2374 if (VT != MVT::i64) {
2375 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 0))
2376 return false;
2377 } else {
2378 // LDU/STU need an address with at least 4-byte alignment.
2379 if (Alignment < 4)
2380 return false;
2381
2382 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 4))
2383 return false;
2384 }
2385
2386 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2387 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of
2388 // sext i32 to i64 when addr mode is r+i.
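// (An r+r address for such a load would already have been accepted by the
// SelectAddressRegReg path above, where lwaux is available.)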
2389 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
2390 LD->getExtensionType() == ISD::SEXTLOAD &&
2391 isa<ConstantSDNode>(Offset))
2392 return false;
2393 }
2394
2395 AM = ISD::PRE_INC;
2396 return true;
2397 }
2398
2399 //===----------------------------------------------------------------------===//
2400 // LowerOperation implementation
2401 //===----------------------------------------------------------------------===//
2402
2403 /// Set the HiOpFlags and LoOpFlags to the target MO flags to use for a label
2404 /// reference, adding the PIC flag when a PIC base register should be used.
2405 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
2406 unsigned &HiOpFlags, unsigned &LoOpFlags,
2407 const GlobalValue *GV = nullptr) {
2408 HiOpFlags = PPCII::MO_HA;
2409 LoOpFlags = PPCII::MO_LO;
2410
2411 // Only add the PIC flag when in the PIC relocation model.
2412 if (IsPIC) {
2413 HiOpFlags |= PPCII::MO_PIC_FLAG;
2414 LoOpFlags |= PPCII::MO_PIC_FLAG;
2415 }
2416
2417 // If this is a reference to a global value that requires a non-lazy-ptr, make
2418 // sure that instruction lowering adds it.
2419 if (GV && Subtarget.hasLazyResolverStub(GV)) {
2420 HiOpFlags |= PPCII::MO_NLP_FLAG;
2421 LoOpFlags |= PPCII::MO_NLP_FLAG;
2422
2423 if (GV->hasHiddenVisibility()) {
2424 HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
2425 LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
2426 }
2427 }
2428 }
2429
2430 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
2431 SelectionDAG &DAG) {
2432 SDLoc DL(HiPart);
2433 EVT PtrVT = HiPart.getValueType();
2434 SDValue Zero = DAG.getConstant(0, DL, PtrVT);
2435
2436 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
2437 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
2438
2439 // With PIC, the first instruction is actually "GR+hi(&G)".
2440 if (isPIC)
2441 Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
2442 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
2443
2444 // Generate non-pic code that has direct accesses to the constant pool.
2445 // The address of the global is just (hi(&g)+lo(&g)).
2446 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
2447 }
2448
2449 static void setUsesTOCBasePtr(MachineFunction &MF) {
2450 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2451 FuncInfo->setUsesTOCBasePtr();
2452 }
2453
2454 static void setUsesTOCBasePtr(SelectionDAG &DAG) {
2455 setUsesTOCBasePtr(DAG.getMachineFunction());
2456 }
2457
2458 static SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, bool Is64Bit,
2459 SDValue GA) {
2460 EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
2461 SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) :
2462 DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
2463
2464 SDValue Ops[] = { GA, Reg };
2465 return DAG.getMemIntrinsicNode(
2466 PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
2467 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0,
2468 MachineMemOperand::MOLoad);
2469 }
2470
2471 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
2472 SelectionDAG &DAG) const {
2473 EVT PtrVT = Op.getValueType();
2474 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2475 const Constant *C = CP->getConstVal();
2476
2477 // 64-bit SVR4 ABI code is always position-independent.
2478 // The actual address of the constant pool entry is stored in the TOC.
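// For example (illustrative, small code model): the TOC_ENTRY node built
// below is selected to a TOC-relative load such as
//   ld 3, .LC0@toc(2)
// with r2 holding the TOC base pointer.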
2479 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2480 setUsesTOCBasePtr(DAG); 2481 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0); 2482 return getTOCEntry(DAG, SDLoc(CP), true, GA); 2483 } 2484 2485 unsigned MOHiFlag, MOLoFlag; 2486 bool IsPIC = isPositionIndependent(); 2487 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2488 2489 if (IsPIC && Subtarget.isSVR4ABI()) { 2490 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 2491 PPCII::MO_PIC_FLAG); 2492 return getTOCEntry(DAG, SDLoc(CP), false, GA); 2493 } 2494 2495 SDValue CPIHi = 2496 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag); 2497 SDValue CPILo = 2498 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag); 2499 return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG); 2500 } 2501 2502 // For 64-bit PowerPC, prefer the more compact relative encodings. 2503 // This trades 32 bits per jump table entry for one or two instructions 2504 // on the jump site. 2505 unsigned PPCTargetLowering::getJumpTableEncoding() const { 2506 if (isJumpTableRelative()) 2507 return MachineJumpTableInfo::EK_LabelDifference32; 2508 2509 return TargetLowering::getJumpTableEncoding(); 2510 } 2511 2512 bool PPCTargetLowering::isJumpTableRelative() const { 2513 if (Subtarget.isPPC64()) 2514 return true; 2515 return TargetLowering::isJumpTableRelative(); 2516 } 2517 2518 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table, 2519 SelectionDAG &DAG) const { 2520 if (!Subtarget.isPPC64()) 2521 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 2522 2523 switch (getTargetMachine().getCodeModel()) { 2524 case CodeModel::Small: 2525 case CodeModel::Medium: 2526 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 2527 default: 2528 return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(), 2529 getPointerTy(DAG.getDataLayout())); 2530 } 2531 } 2532 2533 const MCExpr * 2534 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF, 2535 unsigned JTI, 2536 MCContext &Ctx) const { 2537 if (!Subtarget.isPPC64()) 2538 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 2539 2540 switch (getTargetMachine().getCodeModel()) { 2541 case CodeModel::Small: 2542 case CodeModel::Medium: 2543 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 2544 default: 2545 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx); 2546 } 2547 } 2548 2549 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 2550 EVT PtrVT = Op.getValueType(); 2551 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 2552 2553 // 64-bit SVR4 ABI code is always position-independent. 2554 // The actual address of the GlobalValue is stored in the TOC. 
2555 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2556 setUsesTOCBasePtr(DAG); 2557 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 2558 return getTOCEntry(DAG, SDLoc(JT), true, GA); 2559 } 2560 2561 unsigned MOHiFlag, MOLoFlag; 2562 bool IsPIC = isPositionIndependent(); 2563 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2564 2565 if (IsPIC && Subtarget.isSVR4ABI()) { 2566 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 2567 PPCII::MO_PIC_FLAG); 2568 return getTOCEntry(DAG, SDLoc(GA), false, GA); 2569 } 2570 2571 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 2572 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 2573 return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG); 2574 } 2575 2576 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 2577 SelectionDAG &DAG) const { 2578 EVT PtrVT = Op.getValueType(); 2579 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); 2580 const BlockAddress *BA = BASDN->getBlockAddress(); 2581 2582 // 64-bit SVR4 ABI code is always position-independent. 2583 // The actual BlockAddress is stored in the TOC. 2584 if (Subtarget.isSVR4ABI() && isPositionIndependent()) { 2585 if (Subtarget.isPPC64()) 2586 setUsesTOCBasePtr(DAG); 2587 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); 2588 return getTOCEntry(DAG, SDLoc(BASDN), Subtarget.isPPC64(), GA); 2589 } 2590 2591 unsigned MOHiFlag, MOLoFlag; 2592 bool IsPIC = isPositionIndependent(); 2593 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2594 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 2595 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 2596 return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG); 2597 } 2598 2599 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 2600 SelectionDAG &DAG) const { 2601 // FIXME: TLS addresses currently use medium model code sequences, 2602 // which is the most useful form. Eventually support for small and 2603 // large models could be added if users need it, at the cost of 2604 // additional complexity. 2605 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2606 if (DAG.getTarget().useEmulatedTLS()) 2607 return LowerToTLSEmulatedModel(GA, DAG); 2608 2609 SDLoc dl(GA); 2610 const GlobalValue *GV = GA->getGlobal(); 2611 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2612 bool is64bit = Subtarget.isPPC64(); 2613 const Module *M = DAG.getMachineFunction().getFunction().getParent(); 2614 PICLevel::Level picLevel = M->getPICLevel(); 2615 2616 TLSModel::Model Model = getTargetMachine().getTLSModel(GV); 2617 2618 if (Model == TLSModel::LocalExec) { 2619 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2620 PPCII::MO_TPREL_HA); 2621 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2622 PPCII::MO_TPREL_LO); 2623 SDValue TLSReg = is64bit ? 
DAG.getRegister(PPC::X13, MVT::i64) 2624 : DAG.getRegister(PPC::R2, MVT::i32); 2625 2626 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 2627 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 2628 } 2629 2630 if (Model == TLSModel::InitialExec) { 2631 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2632 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2633 PPCII::MO_TLS); 2634 SDValue GOTPtr; 2635 if (is64bit) { 2636 setUsesTOCBasePtr(DAG); 2637 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2638 GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, 2639 PtrVT, GOTReg, TGA); 2640 } else 2641 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); 2642 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, 2643 PtrVT, TGA, GOTPtr); 2644 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 2645 } 2646 2647 if (Model == TLSModel::GeneralDynamic) { 2648 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2649 SDValue GOTPtr; 2650 if (is64bit) { 2651 setUsesTOCBasePtr(DAG); 2652 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2653 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 2654 GOTReg, TGA); 2655 } else { 2656 if (picLevel == PICLevel::SmallPIC) 2657 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2658 else 2659 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2660 } 2661 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT, 2662 GOTPtr, TGA, TGA); 2663 } 2664 2665 if (Model == TLSModel::LocalDynamic) { 2666 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2667 SDValue GOTPtr; 2668 if (is64bit) { 2669 setUsesTOCBasePtr(DAG); 2670 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2671 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 2672 GOTReg, TGA); 2673 } else { 2674 if (picLevel == PICLevel::SmallPIC) 2675 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2676 else 2677 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2678 } 2679 SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl, 2680 PtrVT, GOTPtr, TGA, TGA); 2681 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, 2682 PtrVT, TLSAddr, TGA); 2683 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA); 2684 } 2685 2686 llvm_unreachable("Unknown TLS model!"); 2687 } 2688 2689 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 2690 SelectionDAG &DAG) const { 2691 EVT PtrVT = Op.getValueType(); 2692 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 2693 SDLoc DL(GSDN); 2694 const GlobalValue *GV = GSDN->getGlobal(); 2695 2696 // 64-bit SVR4 ABI code is always position-independent. 2697 // The actual address of the GlobalValue is stored in the TOC. 
2698 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2699 setUsesTOCBasePtr(DAG); 2700 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); 2701 return getTOCEntry(DAG, DL, true, GA); 2702 } 2703 2704 unsigned MOHiFlag, MOLoFlag; 2705 bool IsPIC = isPositionIndependent(); 2706 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV); 2707 2708 if (IsPIC && Subtarget.isSVR4ABI()) { 2709 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 2710 GSDN->getOffset(), 2711 PPCII::MO_PIC_FLAG); 2712 return getTOCEntry(DAG, DL, false, GA); 2713 } 2714 2715 SDValue GAHi = 2716 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); 2717 SDValue GALo = 2718 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); 2719 2720 SDValue Ptr = LowerLabelRef(GAHi, GALo, IsPIC, DAG); 2721 2722 // If the global reference is actually to a non-lazy-pointer, we have to do an 2723 // extra load to get the address of the global. 2724 if (MOHiFlag & PPCII::MO_NLP_FLAG) 2725 Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo()); 2726 return Ptr; 2727 } 2728 2729 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 2730 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2731 SDLoc dl(Op); 2732 2733 if (Op.getValueType() == MVT::v2i64) { 2734 // When the operands themselves are v2i64 values, we need to do something 2735 // special because VSX has no underlying comparison operations for these. 2736 if (Op.getOperand(0).getValueType() == MVT::v2i64) { 2737 // Equality can be handled by casting to the legal type for Altivec 2738 // comparisons, everything else needs to be expanded. 2739 if (CC == ISD::SETEQ || CC == ISD::SETNE) { 2740 return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 2741 DAG.getSetCC(dl, MVT::v4i32, 2742 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)), 2743 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)), 2744 CC)); 2745 } 2746 2747 return SDValue(); 2748 } 2749 2750 // We handle most of these in the usual way. 2751 return Op; 2752 } 2753 2754 // If we're comparing for equality to zero, expose the fact that this is 2755 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can 2756 // fold the new nodes. 2757 if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG)) 2758 return V; 2759 2760 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2761 // Leave comparisons against 0 and -1 alone for now, since they're usually 2762 // optimized. FIXME: revisit this when we can custom lower all setcc 2763 // optimizations. 2764 if (C->isAllOnesValue() || C->isNullValue()) 2765 return SDValue(); 2766 } 2767 2768 // If we have an integer seteq/setne, turn it into a compare against zero 2769 // by xor'ing the rhs with the lhs, which is faster than setting a 2770 // condition register, reading it back out, and masking the correct bit. The 2771 // normal approach here uses sub to do this instead of xor. Using xor exposes 2772 // the result to other bit-twiddling opportunities. 
2773 EVT LHSVT = Op.getOperand(0).getValueType();
2774 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
2775 EVT VT = Op.getValueType();
2776 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
2777 Op.getOperand(1));
2778 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
2779 }
2780 return SDValue();
2781 }
2782
2783 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
2784 SDNode *Node = Op.getNode();
2785 EVT VT = Node->getValueType(0);
2786 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2787 SDValue InChain = Node->getOperand(0);
2788 SDValue VAListPtr = Node->getOperand(1);
2789 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2790 SDLoc dl(Node);
2791
2792 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
2793
2794 // gpr_index
2795 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
2796 VAListPtr, MachinePointerInfo(SV), MVT::i8);
2797 InChain = GprIndex.getValue(1);
2798
2799 if (VT == MVT::i64) {
2800 // Check if GprIndex is even
2801 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
2802 DAG.getConstant(1, dl, MVT::i32));
2803 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
2804 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
2805 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
2806 DAG.getConstant(1, dl, MVT::i32));
2807 // Align GprIndex to be even if it isn't
2808 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
2809 GprIndex);
2810 }
2811
2812 // fpr index is 1 byte after gpr
2813 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
2814 DAG.getConstant(1, dl, MVT::i32));
2815
2816 // fpr
2817 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
2818 FprPtr, MachinePointerInfo(SV), MVT::i8);
2819 InChain = FprIndex.getValue(1);
2820
2821 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
2822 DAG.getConstant(8, dl, MVT::i32));
2823
2824 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
2825 DAG.getConstant(4, dl, MVT::i32));
2826
2827 // areas
2828 SDValue OverflowArea =
2829 DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
2830 InChain = OverflowArea.getValue(1);
2831
2832 SDValue RegSaveArea =
2833 DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
2834 InChain = RegSaveArea.getValue(1);
2835
2836 // select overflow_area if index >= 8
2837 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
2838 DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
2839
2840 // adjustment constant gpr_index * 4/8
2841 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
2842 VT.isInteger() ? GprIndex : FprIndex,
2843 DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
2844 MVT::i32));
2845
2846 // OurReg = RegSaveArea + RegConstant
2847 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
2848 RegConstant);
2849
2850 // Floating types are 32 bytes into RegSaveArea
2851 if (VT.isFloatingPoint())
2852 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
2853 DAG.getConstant(32, dl, MVT::i32));
2854
2855 // increase {f,g}pr_index by 1 (or 2 if VT is i64)
2856 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
2857 VT.isInteger() ? GprIndex : FprIndex,
2858 DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
2859 MVT::i32));
2860
2861 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
2862 VT.isInteger() ?
VAListPtr : FprPtr, 2863 MachinePointerInfo(SV), MVT::i8); 2864 2865 // determine if we should load from reg_save_area or overflow_area 2866 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea); 2867 2868 // increase overflow_area by 4/8 if gpr/fpr > 8 2869 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea, 2870 DAG.getConstant(VT.isInteger() ? 4 : 8, 2871 dl, MVT::i32)); 2872 2873 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea, 2874 OverflowAreaPlusN); 2875 2876 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr, 2877 MachinePointerInfo(), MVT::i32); 2878 2879 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo()); 2880 } 2881 2882 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { 2883 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only"); 2884 2885 // We have to copy the entire va_list struct: 2886 // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte 2887 return DAG.getMemcpy(Op.getOperand(0), Op, 2888 Op.getOperand(1), Op.getOperand(2), 2889 DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true, 2890 false, MachinePointerInfo(), MachinePointerInfo()); 2891 } 2892 2893 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 2894 SelectionDAG &DAG) const { 2895 return Op.getOperand(0); 2896 } 2897 2898 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 2899 SelectionDAG &DAG) const { 2900 SDValue Chain = Op.getOperand(0); 2901 SDValue Trmp = Op.getOperand(1); // trampoline 2902 SDValue FPtr = Op.getOperand(2); // nested function 2903 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 2904 SDLoc dl(Op); 2905 2906 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2907 bool isPPC64 = (PtrVT == MVT::i64); 2908 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext()); 2909 2910 TargetLowering::ArgListTy Args; 2911 TargetLowering::ArgListEntry Entry; 2912 2913 Entry.Ty = IntPtrTy; 2914 Entry.Node = Trmp; Args.push_back(Entry); 2915 2916 // TrampSize == (isPPC64 ? 48 : 40); 2917 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl, 2918 isPPC64 ? MVT::i64 : MVT::i32); 2919 Args.push_back(Entry); 2920 2921 Entry.Node = FPtr; Args.push_back(Entry); 2922 Entry.Node = Nest; Args.push_back(Entry); 2923 2924 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 2925 TargetLowering::CallLoweringInfo CLI(DAG); 2926 CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( 2927 CallingConv::C, Type::getVoidTy(*DAG.getContext()), 2928 DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args)); 2929 2930 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 2931 return CallResult.second; 2932 } 2933 2934 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 2935 MachineFunction &MF = DAG.getMachineFunction(); 2936 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2937 EVT PtrVT = getPointerTy(MF.getDataLayout()); 2938 2939 SDLoc dl(Op); 2940 2941 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) { 2942 // vastart just stores the address of the VarArgsFrameIndex slot into the 2943 // memory location argument. 2944 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2945 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2946 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2947 MachinePointerInfo(SV)); 2948 } 2949 2950 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 
2951 // We suppose the given va_list is already allocated. 2952 // 2953 // typedef struct { 2954 // char gpr; /* index into the array of 8 GPRs 2955 // * stored in the register save area 2956 // * gpr=0 corresponds to r3, 2957 // * gpr=1 to r4, etc. 2958 // */ 2959 // char fpr; /* index into the array of 8 FPRs 2960 // * stored in the register save area 2961 // * fpr=0 corresponds to f1, 2962 // * fpr=1 to f2, etc. 2963 // */ 2964 // char *overflow_arg_area; 2965 // /* location on stack that holds 2966 // * the next overflow argument 2967 // */ 2968 // char *reg_save_area; 2969 // /* where r3:r10 and f1:f8 (if saved) 2970 // * are stored 2971 // */ 2972 // } va_list[1]; 2973 2974 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32); 2975 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32); 2976 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 2977 PtrVT); 2978 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 2979 PtrVT); 2980 2981 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 2982 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT); 2983 2984 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 2985 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT); 2986 2987 uint64_t FPROffset = 1; 2988 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT); 2989 2990 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2991 2992 // Store first byte : number of int regs 2993 SDValue firstStore = 2994 DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1), 2995 MachinePointerInfo(SV), MVT::i8); 2996 uint64_t nextOffset = FPROffset; 2997 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 2998 ConstFPROffset); 2999 3000 // Store second byte : number of float regs 3001 SDValue secondStore = 3002 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 3003 MachinePointerInfo(SV, nextOffset), MVT::i8); 3004 nextOffset += StackOffset; 3005 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 3006 3007 // Store second word : arguments given on stack 3008 SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 3009 MachinePointerInfo(SV, nextOffset)); 3010 nextOffset += FrameOffset; 3011 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 3012 3013 // Store third word : arguments given in registers 3014 return DAG.getStore(thirdStore, dl, FR, nextPtr, 3015 MachinePointerInfo(SV, nextOffset)); 3016 } 3017 3018 #include "PPCGenCallingConv.inc" 3019 3020 // Function whose sole purpose is to kill compiler warnings 3021 // stemming from unused functions included from PPCGenCallingConv.inc. 3022 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const { 3023 return Flag ? 
                                   CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS;
}

bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                      CCValAssign::LocInfo &LocInfo,
                                      ISD::ArgFlagsTy &ArgFlags,
                                      CCState &State) {
  return true;
}

bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
                                             MVT &LocVT,
                                             CCValAssign::LocInfo &LocInfo,
                                             ISD::ArgFlagsTy &ArgFlags,
                                             CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // Skip one register if the first unallocated register has an even register
  // number and there are still argument registers available which have not
  // been allocated yet. RegNum is actually an index into ArgRegs, which means
  // we need to skip a register if RegNum is odd.
  if (RegNum != NumArgRegs && RegNum % 2 == 1) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the first
  // unallocated register has an odd register number and does not actually
  // allocate a register for the current argument.
  return false;
}

bool
llvm::CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128(unsigned &ValNo, MVT &ValVT,
                                                  MVT &LocVT,
                                                  CCValAssign::LocInfo &LocInfo,
                                                  ISD::ArgFlagsTy &ArgFlags,
                                                  CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);
  int RegsLeft = NumArgRegs - RegNum;

  // Skip the remaining registers if there are not enough of them left for a
  // long double (4 GPRs in soft-float mode), so that the long double argument
  // is put on the stack instead.
  if (RegNum != NumArgRegs && RegsLeft < 4) {
    for (int i = 0; i < RegsLeft; i++) {
      State.AllocateReg(ArgRegs[RegNum + i]);
    }
  }

  return false;
}

bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
                                               MVT &LocVT,
                                               CCValAssign::LocInfo &LocInfo,
                                               ISD::ArgFlagsTy &ArgFlags,
                                               CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8
  };

  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // If there is only one floating-point register left, we need to put both
  // f64 values of a split ppc_fp128 value on the stack.
  if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the two
  // f64 values a ppc_fp128 value is split into are both passed in registers
  // or both passed on the stack and does not actually allocate a register for
  // the current argument.
  return false;
}

/// FPR - The set of FP registers that should be allocated for arguments,
/// on Darwin.
3115 static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, 3116 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10, 3117 PPC::F11, PPC::F12, PPC::F13}; 3118 3119 /// QFPR - The set of QPX registers that should be allocated for arguments. 3120 static const MCPhysReg QFPR[] = { 3121 PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7, 3122 PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13}; 3123 3124 /// CalculateStackSlotSize - Calculates the size reserved for this argument on 3125 /// the stack. 3126 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 3127 unsigned PtrByteSize) { 3128 unsigned ArgSize = ArgVT.getStoreSize(); 3129 if (Flags.isByVal()) 3130 ArgSize = Flags.getByValSize(); 3131 3132 // Round up to multiples of the pointer size, except for array members, 3133 // which are always packed. 3134 if (!Flags.isInConsecutiveRegs()) 3135 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3136 3137 return ArgSize; 3138 } 3139 3140 /// CalculateStackSlotAlignment - Calculates the alignment of this argument 3141 /// on the stack. 3142 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, 3143 ISD::ArgFlagsTy Flags, 3144 unsigned PtrByteSize) { 3145 unsigned Align = PtrByteSize; 3146 3147 // Altivec parameters are padded to a 16 byte boundary. 3148 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 3149 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 3150 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 3151 ArgVT == MVT::v1i128) 3152 Align = 16; 3153 // QPX vector types stored in double-precision are padded to a 32 byte 3154 // boundary. 3155 else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1) 3156 Align = 32; 3157 3158 // ByVal parameters are aligned as requested. 3159 if (Flags.isByVal()) { 3160 unsigned BVAlign = Flags.getByValAlign(); 3161 if (BVAlign > PtrByteSize) { 3162 if (BVAlign % PtrByteSize != 0) 3163 llvm_unreachable( 3164 "ByVal alignment is not a multiple of the pointer size"); 3165 3166 Align = BVAlign; 3167 } 3168 } 3169 3170 // Array members are always packed to their original alignment. 3171 if (Flags.isInConsecutiveRegs()) { 3172 // If the array member was split into multiple registers, the first 3173 // needs to be aligned to the size of the full type. (Except for 3174 // ppcf128, which is only aligned as its f64 components.) 3175 if (Flags.isSplit() && OrigVT != MVT::ppcf128) 3176 Align = OrigVT.getStoreSize(); 3177 else 3178 Align = ArgVT.getStoreSize(); 3179 } 3180 3181 return Align; 3182 } 3183 3184 /// CalculateStackSlotUsed - Return whether this argument will use its 3185 /// stack slot (instead of being passed in registers). ArgOffset, 3186 /// AvailableFPRs, and AvailableVRs must hold the current argument 3187 /// position, and will be updated to account for this argument. 3188 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, 3189 ISD::ArgFlagsTy Flags, 3190 unsigned PtrByteSize, 3191 unsigned LinkageSize, 3192 unsigned ParamAreaSize, 3193 unsigned &ArgOffset, 3194 unsigned &AvailableFPRs, 3195 unsigned &AvailableVRs, bool HasQPX) { 3196 bool UseMemory = false; 3197 3198 // Respect alignment of argument on the stack. 3199 unsigned Align = 3200 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 3201 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 3202 // If there's no space left in the argument save area, we must 3203 // use memory (this check also catches zero-sized arguments). 
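  // As a concrete illustration: on 64-bit ELFv1 the linkage area is 48 bytes
  // and ParamAreaSize is 8 GPRs * 8 bytes = 64 bytes, so an argument whose
  // aligned offset has already reached 112 lies past the register-backed
  // parameter save area and must be passed in memory.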
3204 if (ArgOffset >= LinkageSize + ParamAreaSize) 3205 UseMemory = true; 3206 3207 // Allocate argument on the stack. 3208 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 3209 if (Flags.isInConsecutiveRegsLast()) 3210 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3211 // If we overran the argument save area, we must use memory 3212 // (this check catches arguments passed partially in memory) 3213 if (ArgOffset > LinkageSize + ParamAreaSize) 3214 UseMemory = true; 3215 3216 // However, if the argument is actually passed in an FPR or a VR, 3217 // we don't use memory after all. 3218 if (!Flags.isByVal()) { 3219 if (ArgVT == MVT::f32 || ArgVT == MVT::f64 || 3220 // QPX registers overlap with the scalar FP registers. 3221 (HasQPX && (ArgVT == MVT::v4f32 || 3222 ArgVT == MVT::v4f64 || 3223 ArgVT == MVT::v4i1))) 3224 if (AvailableFPRs > 0) { 3225 --AvailableFPRs; 3226 return false; 3227 } 3228 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 3229 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 3230 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 3231 ArgVT == MVT::v1i128) 3232 if (AvailableVRs > 0) { 3233 --AvailableVRs; 3234 return false; 3235 } 3236 } 3237 3238 return UseMemory; 3239 } 3240 3241 /// EnsureStackAlignment - Round stack frame size up from NumBytes to 3242 /// ensure minimum alignment required for target. 3243 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, 3244 unsigned NumBytes) { 3245 unsigned TargetAlign = Lowering->getStackAlignment(); 3246 unsigned AlignMask = TargetAlign - 1; 3247 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 3248 return NumBytes; 3249 } 3250 3251 SDValue PPCTargetLowering::LowerFormalArguments( 3252 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3253 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3254 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3255 if (Subtarget.isSVR4ABI()) { 3256 if (Subtarget.isPPC64()) 3257 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, 3258 dl, DAG, InVals); 3259 else 3260 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, 3261 dl, DAG, InVals); 3262 } else { 3263 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, 3264 dl, DAG, InVals); 3265 } 3266 } 3267 3268 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4( 3269 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3270 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3271 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3272 3273 // 32-bit SVR4 ABI Stack Frame Layout: 3274 // +-----------------------------------+ 3275 // +--> | Back chain | 3276 // | +-----------------------------------+ 3277 // | | Floating-point register save area | 3278 // | +-----------------------------------+ 3279 // | | General register save area | 3280 // | +-----------------------------------+ 3281 // | | CR save word | 3282 // | +-----------------------------------+ 3283 // | | VRSAVE save word | 3284 // | +-----------------------------------+ 3285 // | | Alignment padding | 3286 // | +-----------------------------------+ 3287 // | | Vector register save area | 3288 // | +-----------------------------------+ 3289 // | | Local variable space | 3290 // | +-----------------------------------+ 3291 // | | Parameter list area | 3292 // | +-----------------------------------+ 3293 // | | LR save word | 3294 // | +-----------------------------------+ 3295 // SP--> +--- | Back chain | 3296 // +-----------------------------------+ 
3297 // 3298 // Specifications: 3299 // System V Application Binary Interface PowerPC Processor Supplement 3300 // AltiVec Technology Programming Interface Manual 3301 3302 MachineFunction &MF = DAG.getMachineFunction(); 3303 MachineFrameInfo &MFI = MF.getFrameInfo(); 3304 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3305 3306 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3307 // Potential tail calls could cause overwriting of argument stack slots. 3308 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3309 (CallConv == CallingConv::Fast)); 3310 unsigned PtrByteSize = 4; 3311 3312 // Assign locations to all of the incoming arguments. 3313 SmallVector<CCValAssign, 16> ArgLocs; 3314 PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 3315 *DAG.getContext()); 3316 3317 // Reserve space for the linkage area on the stack. 3318 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3319 CCInfo.AllocateStack(LinkageSize, PtrByteSize); 3320 if (useSoftFloat()) 3321 CCInfo.PreAnalyzeFormalArguments(Ins); 3322 3323 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 3324 CCInfo.clearWasPPCF128(); 3325 3326 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 3327 CCValAssign &VA = ArgLocs[i]; 3328 3329 // Arguments stored in registers. 3330 if (VA.isRegLoc()) { 3331 const TargetRegisterClass *RC; 3332 EVT ValVT = VA.getValVT(); 3333 3334 switch (ValVT.getSimpleVT().SimpleTy) { 3335 default: 3336 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 3337 case MVT::i1: 3338 case MVT::i32: 3339 RC = &PPC::GPRCRegClass; 3340 break; 3341 case MVT::f32: 3342 if (Subtarget.hasP8Vector()) 3343 RC = &PPC::VSSRCRegClass; 3344 else 3345 RC = &PPC::F4RCRegClass; 3346 break; 3347 case MVT::f64: 3348 if (Subtarget.hasVSX()) 3349 RC = &PPC::VSFRCRegClass; 3350 else 3351 RC = &PPC::F8RCRegClass; 3352 break; 3353 case MVT::v16i8: 3354 case MVT::v8i16: 3355 case MVT::v4i32: 3356 RC = &PPC::VRRCRegClass; 3357 break; 3358 case MVT::v4f32: 3359 RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass; 3360 break; 3361 case MVT::v2f64: 3362 case MVT::v2i64: 3363 RC = &PPC::VRRCRegClass; 3364 break; 3365 case MVT::v4f64: 3366 RC = &PPC::QFRCRegClass; 3367 break; 3368 case MVT::v4i1: 3369 RC = &PPC::QBRCRegClass; 3370 break; 3371 } 3372 3373 // Transform the arguments stored in physical registers into virtual ones. 3374 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 3375 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, 3376 ValVT == MVT::i1 ? MVT::i32 : ValVT); 3377 3378 if (ValVT == MVT::i1) 3379 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); 3380 3381 InVals.push_back(ArgValue); 3382 } else { 3383 // Argument stored in memory. 3384 assert(VA.isMemLoc()); 3385 3386 unsigned ArgSize = VA.getLocVT().getStoreSize(); 3387 int FI = MFI.CreateFixedObject(ArgSize, VA.getLocMemOffset(), 3388 isImmutable); 3389 3390 // Create load nodes to retrieve arguments from the stack. 3391 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3392 InVals.push_back( 3393 DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo())); 3394 } 3395 } 3396 3397 // Assign locations to all of the incoming aggregate by value arguments. 3398 // Aggregates passed by value are stored in the local variable space of the 3399 // caller's stack frame, right above the parameter list area. 
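  // As an illustration (hypothetical signature): for 'void f(struct S s)'
  // with sizeof(struct S) == 8, the caller places the eight bytes of s in its
  // own frame above the parameter list area, and the fixed frame objects
  // created below refer directly to that storage.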
3400 SmallVector<CCValAssign, 16> ByValArgLocs; 3401 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 3402 ByValArgLocs, *DAG.getContext()); 3403 3404 // Reserve stack space for the allocations in CCInfo. 3405 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 3406 3407 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 3408 3409 // Area that is at least reserved in the caller of this function. 3410 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 3411 MinReservedArea = std::max(MinReservedArea, LinkageSize); 3412 3413 // Set the size that is at least reserved in caller of this function. Tail 3414 // call optimized function's reserved stack space needs to be aligned so that 3415 // taking the difference between two stack areas will result in an aligned 3416 // stack. 3417 MinReservedArea = 3418 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 3419 FuncInfo->setMinReservedArea(MinReservedArea); 3420 3421 SmallVector<SDValue, 8> MemOps; 3422 3423 // If the function takes variable number of arguments, make a frame index for 3424 // the start of the first vararg value... for expansion of llvm.va_start. 3425 if (isVarArg) { 3426 static const MCPhysReg GPArgRegs[] = { 3427 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3428 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3429 }; 3430 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); 3431 3432 static const MCPhysReg FPArgRegs[] = { 3433 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 3434 PPC::F8 3435 }; 3436 unsigned NumFPArgRegs = array_lengthof(FPArgRegs); 3437 3438 if (useSoftFloat()) 3439 NumFPArgRegs = 0; 3440 3441 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs)); 3442 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs)); 3443 3444 // Make room for NumGPArgRegs and NumFPArgRegs. 3445 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + 3446 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8; 3447 3448 FuncInfo->setVarArgsStackOffset( 3449 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8, 3450 CCInfo.getNextStackOffset(), true)); 3451 3452 FuncInfo->setVarArgsFrameIndex(MFI.CreateStackObject(Depth, 8, false)); 3453 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3454 3455 // The fixed integer arguments of a variadic function are stored to the 3456 // VarArgsFrameIndex on the stack so that they may be loaded by 3457 // dereferencing the result of va_next. 3458 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) { 3459 // Get an existing live-in vreg, or add a new one. 3460 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]); 3461 if (!VReg) 3462 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); 3463 3464 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3465 SDValue Store = 3466 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3467 MemOps.push_back(Store); 3468 // Increment the address by four for the next argument to store 3469 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); 3470 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3471 } 3472 3473 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 3474 // is set. 3475 // The double arguments are stored to the VarArgsFrameIndex 3476 // on the stack. 3477 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) { 3478 // Get an existing live-in vreg, or add a new one. 
3479 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); 3480 if (!VReg) 3481 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 3482 3483 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 3484 SDValue Store = 3485 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3486 MemOps.push_back(Store); 3487 // Increment the address by eight for the next argument to store 3488 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl, 3489 PtrVT); 3490 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3491 } 3492 } 3493 3494 if (!MemOps.empty()) 3495 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3496 3497 return Chain; 3498 } 3499 3500 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3501 // value to MVT::i64 and then truncate to the correct register size. 3502 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, 3503 EVT ObjectVT, SelectionDAG &DAG, 3504 SDValue ArgVal, 3505 const SDLoc &dl) const { 3506 if (Flags.isSExt()) 3507 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 3508 DAG.getValueType(ObjectVT)); 3509 else if (Flags.isZExt()) 3510 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 3511 DAG.getValueType(ObjectVT)); 3512 3513 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal); 3514 } 3515 3516 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4( 3517 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3518 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3519 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3520 // TODO: add description of PPC stack frame format, or at least some docs. 3521 // 3522 bool isELFv2ABI = Subtarget.isELFv2ABI(); 3523 bool isLittleEndian = Subtarget.isLittleEndian(); 3524 MachineFunction &MF = DAG.getMachineFunction(); 3525 MachineFrameInfo &MFI = MF.getFrameInfo(); 3526 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3527 3528 assert(!(CallConv == CallingConv::Fast && isVarArg) && 3529 "fastcc not supported on varargs functions"); 3530 3531 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3532 // Potential tail calls could cause overwriting of argument stack slots. 3533 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3534 (CallConv == CallingConv::Fast)); 3535 unsigned PtrByteSize = 8; 3536 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3537 3538 static const MCPhysReg GPR[] = { 3539 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3540 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3541 }; 3542 static const MCPhysReg VR[] = { 3543 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3544 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3545 }; 3546 3547 const unsigned Num_GPR_Regs = array_lengthof(GPR); 3548 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13; 3549 const unsigned Num_VR_Regs = array_lengthof(VR); 3550 const unsigned Num_QFPR_Regs = Num_FPR_Regs; 3551 3552 // Do a first pass over the arguments to determine whether the ABI 3553 // guarantees that our caller has allocated the parameter save area 3554 // on its stack frame. In the ELFv1 ABI, this is always the case; 3555 // in the ELFv2 ABI, it is true if this is a vararg function or if 3556 // any parameter is located in a stack slot. 
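  // For instance (hypothetical signatures): under ELFv2, 'long f(long a,
  // long b)' receives both arguments in GPRs and its caller may omit the
  // 64-byte parameter save area entirely, whereas a vararg function, or one
  // with nine or more integer arguments, forces the area to exist.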
3557 3558 bool HasParameterArea = !isELFv2ABI || isVarArg; 3559 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize; 3560 unsigned NumBytes = LinkageSize; 3561 unsigned AvailableFPRs = Num_FPR_Regs; 3562 unsigned AvailableVRs = Num_VR_Regs; 3563 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 3564 if (Ins[i].Flags.isNest()) 3565 continue; 3566 3567 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags, 3568 PtrByteSize, LinkageSize, ParamAreaSize, 3569 NumBytes, AvailableFPRs, AvailableVRs, 3570 Subtarget.hasQPX())) 3571 HasParameterArea = true; 3572 } 3573 3574 // Add DAG nodes to load the arguments or copy them out of registers. On 3575 // entry to a function on PPC, the arguments start after the linkage area, 3576 // although the first ones are often in registers. 3577 3578 unsigned ArgOffset = LinkageSize; 3579 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3580 unsigned &QFPR_idx = FPR_idx; 3581 SmallVector<SDValue, 8> MemOps; 3582 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin(); 3583 unsigned CurArgIdx = 0; 3584 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3585 SDValue ArgVal; 3586 bool needsLoad = false; 3587 EVT ObjectVT = Ins[ArgNo].VT; 3588 EVT OrigVT = Ins[ArgNo].ArgVT; 3589 unsigned ObjSize = ObjectVT.getStoreSize(); 3590 unsigned ArgSize = ObjSize; 3591 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3592 if (Ins[ArgNo].isOrigArg()) { 3593 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 3594 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 3595 } 3596 // We re-align the argument offset for each argument, except when using the 3597 // fast calling convention, when we need to make sure we do that only when 3598 // we'll actually use a stack slot. 3599 unsigned CurArgOffset, Align; 3600 auto ComputeArgOffset = [&]() { 3601 /* Respect alignment of argument on the stack. */ 3602 Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize); 3603 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 3604 CurArgOffset = ArgOffset; 3605 }; 3606 3607 if (CallConv != CallingConv::Fast) { 3608 ComputeArgOffset(); 3609 3610 /* Compute GPR index associated with argument offset. */ 3611 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 3612 GPR_idx = std::min(GPR_idx, Num_GPR_Regs); 3613 } 3614 3615 // FIXME the codegen can be much improved in some cases. 3616 // We do not have to keep everything in memory. 3617 if (Flags.isByVal()) { 3618 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 3619 3620 if (CallConv == CallingConv::Fast) 3621 ComputeArgOffset(); 3622 3623 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3624 ObjSize = Flags.getByValSize(); 3625 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3626 // Empty aggregate parameters do not take up registers. Examples: 3627 // struct { } a; 3628 // union { } b; 3629 // int c[0]; 3630 // etc. However, we have to provide a place-holder in InVals, so 3631 // pretend we have an 8-byte item at the current address for that 3632 // purpose. 3633 if (!ObjSize) { 3634 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true); 3635 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3636 InVals.push_back(FIN); 3637 continue; 3638 } 3639 3640 // Create a stack object covering all stack doublewords occupied 3641 // by the argument. 
If the argument is (fully or partially) on 3642 // the stack, or if the argument is fully in registers but the 3643 // caller has allocated the parameter save anyway, we can refer 3644 // directly to the caller's stack frame. Otherwise, create a 3645 // local copy in our own frame. 3646 int FI; 3647 if (HasParameterArea || 3648 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize) 3649 FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true); 3650 else 3651 FI = MFI.CreateStackObject(ArgSize, Align, false); 3652 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3653 3654 // Handle aggregates smaller than 8 bytes. 3655 if (ObjSize < PtrByteSize) { 3656 // The value of the object is its address, which differs from the 3657 // address of the enclosing doubleword on big-endian systems. 3658 SDValue Arg = FIN; 3659 if (!isLittleEndian) { 3660 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT); 3661 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff); 3662 } 3663 InVals.push_back(Arg); 3664 3665 if (GPR_idx != Num_GPR_Regs) { 3666 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3667 FuncInfo->addLiveInAttr(VReg, Flags); 3668 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3669 SDValue Store; 3670 3671 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 3672 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 3673 (ObjSize == 2 ? MVT::i16 : MVT::i32)); 3674 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg, 3675 MachinePointerInfo(&*FuncArg), ObjType); 3676 } else { 3677 // For sizes that don't fit a truncating store (3, 5, 6, 7), 3678 // store the whole register as-is to the parameter save area 3679 // slot. 3680 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3681 MachinePointerInfo(&*FuncArg)); 3682 } 3683 3684 MemOps.push_back(Store); 3685 } 3686 // Whether we copied from a register or not, advance the offset 3687 // into the parameter save area by a full doubleword. 3688 ArgOffset += PtrByteSize; 3689 continue; 3690 } 3691 3692 // The value of the object is its address, which is the address of 3693 // its first stack doubleword. 3694 InVals.push_back(FIN); 3695 3696 // Store whatever pieces of the object are in registers to memory. 3697 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3698 if (GPR_idx == Num_GPR_Regs) 3699 break; 3700 3701 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3702 FuncInfo->addLiveInAttr(VReg, Flags); 3703 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3704 SDValue Addr = FIN; 3705 if (j) { 3706 SDValue Off = DAG.getConstant(j, dl, PtrVT); 3707 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off); 3708 } 3709 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr, 3710 MachinePointerInfo(&*FuncArg, j)); 3711 MemOps.push_back(Store); 3712 ++GPR_idx; 3713 } 3714 ArgOffset += ArgSize; 3715 continue; 3716 } 3717 3718 switch (ObjectVT.getSimpleVT().SimpleTy) { 3719 default: llvm_unreachable("Unhandled argument type!"); 3720 case MVT::i1: 3721 case MVT::i32: 3722 case MVT::i64: 3723 if (Flags.isNest()) { 3724 // The 'nest' parameter, if any, is passed in R11. 3725 unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass); 3726 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3727 3728 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3729 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3730 3731 break; 3732 } 3733 3734 // These can be scalar arguments or elements of an integer array type 3735 // passed directly. 
// Clang may use those instead of "byval" aggregate types to avoid
// forcing arguments to memory unnecessarily.
if (GPR_idx != Num_GPR_Regs) {
  unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
  FuncInfo->addLiveInAttr(VReg, Flags);
  ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

  if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
    // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
    // value to MVT::i64 and then truncate to the correct register size.
    ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
} else {
  if (CallConv == CallingConv::Fast)
    ComputeArgOffset();

  needsLoad = true;
  ArgSize = PtrByteSize;
}
if (CallConv != CallingConv::Fast || needsLoad)
  ArgOffset += 8;
break;

case MVT::f32:
case MVT::f64:
  // These can be scalar arguments or elements of a float array type
  // passed directly. The latter are used to implement ELFv2 homogeneous
  // float aggregates.
  if (FPR_idx != Num_FPR_Regs) {
    unsigned VReg;

    if (ObjectVT == MVT::f32)
      VReg = MF.addLiveIn(FPR[FPR_idx],
                          Subtarget.hasP8Vector()
                              ? &PPC::VSSRCRegClass
                              : &PPC::F4RCRegClass);
    else
      VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
                                            ? &PPC::VSFRCRegClass
                                            : &PPC::F8RCRegClass);

    ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
    ++FPR_idx;
  } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
    // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
    // once we support fp <-> gpr moves.

    // This can only ever happen in the presence of f32 array types,
    // since otherwise we never run out of FPRs before running out
    // of GPRs.
    unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
    FuncInfo->addLiveInAttr(VReg, Flags);
    ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

    if (ObjectVT == MVT::f32) {
      if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
        ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
                             DAG.getConstant(32, dl, MVT::i32));
      ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
    }

    ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
  } else {
    if (CallConv == CallingConv::Fast)
      ComputeArgOffset();

    needsLoad = true;
  }

  // When passing an array of floats, the array occupies consecutive
  // space in the argument area; only round up to the next doubleword
  // at the end of the array. Otherwise, each float takes 8 bytes.
  if (CallConv != CallingConv::Fast || needsLoad) {
    ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
    ArgOffset += ArgSize;
    if (Flags.isInConsecutiveRegsLast())
      ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
  }
  break;
case MVT::v4f32:
case MVT::v4i32:
case MVT::v8i16:
case MVT::v16i8:
case MVT::v2f64:
case MVT::v2i64:
case MVT::v1i128:
  if (!Subtarget.hasQPX()) {
    // These can be scalar arguments or elements of a vector array type
    // passed directly. The latter are used to implement ELFv2 homogeneous
    // vector aggregates.
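    // For example (illustrative only), an ELFv2 homogeneous aggregate such
    // as 'struct V { vector int a, b; }' arrives here as two v4i32 elements
    // in consecutive vector registers starting at V2.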
3824 if (VR_idx != Num_VR_Regs) { 3825 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 3826 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3827 ++VR_idx; 3828 } else { 3829 if (CallConv == CallingConv::Fast) 3830 ComputeArgOffset(); 3831 needsLoad = true; 3832 } 3833 if (CallConv != CallingConv::Fast || needsLoad) 3834 ArgOffset += 16; 3835 break; 3836 } // not QPX 3837 3838 assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 && 3839 "Invalid QPX parameter type"); 3840 /* fall through */ 3841 3842 case MVT::v4f64: 3843 case MVT::v4i1: 3844 // QPX vectors are treated like their scalar floating-point subregisters 3845 // (except that they're larger). 3846 unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32; 3847 if (QFPR_idx != Num_QFPR_Regs) { 3848 const TargetRegisterClass *RC; 3849 switch (ObjectVT.getSimpleVT().SimpleTy) { 3850 case MVT::v4f64: RC = &PPC::QFRCRegClass; break; 3851 case MVT::v4f32: RC = &PPC::QSRCRegClass; break; 3852 default: RC = &PPC::QBRCRegClass; break; 3853 } 3854 3855 unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC); 3856 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3857 ++QFPR_idx; 3858 } else { 3859 if (CallConv == CallingConv::Fast) 3860 ComputeArgOffset(); 3861 needsLoad = true; 3862 } 3863 if (CallConv != CallingConv::Fast || needsLoad) 3864 ArgOffset += Sz; 3865 break; 3866 } 3867 3868 // We need to load the argument to a virtual register if we determined 3869 // above that we ran out of physical registers of the appropriate type. 3870 if (needsLoad) { 3871 if (ObjSize < ArgSize && !isLittleEndian) 3872 CurArgOffset += ArgSize - ObjSize; 3873 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable); 3874 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3875 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo()); 3876 } 3877 3878 InVals.push_back(ArgVal); 3879 } 3880 3881 // Area that is at least reserved in the caller of this function. 3882 unsigned MinReservedArea; 3883 if (HasParameterArea) 3884 MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize); 3885 else 3886 MinReservedArea = LinkageSize; 3887 3888 // Set the size that is at least reserved in caller of this function. Tail 3889 // call optimized functions' reserved stack space needs to be aligned so that 3890 // taking the difference between two stack areas will result in an aligned 3891 // stack. 3892 MinReservedArea = 3893 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 3894 FuncInfo->setMinReservedArea(MinReservedArea); 3895 3896 // If the function takes variable number of arguments, make a frame index for 3897 // the start of the first vararg value... for expansion of llvm.va_start. 3898 if (isVarArg) { 3899 int Depth = ArgOffset; 3900 3901 FuncInfo->setVarArgsFrameIndex( 3902 MFI.CreateFixedObject(PtrByteSize, Depth, true)); 3903 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3904 3905 // If this function is vararg, store any remaining integer argument regs 3906 // to their spots on the stack so that they may be loaded by dereferencing 3907 // the result of va_next. 
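    // For example, if a single fixed i64 parameter was seen, GPR_idx starts
    // at 1 and X4 through X10 are spilled to their home doublewords so that
    // va_arg can treat the remaining arguments as ordinary memory.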
    for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
         GPR_idx < Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by eight for the next argument to store.
      SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  // TODO: add description of PPC stack frame format, or at least some docs.
  //
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = isPPC64 ? 8 : 4;
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned ArgOffset = LinkageSize;
  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea = ArgOffset;

  static const MCPhysReg GPR_32[] = { // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const MCPhysReg GPR_64[] = { // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
  const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
  const unsigned Num_VR_Regs = array_lengthof(VR);

  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;

  // In 32-bit non-varargs functions, the stack space for vectors is after the
  // stack space for non-vectors. We do not use this space unless we have
  // too many vectors to fit in registers, something that only occurs in
  // constructed examples, but we have to walk the arglist to figure that
  // out. For the pathological case, compute VecArgOffset as the start of
  // the vector parameter area. Computing VecArgOffset is the entire point
  // of the following loop.
  unsigned VecArgOffset = ArgOffset;
  if (!isVarArg && !isPPC64) {
    for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
         ++ArgNo) {
      EVT ObjectVT = Ins[ArgNo].VT;
      ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;

      if (Flags.isByVal()) {
        // ObjSize is the true size; ArgSize is ObjSize rounded up to a
        // multiple of registers.
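        // For instance, a 10-byte struct has ObjSize == 10 and, with 4-byte
        // registers here, ArgSize == 12.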
3985 unsigned ObjSize = Flags.getByValSize(); 3986 unsigned ArgSize = 3987 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3988 VecArgOffset += ArgSize; 3989 continue; 3990 } 3991 3992 switch(ObjectVT.getSimpleVT().SimpleTy) { 3993 default: llvm_unreachable("Unhandled argument type!"); 3994 case MVT::i1: 3995 case MVT::i32: 3996 case MVT::f32: 3997 VecArgOffset += 4; 3998 break; 3999 case MVT::i64: // PPC64 4000 case MVT::f64: 4001 // FIXME: We are guaranteed to be !isPPC64 at this point. 4002 // Does MVT::i64 apply? 4003 VecArgOffset += 8; 4004 break; 4005 case MVT::v4f32: 4006 case MVT::v4i32: 4007 case MVT::v8i16: 4008 case MVT::v16i8: 4009 // Nothing to do, we're only looking at Nonvector args here. 4010 break; 4011 } 4012 } 4013 } 4014 // We've found where the vector parameter area in memory is. Skip the 4015 // first 12 parameters; these don't use that memory. 4016 VecArgOffset = ((VecArgOffset+15)/16)*16; 4017 VecArgOffset += 12*16; 4018 4019 // Add DAG nodes to load the arguments or copy them out of registers. On 4020 // entry to a function on PPC, the arguments start after the linkage area, 4021 // although the first ones are often in registers. 4022 4023 SmallVector<SDValue, 8> MemOps; 4024 unsigned nAltivecParamsAtEnd = 0; 4025 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin(); 4026 unsigned CurArgIdx = 0; 4027 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 4028 SDValue ArgVal; 4029 bool needsLoad = false; 4030 EVT ObjectVT = Ins[ArgNo].VT; 4031 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 4032 unsigned ArgSize = ObjSize; 4033 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 4034 if (Ins[ArgNo].isOrigArg()) { 4035 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 4036 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 4037 } 4038 unsigned CurArgOffset = ArgOffset; 4039 4040 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 4041 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 4042 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 4043 if (isVarArg || isPPC64) { 4044 MinReservedArea = ((MinReservedArea+15)/16)*16; 4045 MinReservedArea += CalculateStackSlotSize(ObjectVT, 4046 Flags, 4047 PtrByteSize); 4048 } else nAltivecParamsAtEnd++; 4049 } else 4050 // Calculate min reserved area. 4051 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 4052 Flags, 4053 PtrByteSize); 4054 4055 // FIXME the codegen can be much improved in some cases. 4056 // We do not have to keep everything in memory. 4057 if (Flags.isByVal()) { 4058 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 4059 4060 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 4061 ObjSize = Flags.getByValSize(); 4062 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 4063 // Objects of size 1 and 2 are right justified, everything else is 4064 // left justified. This means the memory address is adjusted forwards. 4065 if (ObjSize==1 || ObjSize==2) { 4066 CurArgOffset = CurArgOffset + (4 - ObjSize); 4067 } 4068 // The value of the object is its address. 
4069 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true); 4070 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4071 InVals.push_back(FIN); 4072 if (ObjSize==1 || ObjSize==2) { 4073 if (GPR_idx != Num_GPR_Regs) { 4074 unsigned VReg; 4075 if (isPPC64) 4076 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4077 else 4078 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4079 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4080 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16; 4081 SDValue Store = 4082 DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 4083 MachinePointerInfo(&*FuncArg), ObjType); 4084 MemOps.push_back(Store); 4085 ++GPR_idx; 4086 } 4087 4088 ArgOffset += PtrByteSize; 4089 4090 continue; 4091 } 4092 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 4093 // Store whatever pieces of the object are in registers 4094 // to memory. ArgOffset will be the address of the beginning 4095 // of the object. 4096 if (GPR_idx != Num_GPR_Regs) { 4097 unsigned VReg; 4098 if (isPPC64) 4099 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4100 else 4101 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4102 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true); 4103 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4104 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4105 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 4106 MachinePointerInfo(&*FuncArg, j)); 4107 MemOps.push_back(Store); 4108 ++GPR_idx; 4109 ArgOffset += PtrByteSize; 4110 } else { 4111 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 4112 break; 4113 } 4114 } 4115 continue; 4116 } 4117 4118 switch (ObjectVT.getSimpleVT().SimpleTy) { 4119 default: llvm_unreachable("Unhandled argument type!"); 4120 case MVT::i1: 4121 case MVT::i32: 4122 if (!isPPC64) { 4123 if (GPR_idx != Num_GPR_Regs) { 4124 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4125 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 4126 4127 if (ObjectVT == MVT::i1) 4128 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal); 4129 4130 ++GPR_idx; 4131 } else { 4132 needsLoad = true; 4133 ArgSize = PtrByteSize; 4134 } 4135 // All int arguments reserve stack space in the Darwin ABI. 4136 ArgOffset += PtrByteSize; 4137 break; 4138 } 4139 LLVM_FALLTHROUGH; 4140 case MVT::i64: // PPC64 4141 if (GPR_idx != Num_GPR_Regs) { 4142 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4143 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 4144 4145 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 4146 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 4147 // value to MVT::i64 and then truncate to the correct register size. 4148 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 4149 4150 ++GPR_idx; 4151 } else { 4152 needsLoad = true; 4153 ArgSize = PtrByteSize; 4154 } 4155 // All int arguments reserve stack space in the Darwin ABI. 4156 ArgOffset += 8; 4157 break; 4158 4159 case MVT::f32: 4160 case MVT::f64: 4161 // Every 4 bytes of argument space consumes one of the GPRs available for 4162 // argument passing. 
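      // For example, on 32-bit Darwin an f64 argument shadows two GPRs even
      // though the value itself arrives in an FPR, which is why GPR_idx is
      // advanced below without copying anything out of those registers.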
4163 if (GPR_idx != Num_GPR_Regs) { 4164 ++GPR_idx; 4165 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 4166 ++GPR_idx; 4167 } 4168 if (FPR_idx != Num_FPR_Regs) { 4169 unsigned VReg; 4170 4171 if (ObjectVT == MVT::f32) 4172 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 4173 else 4174 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 4175 4176 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4177 ++FPR_idx; 4178 } else { 4179 needsLoad = true; 4180 } 4181 4182 // All FP arguments reserve stack space in the Darwin ABI. 4183 ArgOffset += isPPC64 ? 8 : ObjSize; 4184 break; 4185 case MVT::v4f32: 4186 case MVT::v4i32: 4187 case MVT::v8i16: 4188 case MVT::v16i8: 4189 // Note that vector arguments in registers don't reserve stack space, 4190 // except in varargs functions. 4191 if (VR_idx != Num_VR_Regs) { 4192 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 4193 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4194 if (isVarArg) { 4195 while ((ArgOffset % 16) != 0) { 4196 ArgOffset += PtrByteSize; 4197 if (GPR_idx != Num_GPR_Regs) 4198 GPR_idx++; 4199 } 4200 ArgOffset += 16; 4201 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 4202 } 4203 ++VR_idx; 4204 } else { 4205 if (!isVarArg && !isPPC64) { 4206 // Vectors go after all the nonvectors. 4207 CurArgOffset = VecArgOffset; 4208 VecArgOffset += 16; 4209 } else { 4210 // Vectors are aligned. 4211 ArgOffset = ((ArgOffset+15)/16)*16; 4212 CurArgOffset = ArgOffset; 4213 ArgOffset += 16; 4214 } 4215 needsLoad = true; 4216 } 4217 break; 4218 } 4219 4220 // We need to load the argument to a virtual register if we determined above 4221 // that we ran out of physical registers of the appropriate type. 4222 if (needsLoad) { 4223 int FI = MFI.CreateFixedObject(ObjSize, 4224 CurArgOffset + (ArgSize - ObjSize), 4225 isImmutable); 4226 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4227 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo()); 4228 } 4229 4230 InVals.push_back(ArgVal); 4231 } 4232 4233 // Allow for Altivec parameters at the end, if needed. 4234 if (nAltivecParamsAtEnd) { 4235 MinReservedArea = ((MinReservedArea+15)/16)*16; 4236 MinReservedArea += 16*nAltivecParamsAtEnd; 4237 } 4238 4239 // Area that is at least reserved in the caller of this function. 4240 MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize); 4241 4242 // Set the size that is at least reserved in caller of this function. Tail 4243 // call optimized functions' reserved stack space needs to be aligned so that 4244 // taking the difference between two stack areas will result in an aligned 4245 // stack. 4246 MinReservedArea = 4247 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 4248 FuncInfo->setMinReservedArea(MinReservedArea); 4249 4250 // If the function takes variable number of arguments, make a frame index for 4251 // the start of the first vararg value... for expansion of llvm.va_start. 4252 if (isVarArg) { 4253 int Depth = ArgOffset; 4254 4255 FuncInfo->setVarArgsFrameIndex( 4256 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8, 4257 Depth, true)); 4258 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 4259 4260 // If this function is vararg, store any remaining integer argument regs 4261 // to their spots on the stack so that they may be loaded by dereferencing 4262 // the result of va_next. 
    for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg;

      if (isPPC64)
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      else
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by the pointer size for the next argument to
      // store.
      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
/// adjusted to accommodate the arguments for the tail call.
static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
                                   unsigned ParamSize) {

  if (!isTailCall) return 0;

  PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
  unsigned CallerMinReservedArea = FI->getMinReservedArea();
  int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
  // Remember only if the new adjustment is bigger.
  if (SPDiff < FI->getTailCallSPDelta())
    FI->setTailCallSPDelta(SPDiff);

  return SPDiff;
}

static bool isFunctionGlobalAddress(SDValue Callee);

static bool
callsShareTOCBase(const Function *Caller, SDValue Callee,
                  const TargetMachine &TM) {
  // If !G, Callee can be an external symbol.
  GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
  if (!G)
    return false;

  // The medium and large code models are expected to provide a sufficiently
  // large TOC to provide all data addressing needs of a module with a
  // single TOC. Since each module will be addressed with a single TOC, we
  // only need to check that caller and callee don't cross DSO boundaries.
  if (CodeModel::Medium == TM.getCodeModel() ||
      CodeModel::Large == TM.getCodeModel())
    return TM.shouldAssumeDSOLocal(*Caller->getParent(), G->getGlobal());

  // Otherwise we need to ensure callee and caller are in the same section,
  // since the linker may allocate multiple TOCs, and we don't know which
  // sections will belong to the same TOC base.

  const GlobalValue *GV = G->getGlobal();
  if (!GV->isStrongDefinitionForLinker())
    return false;

  // Any explicitly-specified sections and section prefixes must also match.
  // Also, if we're using -ffunction-sections, then each function is always in
  // a different section (the same is true for COMDAT functions).
  if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
      GV->getSection() != Caller->getSection())
    return false;
  if (const auto *F = dyn_cast<Function>(GV)) {
    if (F->getSectionPrefix() != Caller->getSectionPrefix())
      return false;
  }

  // If the callee might be interposed, then we can't assume the ultimate call
  // target will be in the same section. Even in cases where we can assume that
  // interposition won't happen, in any case where the linker might insert a
  // stub to allow for interposition, we must generate code as though
  // interposition might occur. To understand why this matters, consider a
  // situation where: a -> b -> c where the arrows indicate calls. b and c are
  // in the same section, but a is in a different module (i.e. has a different
  // TOC base pointer). If the linker allows for interposition between b and c,
  // then it will generate a stub for the call edge between b and c which will
  // save the TOC pointer into the designated stack slot allocated by b. If we
  // return true here, and therefore allow a tail call between b and c, that
  // stack slot won't exist and the b -> c stub will end up saving b's TOC base
  // pointer into the stack slot allocated by a (where the a -> b stub saved
  // a's TOC base pointer). If we're not considering a tail call, but rather
  // whether a nop is needed after the call instruction in b because the linker
  // will insert a stub, it might complain about a missing nop if we omit it
  // (although many linkers don't complain in this case).
  if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
    return false;

  return true;
}

static bool
needStackSlotPassParameters(const PPCSubtarget &Subtarget,
                            const SmallVectorImpl<ISD::OutputArg> &Outs) {
  assert(Subtarget.isSVR4ABI() && Subtarget.isPPC64());

  const unsigned PtrByteSize = 8;
  const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned NumGPRs = array_lengthof(GPR);
  const unsigned NumFPRs = 13;
  const unsigned NumVRs = array_lengthof(VR);
  const unsigned ParamAreaSize = NumGPRs * PtrByteSize;

  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = NumFPRs;
  unsigned AvailableVRs = NumVRs;

  for (const ISD::OutputArg& Param : Outs) {
    if (Param.Flags.isNest()) continue;

    if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
                               PtrByteSize, LinkageSize, ParamAreaSize,
                               NumBytes, AvailableFPRs, AvailableVRs,
                               Subtarget.hasQPX()))
      return true;
  }
  return false;
}

static bool
hasSameArgumentList(const Function *CallerFn, ImmutableCallSite CS) {
  if (CS.arg_size() != CallerFn->arg_size())
    return false;

  ImmutableCallSite::arg_iterator CalleeArgIter = CS.arg_begin();
  ImmutableCallSite::arg_iterator CalleeArgEnd = CS.arg_end();
  Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();

  for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
    const Value* CalleeArg = *CalleeArgIter;
    const Value* CallerArg = &(*CallerArgIter);
    if (CalleeArg == CallerArg)
      continue;

    // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
    //        tail call @callee([4 x i64] undef, [4 x i64] %b)
    //      }
    // Here the 1st argument of the callee is undef and has the same type as
    // the caller's corresponding argument.
    if (CalleeArg->getType() == CallerArg->getType() &&
        isa<UndefValue>(CalleeArg))
      continue;

    return false;
  }

  return true;
}

// Returns true if TCO is possible between the caller's and callee's
// calling conventions.
4433 static bool 4434 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC, 4435 CallingConv::ID CalleeCC) { 4436 // Tail calls are possible with fastcc and ccc. 4437 auto isTailCallableCC = [] (CallingConv::ID CC){ 4438 return CC == CallingConv::C || CC == CallingConv::Fast; 4439 }; 4440 if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC)) 4441 return false; 4442 4443 // We can safely tail call both fastcc and ccc callees from a c calling 4444 // convention caller. If the caller is fastcc, we may have less stack space 4445 // than a non-fastcc caller with the same signature, so disable tail calls in 4446 // that case. 4447 return CallerCC == CallingConv::C || CallerCC == CalleeCC; 4448 } 4449 4450 bool 4451 PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4( 4452 SDValue Callee, 4453 CallingConv::ID CalleeCC, 4454 ImmutableCallSite CS, 4455 bool isVarArg, 4456 const SmallVectorImpl<ISD::OutputArg> &Outs, 4457 const SmallVectorImpl<ISD::InputArg> &Ins, 4458 SelectionDAG& DAG) const { 4459 bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt; 4460 4461 if (DisableSCO && !TailCallOpt) return false; 4462 4463 // Variadic argument functions are not supported. 4464 if (isVarArg) return false; 4465 4466 auto &Caller = DAG.getMachineFunction().getFunction(); 4467 // Check that the calling conventions are compatible for TCO. 4468 if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC)) 4469 return false; 4470 4471 // A caller with a byval parameter is not supported. 4472 if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); })) 4473 return false; 4474 4475 // A callee with a byval parameter is not supported either. 4476 // Note: This is a quick workaround, because in some cases, e.g. 4477 // caller's stack size > callee's stack size, we are still able to apply 4478 // sibling call optimization. For example, gcc is able to do SCO for caller1 4479 // in the following example, but not for caller2. 4480 // struct test { 4481 // long int a; 4482 // char ary[56]; 4483 // } gTest; 4484 // __attribute__((noinline)) int callee(struct test v, struct test *b) { 4485 // b->a = v.a; 4486 // return 0; 4487 // } 4488 // void caller1(struct test a, struct test c, struct test *b) { 4489 // callee(gTest, b); } 4490 // void caller2(struct test *b) { callee(gTest, b); } 4491 if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); })) 4492 return false; 4493 4494 // If callee and caller use different calling conventions, we cannot pass 4495 // parameters on stack since offsets for the parameter area may be different. 4496 if (Caller.getCallingConv() != CalleeCC && 4497 needStackSlotPassParameters(Subtarget, Outs)) 4498 return false; 4499 4500 // No TCO/SCO on an indirect call, because the caller has to restore its TOC. 4501 if (!isFunctionGlobalAddress(Callee) && 4502 !isa<ExternalSymbolSDNode>(Callee)) 4503 return false; 4504 4505 // If the caller and callee potentially have different TOC bases then we 4506 // cannot tail call since we need to restore the TOC pointer after the call. 4507 // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977 4508 if (!callsShareTOCBase(&Caller, Callee, getTargetMachine())) 4509 return false; 4510 4511 // TCO allows altering callee ABI, so we don't have to check further.
4512 if (CalleeCC == CallingConv::Fast && TailCallOpt) 4513 return true; 4514 4515 if (DisableSCO) return false; 4516 4517 // If the callee uses the same argument list as the caller, we can apply 4518 // SCO in this case. If not, we need to check whether the callee needs 4519 // stack slots for passing arguments. 4520 if (!hasSameArgumentList(&Caller, CS) && 4521 needStackSlotPassParameters(Subtarget, Outs)) { 4522 return false; 4523 } 4524 4525 return true; 4526 } 4527 4528 /// IsEligibleForTailCallOptimization - Check whether the call is eligible 4529 /// for tail call optimization. Targets which want to do tail call 4530 /// optimization should implement this function. 4531 bool 4532 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 4533 CallingConv::ID CalleeCC, 4534 bool isVarArg, 4535 const SmallVectorImpl<ISD::InputArg> &Ins, 4536 SelectionDAG& DAG) const { 4537 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 4538 return false; 4539 4540 // Variable argument functions are not supported. 4541 if (isVarArg) 4542 return false; 4543 4544 MachineFunction &MF = DAG.getMachineFunction(); 4545 CallingConv::ID CallerCC = MF.getFunction().getCallingConv(); 4546 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) { 4547 // Functions containing byval parameters are not supported. 4548 for (unsigned i = 0; i != Ins.size(); i++) { 4549 ISD::ArgFlagsTy Flags = Ins[i].Flags; 4550 if (Flags.isByVal()) return false; 4551 } 4552 4553 // Non-PIC/GOT tail calls are supported. 4554 if (getTargetMachine().getRelocationModel() != Reloc::PIC_) 4555 return true; 4556 4557 // At the moment we can only do local tail calls (in the same module, hidden 4558 // or protected) if we are generating PIC. 4559 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 4560 return G->getGlobal()->hasHiddenVisibility() 4561 || G->getGlobal()->hasProtectedVisibility(); 4562 } 4563 4564 return false; 4565 } 4566 4567 /// isBLACompatibleAddress - Return the immediate to use if the specified 4568 /// 32-bit value is representable in the immediate field of a BxA instruction. 4569 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) { 4570 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 4571 if (!C) return nullptr; 4572 4573 int Addr = C->getZExtValue(); 4574 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero. 4575 SignExtend32<26>(Addr) != Addr) 4576 return nullptr; // Top 6 bits have to be sext of immediate. 4577 4578 return DAG 4579 .getConstant( 4580 (int)C->getZExtValue() >> 2, SDLoc(Op), 4581 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout())) 4582 .getNode(); 4583 } 4584 4585 namespace { 4586 4587 struct TailCallArgumentInfo { 4588 SDValue Arg; 4589 SDValue FrameIdxOp; 4590 int FrameIdx = 0; 4591 4592 TailCallArgumentInfo() = default; 4593 }; 4594 4595 } // end anonymous namespace 4596 4597 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. 4598 static void StoreTailCallArgumentsToStackSlot( 4599 SelectionDAG &DAG, SDValue Chain, 4600 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs, 4601 SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) { 4602 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 4603 SDValue Arg = TailCallArgs[i].Arg; 4604 SDValue FIN = TailCallArgs[i].FrameIdxOp; 4605 int FI = TailCallArgs[i].FrameIdx; 4606 // Store relative to the frame pointer.
4607 MemOpChains.push_back(DAG.getStore( 4608 Chain, dl, Arg, FIN, 4609 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI))); 4610 } 4611 } 4612 4613 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 4614 /// the appropriate stack slot for the tail call optimized function call. 4615 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain, 4616 SDValue OldRetAddr, SDValue OldFP, 4617 int SPDiff, const SDLoc &dl) { 4618 if (SPDiff) { 4619 // Calculate the new stack slot for the return address. 4620 MachineFunction &MF = DAG.getMachineFunction(); 4621 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); 4622 const PPCFrameLowering *FL = Subtarget.getFrameLowering(); 4623 bool isPPC64 = Subtarget.isPPC64(); 4624 int SlotSize = isPPC64 ? 8 : 4; 4625 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset(); 4626 int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize, 4627 NewRetAddrLoc, true); 4628 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 4629 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 4630 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx, 4631 MachinePointerInfo::getFixedStack(MF, NewRetAddr)); 4632 4633 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack 4634 // slot as the FP is never overwritten. 4635 if (Subtarget.isDarwinABI()) { 4636 int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset(); 4637 int NewFPIdx = MF.getFrameInfo().CreateFixedObject(SlotSize, NewFPLoc, 4638 true); 4639 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 4640 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx, 4641 MachinePointerInfo::getFixedStack( 4642 DAG.getMachineFunction(), NewFPIdx)); 4643 } 4644 } 4645 return Chain; 4646 } 4647 4648 /// CalculateTailCallArgDest - Remember the argument for later processing. 4649 /// Calculate the position of the argument. 4650 static void 4651 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 4652 SDValue Arg, int SPDiff, unsigned ArgOffset, 4653 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) { 4654 int Offset = ArgOffset + SPDiff; 4655 uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8; 4656 int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true); 4657 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 4658 SDValue FIN = DAG.getFrameIndex(FI, VT); 4659 TailCallArgumentInfo Info; 4660 Info.Arg = Arg; 4661 Info.FrameIdxOp = FIN; 4662 Info.FrameIdx = FI; 4663 TailCallArguments.push_back(Info); 4664 } 4665 4666 /// EmitTailCallLoadFPAndRetAddr - Emit loads from the frame pointer and return 4667 /// address stack slots. Returns the chain as result and the loaded return 4668 /// address and frame pointer in LROpOut/FPOpOut. Used when tail calling. 4669 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr( 4670 SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut, 4671 SDValue &FPOpOut, const SDLoc &dl) const { 4672 if (SPDiff) { 4673 // Load the LR and FP stack slots for later adjusting. 4674 EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32; 4675 LROpOut = getReturnAddrFrameIndex(DAG); 4676 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo()); 4677 Chain = SDValue(LROpOut.getNode(), 1); 4678 4679 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack 4680 // slot as the FP is never overwritten.
4681 if (Subtarget.isDarwinABI()) { 4682 FPOpOut = getFramePointerFrameIndex(DAG); 4683 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo()); 4684 Chain = SDValue(FPOpOut.getNode(), 1); 4685 } 4686 } 4687 return Chain; 4688 } 4689 4690 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 4691 /// by "Src" to address "Dst" of size "Size". Alignment information is 4692 /// specified by the specific parameter attribute. The copy will be passed as 4693 /// a byval function parameter. 4694 /// Sometimes what we are copying is the end of a larger object, the part that 4695 /// does not fit in registers. 4696 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, 4697 SDValue Chain, ISD::ArgFlagsTy Flags, 4698 SelectionDAG &DAG, const SDLoc &dl) { 4699 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32); 4700 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 4701 false, false, false, MachinePointerInfo(), 4702 MachinePointerInfo()); 4703 } 4704 4705 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 4706 /// tail calls. 4707 static void LowerMemOpCallTo( 4708 SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, 4709 SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, 4710 bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains, 4711 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) { 4712 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4713 if (!isTailCall) { 4714 if (isVector) { 4715 SDValue StackPtr; 4716 if (isPPC64) 4717 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 4718 else 4719 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 4720 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4721 DAG.getConstant(ArgOffset, dl, PtrVT)); 4722 } 4723 MemOpChains.push_back( 4724 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 4725 // Calculate and remember argument location. 4726 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 4727 TailCallArguments); 4728 } 4729 4730 static void 4731 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 4732 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp, 4733 SDValue FPOp, 4734 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 4735 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 4736 // might overwrite each other in case of tail call optimization. 4737 SmallVector<SDValue, 8> MemOpChains2; 4738 // Do not flag preceding copytoreg stuff together with the following stuff. 4739 InFlag = SDValue(); 4740 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 4741 MemOpChains2, dl); 4742 if (!MemOpChains2.empty()) 4743 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); 4744 4745 // Store the return address to the appropriate stack slot. 4746 Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl); 4747 4748 // Emit callseq_end just before tailcall node. 4749 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4750 DAG.getIntPtrConstant(0, dl, true), InFlag, dl); 4751 InFlag = Chain.getValue(1); 4752 } 4753 4754 // Is this global address that of a function that can be called by name? (as 4755 // opposed to something that must hold a descriptor for an indirect call). 
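// Illustrative example (hypothetical IR): for "define void @foo()", the
// callee operand is a GlobalAddressSDNode whose global has a function value
// type, so the predicate below returns true. A thread-local global reaches
// us as a (Target)GlobalTLSAddress node and returns false, because a call
// through it is really an indirect call via a thread-specific pointer.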
4756 static bool isFunctionGlobalAddress(SDValue Callee) { 4757 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 4758 if (Callee.getOpcode() == ISD::GlobalTLSAddress || 4759 Callee.getOpcode() == ISD::TargetGlobalTLSAddress) 4760 return false; 4761 4762 return G->getGlobal()->getValueType()->isFunctionTy(); 4763 } 4764 4765 return false; 4766 } 4767 4768 static unsigned 4769 PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain, 4770 SDValue CallSeqStart, const SDLoc &dl, int SPDiff, bool isTailCall, 4771 bool isPatchPoint, bool hasNest, 4772 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, 4773 SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys, 4774 ImmutableCallSite CS, const PPCSubtarget &Subtarget) { 4775 bool isPPC64 = Subtarget.isPPC64(); 4776 bool isSVR4ABI = Subtarget.isSVR4ABI(); 4777 bool isELFv2ABI = Subtarget.isELFv2ABI(); 4778 4779 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4780 NodeTys.push_back(MVT::Other); // Returns a chain 4781 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. 4782 4783 unsigned CallOpc = PPCISD::CALL; 4784 4785 bool needIndirectCall = true; 4786 if (!isSVR4ABI || !isPPC64) 4787 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { 4788 // If this is an absolute destination address, use the munged value. 4789 Callee = SDValue(Dest, 0); 4790 needIndirectCall = false; 4791 } 4792 4793 // PC-relative references to external symbols should go through $stub, unless 4794 // we're building with the leopard linker or later, which automatically 4795 // synthesizes these stubs. 4796 const TargetMachine &TM = DAG.getTarget(); 4797 const Module *Mod = DAG.getMachineFunction().getFunction().getParent(); 4798 const GlobalValue *GV = nullptr; 4799 if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) 4800 GV = G->getGlobal(); 4801 bool Local = TM.shouldAssumeDSOLocal(*Mod, GV); 4802 bool UsePlt = !Local && Subtarget.isTargetELF() && !isPPC64; 4803 4804 if (isFunctionGlobalAddress(Callee)) { 4805 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee); 4806 // A call to a TLS address is actually an indirect call to a 4807 // thread-specific pointer. 4808 unsigned OpFlags = 0; 4809 if (UsePlt) 4810 OpFlags = PPCII::MO_PLT; 4811 4812 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, 4813 // every direct call is) turn it into a TargetGlobalAddress / 4814 // TargetExternalSymbol node so that legalize doesn't hack it. 4815 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, 4816 Callee.getValueType(), 0, OpFlags); 4817 needIndirectCall = false; 4818 } 4819 4820 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 4821 unsigned char OpFlags = 0; 4822 4823 if (UsePlt) 4824 OpFlags = PPCII::MO_PLT; 4825 4826 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(), 4827 OpFlags); 4828 needIndirectCall = false; 4829 } 4830 4831 if (isPatchPoint) { 4832 // We'll form an invalid direct call when lowering a patchpoint; the full 4833 // sequence for an indirect call is complicated, and many of the 4834 // instructions introduced might have side effects (and, thus, can't be 4835 // removed later). The call itself will be removed as soon as the 4836 // argument/return lowering is complete, so the fact that it has the wrong 4837 // kind of operands should not really matter. 4838 needIndirectCall = false; 4839 } 4840 4841 if (needIndirectCall) { 4842 // Otherwise, this is an indirect call. 
We have to use an MTCTR/BCTRL pair 4843 // to do the call; we can't use PPCISD::CALL. 4844 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 4845 4846 if (isSVR4ABI && isPPC64 && !isELFv2ABI) { 4847 // Function pointers in the 64-bit SVR4 ABI do not point to the function 4848 // entry point, but to the function descriptor (the function entry point 4849 // address is part of the function descriptor though). 4850 // The function descriptor is a three-doubleword structure with the 4851 // following fields: function entry point, TOC base address and 4852 // environment pointer. 4853 // Thus for a call through a function pointer, the following actions need 4854 // to be performed: 4855 // 1. Save the TOC of the caller in the TOC save area of its stack 4856 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). 4857 // 2. Load the address of the function entry point from the function 4858 // descriptor. 4859 // 3. Load the TOC of the callee from the function descriptor into r2. 4860 // 4. Load the environment pointer from the function descriptor into 4861 // r11. 4862 // 5. Branch to the function entry point address. 4863 // 6. On return of the callee, the TOC of the caller needs to be 4864 // restored (this is done in FinishCall()). 4865 // 4866 // The loads are scheduled at the beginning of the call sequence, and the 4867 // register copies are flagged together to ensure that no other 4868 // operations can be scheduled in between. E.g. without flagging the 4869 // copies together, a TOC access in the caller could be scheduled between 4870 // the assignment of the callee TOC and the branch to the callee, which 4871 // results in the TOC access going through the TOC of the callee instead 4872 // of going through the TOC of the caller, which leads to incorrect code. 4873 4874 // Load the address of the function entry point from the function 4875 // descriptor. 4876 SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1); 4877 if (LDChain.getValueType() == MVT::Glue) 4878 LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2); 4879 4880 auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors() 4881 ? (MachineMemOperand::MODereferenceable | 4882 MachineMemOperand::MOInvariant) 4883 : MachineMemOperand::MONone; 4884 4885 MachinePointerInfo MPI(CS ? CS.getCalledValue() : nullptr); 4886 SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI, 4887 /* Alignment = */ 8, MMOFlags); 4888 4889 // Load environment pointer into r11. 4890 SDValue PtrOff = DAG.getIntPtrConstant(16, dl); 4891 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 4892 SDValue LoadEnvPtr = 4893 DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, MPI.getWithOffset(16), 4894 /* Alignment = */ 8, MMOFlags); 4895 4896 SDValue TOCOff = DAG.getIntPtrConstant(8, dl); 4897 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff); 4898 SDValue TOCPtr = 4899 DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, MPI.getWithOffset(8), 4900 /* Alignment = */ 8, MMOFlags); 4901 4902 setUsesTOCBasePtr(DAG); 4903 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr, 4904 InFlag); 4905 Chain = TOCVal.getValue(0); 4906 InFlag = TOCVal.getValue(1); 4907 4908 // If the function call has an explicit 'nest' parameter, it takes the 4909 // place of the environment pointer.
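// Illustrative example (hypothetical IR): for "declare void @f(i8* nest
// %ctx)", the nest argument itself is passed in X11, so the copy of the
// descriptor's environment pointer into X11 below is skipped to avoid
// clobbering it.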
4910 if (!hasNest) { 4911 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 4912 InFlag); 4913 4914 Chain = EnvVal.getValue(0); 4915 InFlag = EnvVal.getValue(1); 4916 } 4917 4918 MTCTROps[0] = Chain; 4919 MTCTROps[1] = LoadFuncPtr; 4920 MTCTROps[2] = InFlag; 4921 } 4922 4923 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, 4924 makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2)); 4925 InFlag = Chain.getValue(1); 4926 4927 NodeTys.clear(); 4928 NodeTys.push_back(MVT::Other); 4929 NodeTys.push_back(MVT::Glue); 4930 Ops.push_back(Chain); 4931 CallOpc = PPCISD::BCTRL; 4932 Callee.setNode(nullptr); 4933 // Add use of X11 (holding environment pointer) 4934 if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest) 4935 Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); 4936 // Add CTR register as callee so a bctr can be emitted later. 4937 if (isTailCall) 4938 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 4939 } 4940 4941 // If this is a direct call, pass the chain and the callee. 4942 if (Callee.getNode()) { 4943 Ops.push_back(Chain); 4944 Ops.push_back(Callee); 4945 } 4946 // If this is a tail call add stack pointer delta. 4947 if (isTailCall) 4948 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32)); 4949 4950 // Add argument registers to the end of the list so that they are known live 4951 // into the call. 4952 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 4953 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 4954 RegsToPass[i].second.getValueType())); 4955 4956 // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live 4957 // into the call. 4958 if (isSVR4ABI && isPPC64 && !isPatchPoint) { 4959 setUsesTOCBasePtr(DAG); 4960 Ops.push_back(DAG.getRegister(PPC::X2, PtrVT)); 4961 } 4962 4963 return CallOpc; 4964 } 4965 4966 SDValue PPCTargetLowering::LowerCallResult( 4967 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, 4968 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 4969 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 4970 SmallVector<CCValAssign, 16> RVLocs; 4971 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 4972 *DAG.getContext()); 4973 4974 CCRetInfo.AnalyzeCallResult( 4975 Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 4976 ? RetCC_PPC_Cold 4977 : RetCC_PPC); 4978 4979 // Copy all of the result registers out of their specified physreg. 
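// Illustrative example (a sketch, not tied to a specific test): an i32
// value returned as zeroext under the 64-bit ABI comes back with LocVT
// i64; the ZExt case below wraps the copy in AssertZext and then truncates
// it back to the i32 ValVT.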
4980 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 4981 CCValAssign &VA = RVLocs[i]; 4982 assert(VA.isRegLoc() && "Can only return in registers!"); 4983 4984 SDValue Val = DAG.getCopyFromReg(Chain, dl, 4985 VA.getLocReg(), VA.getLocVT(), InFlag); 4986 Chain = Val.getValue(1); 4987 InFlag = Val.getValue(2); 4988 4989 switch (VA.getLocInfo()) { 4990 default: llvm_unreachable("Unknown loc info!"); 4991 case CCValAssign::Full: break; 4992 case CCValAssign::AExt: 4993 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4994 break; 4995 case CCValAssign::ZExt: 4996 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val, 4997 DAG.getValueType(VA.getValVT())); 4998 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4999 break; 5000 case CCValAssign::SExt: 5001 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val, 5002 DAG.getValueType(VA.getValVT())); 5003 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 5004 break; 5005 } 5006 5007 InVals.push_back(Val); 5008 } 5009 5010 return Chain; 5011 } 5012 5013 SDValue PPCTargetLowering::FinishCall( 5014 CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg, 5015 bool isPatchPoint, bool hasNest, SelectionDAG &DAG, 5016 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue InFlag, 5017 SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff, 5018 unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins, 5019 SmallVectorImpl<SDValue> &InVals, ImmutableCallSite CS) const { 5020 std::vector<EVT> NodeTys; 5021 SmallVector<SDValue, 8> Ops; 5022 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl, 5023 SPDiff, isTailCall, isPatchPoint, hasNest, 5024 RegsToPass, Ops, NodeTys, CS, Subtarget); 5025 5026 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls. 5027 if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64()) 5028 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); 5029 5030 // When performing tail call optimization, the callee pops its arguments off 5031 // the stack. Account for this here so these bytes can be pushed back on in 5032 // PPCFrameLowering::eliminateCallFramePseudoInstr. 5033 int BytesCalleePops = 5034 (CallConv == CallingConv::Fast && 5035 getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0; 5036 5037 // Add a register mask operand representing the call-preserved registers. 5038 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 5039 const uint32_t *Mask = 5040 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv); 5041 assert(Mask && "Missing call preserved mask for calling convention"); 5042 Ops.push_back(DAG.getRegisterMask(Mask)); 5043 5044 if (InFlag.getNode()) 5045 Ops.push_back(InFlag); 5046 5047 // Emit tail call. 5048 if (isTailCall) { 5049 assert(((Callee.getOpcode() == ISD::Register && 5050 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) || 5051 Callee.getOpcode() == ISD::TargetExternalSymbol || 5052 Callee.getOpcode() == ISD::TargetGlobalAddress || 5053 isa<ConstantSDNode>(Callee)) && 5054 "Expecting a global address, external symbol, absolute value or register"); 5055 5056 DAG.getMachineFunction().getFrameInfo().setHasTailCall(); 5057 return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops); 5058 } 5059 5060 // Add a NOP immediately after the branch instruction when using the 64-bit 5061 // SVR4 ABI.
At link time, if caller and callee are in different modules and 5062 // thus have different TOCs, the call will be replaced with a call to a stub 5063 // function which saves the current TOC, loads the TOC of the callee and 5064 // branches to the callee. The NOP will be replaced with a load instruction 5065 // which restores the TOC of the caller from the TOC save slot of the current 5066 // stack frame. If caller and callee belong to the same module (and have the 5067 // same TOC), the NOP will remain unchanged. 5068 5069 MachineFunction &MF = DAG.getMachineFunction(); 5070 if (!isTailCall && Subtarget.isSVR4ABI() && Subtarget.isPPC64() && 5071 !isPatchPoint) { 5072 if (CallOpc == PPCISD::BCTRL) { 5073 // This is a call through a function pointer. 5074 // Restore the caller's TOC from the save area into R2. 5075 // See PrepareCall() for more information about calls through function 5076 // pointers in the 64-bit SVR4 ABI. 5077 // We are using a target-specific load with r2 hard coded, because the 5078 // result of a target-independent load would never go directly into r2, 5079 // since r2 is a reserved register (which prevents the register allocator 5080 // from allocating it), resulting in an additional register being 5081 // allocated and an unnecessary move instruction being generated. 5082 CallOpc = PPCISD::BCTRL_LOAD_TOC; 5083 5084 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 5085 SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT); 5086 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset(); 5087 SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl); 5088 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff); 5089 5090 // The address needs to go after the chain input but before the flag (or 5091 // any other variadic arguments). 5092 Ops.insert(std::next(Ops.begin()), AddTOC); 5093 } else if (CallOpc == PPCISD::CALL && 5094 !callsShareTOCBase(&MF.getFunction(), Callee, DAG.getTarget())) { 5095 // Otherwise insert a NOP for non-local calls.
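// Illustrative encoding note: the emitted sequence is "bl callee" followed
// by "nop"; when the linker routes the call through a TOC-saving stub, it
// rewrites the nop into the TOC restore, e.g. "ld 2, 24(1)" for the ELFv2
// TOC save slot.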
5096 CallOpc = PPCISD::CALL_NOP; 5097 } 5098 } 5099 5100 Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops); 5101 InFlag = Chain.getValue(1); 5102 5103 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 5104 DAG.getIntPtrConstant(BytesCalleePops, dl, true), 5105 InFlag, dl); 5106 if (!Ins.empty()) 5107 InFlag = Chain.getValue(1); 5108 5109 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 5110 Ins, dl, DAG, InVals); 5111 } 5112 5113 SDValue 5114 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 5115 SmallVectorImpl<SDValue> &InVals) const { 5116 SelectionDAG &DAG = CLI.DAG; 5117 SDLoc &dl = CLI.DL; 5118 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 5119 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 5120 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 5121 SDValue Chain = CLI.Chain; 5122 SDValue Callee = CLI.Callee; 5123 bool &isTailCall = CLI.IsTailCall; 5124 CallingConv::ID CallConv = CLI.CallConv; 5125 bool isVarArg = CLI.IsVarArg; 5126 bool isPatchPoint = CLI.IsPatchPoint; 5127 ImmutableCallSite CS = CLI.CS; 5128 5129 if (isTailCall) { 5130 if (Subtarget.useLongCalls() && !(CS && CS.isMustTailCall())) 5131 isTailCall = false; 5132 else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) 5133 isTailCall = 5134 IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS, 5135 isVarArg, Outs, Ins, DAG); 5136 else 5137 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 5138 Ins, DAG); 5139 if (isTailCall) { 5140 ++NumTailCalls; 5141 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 5142 ++NumSiblingCalls; 5143 5144 assert(isa<GlobalAddressSDNode>(Callee) && 5145 "Callee should be an llvm::Function object."); 5146 LLVM_DEBUG( 5147 const GlobalValue *GV = 5148 cast<GlobalAddressSDNode>(Callee)->getGlobal(); 5149 const unsigned Width = 5150 80 - strlen("TCO caller: ") - strlen(", callee linkage: 0, 0"); 5151 dbgs() << "TCO caller: " 5152 << left_justify(DAG.getMachineFunction().getName(), Width) 5153 << ", callee linkage: " << GV->getVisibility() << ", " 5154 << GV->getLinkage() << "\n"); 5155 } 5156 } 5157 5158 if (!isTailCall && CS && CS.isMustTailCall()) 5159 report_fatal_error("failed to perform tail call elimination on a call " 5160 "site marked musttail"); 5161 5162 // When long calls (i.e. indirect calls) are always used, calls are always 5163 // made via function pointer. If we have a function name, first translate it 5164 // into a pointer. 
5165 if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) && 5166 !isTailCall) 5167 Callee = LowerGlobalAddress(Callee, DAG); 5168 5169 if (Subtarget.isSVR4ABI()) { 5170 if (Subtarget.isPPC64()) 5171 return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg, 5172 isTailCall, isPatchPoint, Outs, OutVals, Ins, 5173 dl, DAG, InVals, CS); 5174 else 5175 return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg, 5176 isTailCall, isPatchPoint, Outs, OutVals, Ins, 5177 dl, DAG, InVals, CS); 5178 } 5179 5180 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg, 5181 isTailCall, isPatchPoint, Outs, OutVals, Ins, 5182 dl, DAG, InVals, CS); 5183 } 5184 5185 SDValue PPCTargetLowering::LowerCall_32SVR4( 5186 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, 5187 bool isTailCall, bool isPatchPoint, 5188 const SmallVectorImpl<ISD::OutputArg> &Outs, 5189 const SmallVectorImpl<SDValue> &OutVals, 5190 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 5191 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, 5192 ImmutableCallSite CS) const { 5193 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description 5194 // of the 32-bit SVR4 ABI stack frame layout. 5195 5196 assert((CallConv == CallingConv::C || 5197 CallConv == CallingConv::Cold || 5198 CallConv == CallingConv::Fast) && "Unknown calling convention!"); 5199 5200 unsigned PtrByteSize = 4; 5201 5202 MachineFunction &MF = DAG.getMachineFunction(); 5203 5204 // Mark this function as potentially containing a function that contains a 5205 // tail call. As a consequence the frame pointer will be used for dynamic 5206 // alloca and for restoring the caller's stack pointer in this function's 5207 // epilogue. This is done because, by tail calling, the called function 5208 // might overwrite the value in this function's (MF) stack pointer stack slot 0(SP). 5209 if (getTargetMachine().Options.GuaranteedTailCallOpt && 5210 CallConv == CallingConv::Fast) 5211 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 5212 5213 // Count how many bytes are to be pushed on the stack, including the linkage 5214 // area, parameter list area and the part of the local variable space which 5215 // contains copies of aggregates which are passed by value. 5216 5217 // Assign locations to all of the outgoing arguments. 5218 SmallVector<CCValAssign, 16> ArgLocs; 5219 PPCCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext()); 5220 5221 // Reserve space for the linkage area on the stack. 5222 CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(), 5223 PtrByteSize); 5224 if (useSoftFloat()) 5225 CCInfo.PreAnalyzeCallOperands(Outs); 5226 5227 if (isVarArg) { 5228 // Handle fixed and variable vector arguments differently. 5229 // Fixed vector arguments go into registers as long as registers are 5230 // available. Variable vector arguments always go into memory.
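// Illustrative example (hypothetical C): for "void f(vector int a, ...)"
// called as f(v1, v2), the named v1 is assigned by CC_PPC32_SVR4 and may
// land in a vector register, while v2, passed through the ellipsis, is
// assigned a memory slot by CC_PPC32_SVR4_VarArg.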
5231 unsigned NumArgs = Outs.size(); 5232 5233 for (unsigned i = 0; i != NumArgs; ++i) { 5234 MVT ArgVT = Outs[i].VT; 5235 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 5236 bool Result; 5237 5238 if (Outs[i].IsFixed) { 5239 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, 5240 CCInfo); 5241 } else { 5242 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, 5243 ArgFlags, CCInfo); 5244 } 5245 5246 if (Result) { 5247 #ifndef NDEBUG 5248 errs() << "Call operand #" << i << " has unhandled type " 5249 << EVT(ArgVT).getEVTString() << "\n"; 5250 #endif 5251 llvm_unreachable(nullptr); 5252 } 5253 } 5254 } else { 5255 // All arguments are treated the same. 5256 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4); 5257 } 5258 CCInfo.clearWasPPCF128(); 5259 5260 // Assign locations to all of the outgoing aggregate by-value arguments. 5261 SmallVector<CCValAssign, 16> ByValArgLocs; 5262 CCState CCByValInfo(CallConv, isVarArg, MF, ByValArgLocs, *DAG.getContext()); 5263 5264 // Reserve stack space for the allocations in CCInfo. 5265 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 5266 5267 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal); 5268 5269 // Size of the linkage area, parameter list area and the part of the local 5270 // variable space where copies of aggregates which are passed by value are 5271 // stored. 5272 unsigned NumBytes = CCByValInfo.getNextStackOffset(); 5273 5274 // Calculate by how many bytes the stack has to be adjusted in case of tail 5275 // call optimization. 5276 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 5277 5278 // Adjust the stack pointer for the new arguments... 5279 // These operations are automatically eliminated by the prolog/epilog pass. 5280 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 5281 SDValue CallSeqStart = Chain; 5282 5283 // Load the return address and frame pointer so they can be moved somewhere 5284 // else later. 5285 SDValue LROp, FPOp; 5286 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 5287 5288 // Set up a copy of the stack pointer for use loading and storing any 5289 // arguments that may not fit in the registers available for argument 5290 // passing. 5291 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 5292 5293 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 5294 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 5295 SmallVector<SDValue, 8> MemOpChains; 5296 5297 bool seenFloatArg = false; 5298 // Walk the register/memloc assignments, inserting copies/loads. 5299 for (unsigned i = 0, j = 0, e = ArgLocs.size(); 5300 i != e; 5301 ++i) { 5302 CCValAssign &VA = ArgLocs[i]; 5303 SDValue Arg = OutVals[i]; 5304 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5305 5306 if (Flags.isByVal()) { 5307 // Argument is an aggregate which is passed by value, thus we need to 5308 // create a copy of it in the local variable space of the current stack 5309 // frame (which is the stack frame of the caller) and pass the address of 5310 // this copy to the callee. 5311 assert((j < ByValArgLocs.size()) && "Index out of bounds!"); 5312 CCValAssign &ByValVA = ByValArgLocs[j++]; 5313 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); 5314 5315 // Memory reserved in the local variable space of the caller's stack frame.
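// Illustrative example (hypothetical C): when passing "struct S { int
// a[4]; } s" by value, the memcpy created below copies s into this reserved
// slot of the caller's own frame, and it is the address of that copy that
// is handed to the callee, in a register or in the parameter list area.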
5316 unsigned LocMemOffset = ByValVA.getLocMemOffset(); 5317 5318 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 5319 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 5320 StackPtr, PtrOff); 5321 5322 // Create a copy of the argument in the local area of the current 5323 // stack frame. 5324 SDValue MemcpyCall = 5325 CreateCopyOfByValArgument(Arg, PtrOff, 5326 CallSeqStart.getNode()->getOperand(0), 5327 Flags, DAG, dl); 5328 5329 // This must go outside the CALLSEQ_START..END. 5330 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0, 5331 SDLoc(MemcpyCall)); 5332 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 5333 NewCallSeqStart.getNode()); 5334 Chain = CallSeqStart = NewCallSeqStart; 5335 5336 // Pass the address of the aggregate copy on the stack either in a 5337 // physical register or in the parameter list area of the current stack 5338 // frame to the callee. 5339 Arg = PtrOff; 5340 } 5341 5342 if (VA.isRegLoc()) { 5343 if (Arg.getValueType() == MVT::i1) 5344 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg); 5345 5346 seenFloatArg |= VA.getLocVT().isFloatingPoint(); 5347 // Put argument in a physical register. 5348 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 5349 } else { 5350 // Put argument in the parameter list area of the current stack frame. 5351 assert(VA.isMemLoc()); 5352 unsigned LocMemOffset = VA.getLocMemOffset(); 5353 5354 if (!isTailCall) { 5355 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 5356 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 5357 StackPtr, PtrOff); 5358 5359 MemOpChains.push_back( 5360 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 5361 } else { 5362 // Calculate and remember argument location. 5363 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset, 5364 TailCallArguments); 5365 } 5366 } 5367 } 5368 5369 if (!MemOpChains.empty()) 5370 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5371 5372 // Build a sequence of copy-to-reg nodes chained together with token chain 5373 // and flag operands which copy the outgoing args into the appropriate regs. 5374 SDValue InFlag; 5375 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 5376 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 5377 RegsToPass[i].second, InFlag); 5378 InFlag = Chain.getValue(1); 5379 } 5380 5381 // Set CR bit 6 to true if this is a vararg call with floating args passed in 5382 // registers. 5383 if (isVarArg) { 5384 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 5385 SDValue Ops[] = { Chain, InFlag }; 5386 5387 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET, 5388 dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1)); 5389 5390 InFlag = Chain.getValue(1); 5391 } 5392 5393 if (isTailCall) 5394 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 5395 TailCallArguments); 5396 5397 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, 5398 /* unused except on PPC64 ELFv1 */ false, DAG, 5399 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 5400 NumBytes, Ins, InVals, CS); 5401 } 5402 5403 // Copy an argument into memory, being careful to do this outside the 5404 // call sequence for the call to which the argument belongs. 
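// A note on why the copy must sit outside the call sequence: the memcpy
// node created here may itself be lowered to a libcall, and a call emitted
// between CALLSEQ_START and CALLSEQ_END would illegally nest two call
// frames.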
5405 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq( 5406 SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags, 5407 SelectionDAG &DAG, const SDLoc &dl) const { 5408 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, 5409 CallSeqStart.getNode()->getOperand(0), 5410 Flags, DAG, dl); 5411 // The MEMCPY must go outside the CALLSEQ_START..END. 5412 int64_t FrameSize = CallSeqStart.getConstantOperandVal(1); 5413 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0, 5414 SDLoc(MemcpyCall)); 5415 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 5416 NewCallSeqStart.getNode()); 5417 return NewCallSeqStart; 5418 } 5419 5420 SDValue PPCTargetLowering::LowerCall_64SVR4( 5421 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, 5422 bool isTailCall, bool isPatchPoint, 5423 const SmallVectorImpl<ISD::OutputArg> &Outs, 5424 const SmallVectorImpl<SDValue> &OutVals, 5425 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 5426 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, 5427 ImmutableCallSite CS) const { 5428 bool isELFv2ABI = Subtarget.isELFv2ABI(); 5429 bool isLittleEndian = Subtarget.isLittleEndian(); 5430 unsigned NumOps = Outs.size(); 5431 bool hasNest = false; 5432 bool IsSibCall = false; 5433 5434 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 5435 unsigned PtrByteSize = 8; 5436 5437 MachineFunction &MF = DAG.getMachineFunction(); 5438 5439 if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt) 5440 IsSibCall = true; 5441 5442 // Mark this function as potentially containing a function that contains a 5443 // tail call. As a consequence the frame pointer will be used for dynamic 5444 // alloca and for restoring the caller's stack pointer in this function's 5445 // epilogue. This is done because, by tail calling, the called function 5446 // might overwrite the value in this function's (MF) stack pointer stack slot 0(SP). 5447 if (getTargetMachine().Options.GuaranteedTailCallOpt && 5448 CallConv == CallingConv::Fast) 5449 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 5450 5451 assert(!(CallConv == CallingConv::Fast && isVarArg) && 5452 "fastcc not supported on varargs functions"); 5453 5454 // Count how many bytes are to be pushed on the stack, including the linkage 5455 // area and parameter passing area. On ELFv1, the linkage area is 48 bytes 5456 // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage 5457 // area is 32 bytes reserved space for [SP][CR][LR][TOC]. 5458 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 5459 unsigned NumBytes = LinkageSize; 5460 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 5461 unsigned &QFPR_idx = FPR_idx; 5462 5463 static const MCPhysReg GPR[] = { 5464 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 5465 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 5466 }; 5467 static const MCPhysReg VR[] = { 5468 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 5469 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 5470 }; 5471 5472 const unsigned NumGPRs = array_lengthof(GPR); 5473 const unsigned NumFPRs = useSoftFloat() ? 0 : 13; 5474 const unsigned NumVRs = array_lengthof(VR); 5475 const unsigned NumQFPRs = NumFPRs; 5476 5477 // On ELFv2, we can avoid allocating the parameter area if all the arguments 5478 // can be passed to the callee in registers. 5479 // For the fast calling convention, there is another check below.
5480 // Note: We should keep this consistent with LowerFormalArguments_64SVR4(). 5481 bool HasParameterArea = !isELFv2ABI || isVarArg || CallConv == CallingConv::Fast; 5482 if (!HasParameterArea) { 5483 unsigned ParamAreaSize = NumGPRs * PtrByteSize; 5484 unsigned AvailableFPRs = NumFPRs; 5485 unsigned AvailableVRs = NumVRs; 5486 unsigned NumBytesTmp = NumBytes; 5487 for (unsigned i = 0; i != NumOps; ++i) { 5488 if (Outs[i].Flags.isNest()) continue; 5489 if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags, 5490 PtrByteSize, LinkageSize, ParamAreaSize, 5491 NumBytesTmp, AvailableFPRs, AvailableVRs, 5492 Subtarget.hasQPX())) 5493 HasParameterArea = true; 5494 } 5495 } 5496 5497 // When using the fast calling convention, we don't provide backing for 5498 // arguments that will be in registers. 5499 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0; 5500 5501 // Avoid allocating parameter area for fastcc functions if all the arguments 5502 // can be passed in the registers. 5503 if (CallConv == CallingConv::Fast) 5504 HasParameterArea = false; 5505 5506 // Add up all the space actually used. 5507 for (unsigned i = 0; i != NumOps; ++i) { 5508 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5509 EVT ArgVT = Outs[i].VT; 5510 EVT OrigVT = Outs[i].ArgVT; 5511 5512 if (Flags.isNest()) 5513 continue; 5514 5515 if (CallConv == CallingConv::Fast) { 5516 if (Flags.isByVal()) { 5517 NumGPRsUsed += (Flags.getByValSize()+7)/8; 5518 if (NumGPRsUsed > NumGPRs) 5519 HasParameterArea = true; 5520 } else { 5521 switch (ArgVT.getSimpleVT().SimpleTy) { 5522 default: llvm_unreachable("Unexpected ValueType for argument!"); 5523 case MVT::i1: 5524 case MVT::i32: 5525 case MVT::i64: 5526 if (++NumGPRsUsed <= NumGPRs) 5527 continue; 5528 break; 5529 case MVT::v4i32: 5530 case MVT::v8i16: 5531 case MVT::v16i8: 5532 case MVT::v2f64: 5533 case MVT::v2i64: 5534 case MVT::v1i128: 5535 if (++NumVRsUsed <= NumVRs) 5536 continue; 5537 break; 5538 case MVT::v4f32: 5539 // When using QPX, this is handled like an FP register; otherwise, it 5540 // is an Altivec register. 5541 if (Subtarget.hasQPX()) { 5542 if (++NumFPRsUsed <= NumFPRs) 5543 continue; 5544 } else { 5545 if (++NumVRsUsed <= NumVRs) 5546 continue; 5547 } 5548 break; 5549 case MVT::f32: 5550 case MVT::f64: 5551 case MVT::v4f64: // QPX 5552 case MVT::v4i1: // QPX 5553 if (++NumFPRsUsed <= NumFPRs) 5554 continue; 5555 break; 5556 } 5557 HasParameterArea = true; 5558 } 5559 } 5560 5561 /* Respect alignment of argument on the stack. */ 5562 unsigned Align = 5563 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 5564 NumBytes = ((NumBytes + Align - 1) / Align) * Align; 5565 5566 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 5567 if (Flags.isInConsecutiveRegsLast()) 5568 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 5569 } 5570 5571 unsigned NumBytesActuallyUsed = NumBytes; 5572 5573 // In the old ELFv1 ABI, 5574 // the prolog code of the callee may store up to 8 GPR argument registers to 5575 // the stack, allowing va_start to index over them in memory if it is varargs. 5576 // Because we cannot tell if this is needed on the caller side, we have to 5577 // conservatively assume that it is needed. As such, make sure we have at 5578 // least enough stack space for the caller to store the 8 GPRs. 5579 // In the ELFv2 ABI, we allocate the parameter area iff a callee 5580 // really requires memory operands, e.g. a vararg function.
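// Illustrative arithmetic (using the sizes quoted above): on ELFv1, with
// LinkageSize = 48, the floor below is 48 + 8 * 8 = 112 bytes, so even a
// callee taking a single i64 is given a full 64-byte parameter save area;
// on ELFv2, when no parameter area is needed, only the 32-byte linkage
// area is allocated.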
5581 if (HasParameterArea) 5582 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize); 5583 else 5584 NumBytes = LinkageSize; 5585 5586 // Tail call needs the stack to be aligned. 5587 if (getTargetMachine().Options.GuaranteedTailCallOpt && 5588 CallConv == CallingConv::Fast) 5589 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes); 5590 5591 int SPDiff = 0; 5592 5593 // Calculate by how many bytes the stack has to be adjusted in case of tail 5594 // call optimization. 5595 if (!IsSibCall) 5596 SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 5597 5598 // To protect arguments on the stack from being clobbered in a tail call, 5599 // force all the loads to happen before doing any other lowering. 5600 if (isTailCall) 5601 Chain = DAG.getStackArgumentTokenFactor(Chain); 5602 5603 // Adjust the stack pointer for the new arguments... 5604 // These operations are automatically eliminated by the prolog/epilog pass. 5605 if (!IsSibCall) 5606 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 5607 SDValue CallSeqStart = Chain; 5608 5609 // Load the return address and frame pointer so they can be moved somewhere 5610 // else later. 5611 SDValue LROp, FPOp; 5612 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 5613 5614 // Set up a copy of the stack pointer for use loading and storing any 5615 // arguments that may not fit in the registers available for argument 5616 // passing. 5617 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 5618 5619 // Figure out which arguments are going to go in registers, and which in 5620 // memory. Also, if this is a vararg function, floating point arguments 5621 // must be stored to our stack, and loaded into integer regs as well, if 5622 // any integer regs are available for argument passing. 5623 unsigned ArgOffset = LinkageSize; 5624 5625 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 5626 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 5627 5628 SmallVector<SDValue, 8> MemOpChains; 5629 for (unsigned i = 0; i != NumOps; ++i) { 5630 SDValue Arg = OutVals[i]; 5631 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5632 EVT ArgVT = Outs[i].VT; 5633 EVT OrigVT = Outs[i].ArgVT; 5634 5635 // PtrOff will be used to store the current argument to the stack if a 5636 // register cannot be found for it. 5637 SDValue PtrOff; 5638 5639 // We re-align the argument offset for each argument, except when using the 5640 // fast calling convention, when we need to make sure we do that only when 5641 // we'll actually use a stack slot. 5642 auto ComputePtrOff = [&]() { 5643 /* Respect alignment of argument on the stack. */ 5644 unsigned Align = 5645 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 5646 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 5647 5648 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 5649 5650 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 5651 }; 5652 5653 if (CallConv != CallingConv::Fast) { 5654 ComputePtrOff(); 5655 5656 /* Compute GPR index associated with argument offset. */ 5657 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 5658 GPR_idx = std::min(GPR_idx, NumGPRs); 5659 } 5660 5661 // Promote integers to 64-bit values. 5662 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) { 5663 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 5664 unsigned ExtOp = Flags.isSExt() ?
ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 5665 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 5666 } 5667 5668 // FIXME memcpy is used way more than necessary. Correctness first. 5669 // Note: "by value" is code for passing a structure by value, not 5670 // basic types. 5671 if (Flags.isByVal()) { 5672 // Note: Size includes alignment padding, so 5673 // struct x { short a; char b; } 5674 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 5675 // These are the proper values we need for right-justifying the 5676 // aggregate in a parameter register. 5677 unsigned Size = Flags.getByValSize(); 5678 5679 // An empty aggregate parameter takes up no storage and no 5680 // registers. 5681 if (Size == 0) 5682 continue; 5683 5684 if (CallConv == CallingConv::Fast) 5685 ComputePtrOff(); 5686 5687 // All aggregates smaller than 8 bytes must be passed right-justified. 5688 if (Size==1 || Size==2 || Size==4) { 5689 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 5690 if (GPR_idx != NumGPRs) { 5691 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 5692 MachinePointerInfo(), VT); 5693 MemOpChains.push_back(Load.getValue(1)); 5694 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5695 5696 ArgOffset += PtrByteSize; 5697 continue; 5698 } 5699 } 5700 5701 if (GPR_idx == NumGPRs && Size < 8) { 5702 SDValue AddPtr = PtrOff; 5703 if (!isLittleEndian) { 5704 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 5705 PtrOff.getValueType()); 5706 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5707 } 5708 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5709 CallSeqStart, 5710 Flags, DAG, dl); 5711 ArgOffset += PtrByteSize; 5712 continue; 5713 } 5714 // Copy entire object into memory. There are cases where gcc-generated 5715 // code assumes it is there, even if it could be put entirely into 5716 // registers. (This is not what the doc says.) 5717 5718 // FIXME: The above statement is likely due to a misunderstanding of the 5719 // documents. All arguments must be copied into the parameter area BY 5720 // THE CALLEE in the event that the callee takes the address of any 5721 // formal argument. That has not yet been implemented. However, it is 5722 // reasonable to use the stack area as a staging area for the register 5723 // load. 5724 5725 // Skip this for small aggregates, as we will use the same slot for a 5726 // right-justified copy, below. 5727 if (Size >= 8) 5728 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5729 CallSeqStart, 5730 Flags, DAG, dl); 5731 5732 // When a register is available, pass a small aggregate right-justified. 5733 if (Size < 8 && GPR_idx != NumGPRs) { 5734 // The easiest way to get this right-justified in a register 5735 // is to copy the structure into the rightmost portion of a 5736 // local variable slot, then load the whole slot into the 5737 // register. 5738 // FIXME: The memcpy seems to produce pretty awful code for 5739 // small aggregates, particularly for packed ones. 5740 // FIXME: It would be preferable to use the slot in the 5741 // parameter save area instead of a new local variable. 5742 SDValue AddPtr = PtrOff; 5743 if (!isLittleEndian) { 5744 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType()); 5745 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5746 } 5747 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5748 CallSeqStart, 5749 Flags, DAG, dl); 5750 5751 // Load the slot into the register. 
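// Illustrative example (big-endian, hypothetical 3-byte struct): the
// memcpy above placed the aggregate at bytes 5..7 of the doubleword slot
// (AddPtr = PtrOff + (8 - Size)), so the full 8-byte load below leaves it
// right-justified in the low-order bytes of the GPR.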
5752 SDValue Load = 5753 DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo()); 5754 MemOpChains.push_back(Load.getValue(1)); 5755 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5756 5757 // Done with this argument. 5758 ArgOffset += PtrByteSize; 5759 continue; 5760 } 5761 5762 // For aggregates larger than PtrByteSize, copy the pieces of the 5763 // object that fit into registers from the parameter save area. 5764 for (unsigned j=0; j<Size; j+=PtrByteSize) { 5765 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 5766 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 5767 if (GPR_idx != NumGPRs) { 5768 SDValue Load = 5769 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo()); 5770 MemOpChains.push_back(Load.getValue(1)); 5771 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5772 ArgOffset += PtrByteSize; 5773 } else { 5774 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 5775 break; 5776 } 5777 } 5778 continue; 5779 } 5780 5781 switch (Arg.getSimpleValueType().SimpleTy) { 5782 default: llvm_unreachable("Unexpected ValueType for argument!"); 5783 case MVT::i1: 5784 case MVT::i32: 5785 case MVT::i64: 5786 if (Flags.isNest()) { 5787 // The 'nest' parameter, if any, is passed in R11. 5788 RegsToPass.push_back(std::make_pair(PPC::X11, Arg)); 5789 hasNest = true; 5790 break; 5791 } 5792 5793 // These can be scalar arguments or elements of an integer array type 5794 // passed directly. Clang may use those instead of "byval" aggregate 5795 // types to avoid forcing arguments to memory unnecessarily. 5796 if (GPR_idx != NumGPRs) { 5797 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 5798 } else { 5799 if (CallConv == CallingConv::Fast) 5800 ComputePtrOff(); 5801 5802 assert(HasParameterArea && 5803 "Parameter area must exist to pass an argument in memory."); 5804 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5805 true, isTailCall, false, MemOpChains, 5806 TailCallArguments, dl); 5807 if (CallConv == CallingConv::Fast) 5808 ArgOffset += PtrByteSize; 5809 } 5810 if (CallConv != CallingConv::Fast) 5811 ArgOffset += PtrByteSize; 5812 break; 5813 case MVT::f32: 5814 case MVT::f64: { 5815 // These can be scalar arguments or elements of a float array type 5816 // passed directly. The latter are used to implement ELFv2 homogeneous 5817 // float aggregates. 5818 5819 // Named arguments go into FPRs first, and once they overflow, the 5820 // remaining arguments go into GPRs and then the parameter save area. 5821 // Unnamed arguments for vararg functions always go to GPRs and 5822 // then the parameter save area. For now, we always put arguments to 5823 // vararg routines in both locations (FPR *and* GPR or stack slot). 5824 bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs; 5825 bool NeededLoad = false; 5826 5827 // First load the argument into the next available FPR. 5828 if (FPR_idx != NumFPRs) 5829 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 5830 5831 // Next, load the argument into GPR or stack slot if needed. 5832 if (!NeedGPROrStack) 5833 ; 5834 else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) { 5835 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 5836 // once we support fp <-> gpr moves. 5837 5838 // In the non-vararg case, this can only ever happen in the 5839 // presence of f32 array types, since otherwise we never run 5840 // out of FPRs before running out of GPRs.
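// Illustrative example (a sketch): once an f32 array argument has used up
// the 13 FPRs, each remaining pair of elements is bitcast to i32 and fused
// into a single i64 GPR value with BUILD_PAIR below; on big-endian targets
// the Lo/Hi swap puts the earlier element in the high word.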
        SDValue ArgVal;

        // Double values are always passed in a single GPR.
        if (Arg.getValueType() != MVT::f32) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);

        // Non-array float values are extended and passed in a GPR.
        } else if (!Flags.isInConsecutiveRegs()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);

        // If we have an array of floats, we collect every odd element
        // together with its predecessor into one GPR.
        } else if (ArgOffset % PtrByteSize != 0) {
          SDValue Lo, Hi;
          Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
          Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          if (!isLittleEndian)
            std::swap(Lo, Hi);
          ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);

        // The final element, if even, goes into the first half of a GPR.
        } else if (Flags.isInConsecutiveRegsLast()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
          if (!isLittleEndian)
            ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
                                 DAG.getConstant(32, dl, MVT::i32));

        // Non-final even elements are skipped; they will be handled
        // together with the subsequent argument on the next go-around.
        } else
          ArgVal = SDValue();

        if (ArgVal.getNode())
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
      } else {
        if (CallConv == CallingConv::Fast)
          ComputePtrOff();

        // Single-precision floating-point values are mapped to the
        // second (rightmost) word of the stack doubleword.
        if (Arg.getValueType() == MVT::f32 &&
            !isLittleEndian && !Flags.isInConsecutiveRegs()) {
          SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
          PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
        }

        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);

        NeededLoad = true;
      }
      // When passing an array of floats, the array occupies consecutive
      // space in the argument area; only round up to the next doubleword
      // at the end of the array. Otherwise, each float takes 8 bytes.
      if (CallConv != CallingConv::Fast || NeededLoad) {
        ArgOffset += (Arg.getValueType() == MVT::f32 &&
                      Flags.isInConsecutiveRegs()) ? 4 : 8;
        if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      }
      break;
    }
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
    case MVT::v2f64:
    case MVT::v2i64:
    case MVT::v1i128:
      if (!Subtarget.hasQPX()) {
        // These can be scalar arguments or elements of a vector array type
        // passed directly. The latter are used to implement ELFv2 homogeneous
        // vector aggregates.

        // For a varargs call, named arguments go into VRs or on the stack as
        // usual; unnamed arguments always go to the stack or the corresponding
        // GPRs when within range. For now, we always put the value in both
        // locations (or even all three).
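        // Example of "all three" (illustrative): for a varargs call passing a
        // v4i32 while VRs and GPRs are still free, the value is stored to the
        // parameter area, reloaded into the next VR, and reloaded again into
        // GPRs, since without a prototype the callee may look for it in any
        // of those places.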
5924 if (isVarArg) { 5925 assert(HasParameterArea && 5926 "Parameter area must exist if we have a varargs call."); 5927 // We could elide this store in the case where the object fits 5928 // entirely in R registers. Maybe later. 5929 SDValue Store = 5930 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 5931 MemOpChains.push_back(Store); 5932 if (VR_idx != NumVRs) { 5933 SDValue Load = 5934 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo()); 5935 MemOpChains.push_back(Load.getValue(1)); 5936 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 5937 } 5938 ArgOffset += 16; 5939 for (unsigned i=0; i<16; i+=PtrByteSize) { 5940 if (GPR_idx == NumGPRs) 5941 break; 5942 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5943 DAG.getConstant(i, dl, PtrVT)); 5944 SDValue Load = 5945 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 5946 MemOpChains.push_back(Load.getValue(1)); 5947 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5948 } 5949 break; 5950 } 5951 5952 // Non-varargs Altivec params go into VRs or on the stack. 5953 if (VR_idx != NumVRs) { 5954 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 5955 } else { 5956 if (CallConv == CallingConv::Fast) 5957 ComputePtrOff(); 5958 5959 assert(HasParameterArea && 5960 "Parameter area must exist to pass an argument in memory."); 5961 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5962 true, isTailCall, true, MemOpChains, 5963 TailCallArguments, dl); 5964 if (CallConv == CallingConv::Fast) 5965 ArgOffset += 16; 5966 } 5967 5968 if (CallConv != CallingConv::Fast) 5969 ArgOffset += 16; 5970 break; 5971 } // not QPX 5972 5973 assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 && 5974 "Invalid QPX parameter type"); 5975 5976 /* fall through */ 5977 case MVT::v4f64: 5978 case MVT::v4i1: { 5979 bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32; 5980 if (isVarArg) { 5981 assert(HasParameterArea && 5982 "Parameter area must exist if we have a varargs call."); 5983 // We could elide this store in the case where the object fits 5984 // entirely in R registers. Maybe later. 5985 SDValue Store = 5986 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 5987 MemOpChains.push_back(Store); 5988 if (QFPR_idx != NumQFPRs) { 5989 SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store, 5990 PtrOff, MachinePointerInfo()); 5991 MemOpChains.push_back(Load.getValue(1)); 5992 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load)); 5993 } 5994 ArgOffset += (IsF32 ? 16 : 32); 5995 for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) { 5996 if (GPR_idx == NumGPRs) 5997 break; 5998 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5999 DAG.getConstant(i, dl, PtrVT)); 6000 SDValue Load = 6001 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 6002 MemOpChains.push_back(Load.getValue(1)); 6003 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6004 } 6005 break; 6006 } 6007 6008 // Non-varargs QPX params go into registers or on the stack. 6009 if (QFPR_idx != NumQFPRs) { 6010 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg)); 6011 } else { 6012 if (CallConv == CallingConv::Fast) 6013 ComputePtrOff(); 6014 6015 assert(HasParameterArea && 6016 "Parameter area must exist to pass an argument in memory."); 6017 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6018 true, isTailCall, true, MemOpChains, 6019 TailCallArguments, dl); 6020 if (CallConv == CallingConv::Fast) 6021 ArgOffset += (IsF32 ? 
16 : 32);
      }

      if (CallConv != CallingConv::Fast)
        ArgOffset += (IsF32 ? 16 : 32);
      break;
    }
    }
  }

  assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
         "mismatch in size of parameter area");
  (void)NumBytesActuallyUsed;

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Check if this is an indirect call (MTCTR/BCTRL).
  // See PrepareCall() for more information about calls through function
  // pointers in the 64-bit SVR4 ABI.
  if (!isTailCall && !isPatchPoint &&
      !isFunctionGlobalAddress(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee)) {
    // Load r2 into a virtual register and store it to the TOC save area.
    setUsesTOCBasePtr(DAG);
    SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
    // TOC save area offset.
    unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
    SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
    SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
    Chain = DAG.getStore(
        Val.getValue(1), dl, Val, AddPtr,
        MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
    // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
    // This does not mean the MTCTR instruction must use R12; it's easier
    // to model this as an extra parameter, so do that.
    if (isELFv2ABI && !isPatchPoint)
      RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (isTailCall && !IsSibCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
                    TailCallArguments);

  return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest,
                    DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee,
                    SPDiff, NumBytes, Ins, InVals, CS);
}

SDValue PPCTargetLowering::LowerCall_Darwin(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, bool isPatchPoint,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    ImmutableCallSite CS) const {
  unsigned NumOps = Outs.size();

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;
  unsigned PtrByteSize = isPPC64 ? 8 : 4;

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a fast (tail) call. As a
  // consequence, the frame pointer will be used for dynamic stack allocation
  // and for restoring the caller's stack pointer in this function's epilogue.
  // This is done because a tail call might overwrite the value in this
  // function's (MF) stack pointer stack slot 0(SP).
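  // Sketch of the hazard being guarded against (illustrative): if f() makes
  // a fastcc tail call to g() under GuaranteedTailCallOpt, g()'s outgoing
  // arguments may be written over f()'s incoming argument area and back-chain
  // slot, so f() must recover the caller's stack pointer through the frame
  // pointer rather than trusting 0(SP).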
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area and the parameter passing area. We start with 24/48 bytes, which is
  // prereserved space for [SP][CR][LR][3 x unused].
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned NumBytes = LinkageSize;

  // Add up all the space actually used.
  // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
  // they all go in registers, but we must reserve stack space for them for
  // possible use by the caller. In varargs or 64-bit calls, parameters are
  // assigned stack space in order, with padding so Altivec parameters are
  // 16-byte aligned.
  unsigned nAltivecParamsAtEnd = 0;
  for (unsigned i = 0; i != NumOps; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    // Varargs Altivec parameters are padded to a 16-byte boundary.
    if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
        ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
        ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
      if (!isVarArg && !isPPC64) {
        // Non-varargs Altivec parameters go after all the non-Altivec
        // parameters; handle those later so we know how much padding we need.
        nAltivecParamsAtEnd++;
        continue;
      }
      // Varargs and 64-bit Altivec parameters are padded to a 16-byte
      // boundary.
      NumBytes = ((NumBytes+15)/16)*16;
    }
    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
  }

  // Allow for Altivec parameters at the end, if needed.
  if (nAltivecParamsAtEnd) {
    NumBytes = ((NumBytes+15)/16)*16;
    NumBytes += 16*nAltivecParamsAtEnd;
  }

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if the function
  // is varargs. Because we cannot tell if this is needed on the caller side,
  // we have to conservatively assume that it is needed. As such, make sure we
  // have at least enough stack space for the caller to store the 8 GPRs.
  NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);

  // Tail call needs the stack to be aligned.
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // To protect arguments on the stack from being clobbered in a tail call,
  // force all the loads to happen before doing any other lowering.
  if (isTailCall)
    Chain = DAG.getStackArgumentTokenFactor(Chain);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
6170 SDValue LROp, FPOp; 6171 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 6172 6173 // Set up a copy of the stack pointer for use loading and storing any 6174 // arguments that may not fit in the registers available for argument 6175 // passing. 6176 SDValue StackPtr; 6177 if (isPPC64) 6178 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 6179 else 6180 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 6181 6182 // Figure out which arguments are going to go in registers, and which in 6183 // memory. Also, if this is a vararg function, floating point operations 6184 // must be stored to our stack, and loaded into integer regs as well, if 6185 // any integer regs are available for argument passing. 6186 unsigned ArgOffset = LinkageSize; 6187 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 6188 6189 static const MCPhysReg GPR_32[] = { // 32-bit registers. 6190 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 6191 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 6192 }; 6193 static const MCPhysReg GPR_64[] = { // 64-bit registers. 6194 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 6195 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 6196 }; 6197 static const MCPhysReg VR[] = { 6198 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 6199 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 6200 }; 6201 const unsigned NumGPRs = array_lengthof(GPR_32); 6202 const unsigned NumFPRs = 13; 6203 const unsigned NumVRs = array_lengthof(VR); 6204 6205 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 6206 6207 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 6208 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 6209 6210 SmallVector<SDValue, 8> MemOpChains; 6211 for (unsigned i = 0; i != NumOps; ++i) { 6212 SDValue Arg = OutVals[i]; 6213 ISD::ArgFlagsTy Flags = Outs[i].Flags; 6214 6215 // PtrOff will be used to store the current argument to the stack if a 6216 // register cannot be found for it. 6217 SDValue PtrOff; 6218 6219 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 6220 6221 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 6222 6223 // On PPC64, promote integers to 64-bit values. 6224 if (isPPC64 && Arg.getValueType() == MVT::i32) { 6225 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 6226 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 6227 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 6228 } 6229 6230 // FIXME memcpy is used way more than necessary. Correctness first. 6231 // Note: "by value" is code for passing a structure by value, not 6232 // basic types. 6233 if (Flags.isByVal()) { 6234 unsigned Size = Flags.getByValSize(); 6235 // Very small objects are passed right-justified. Everything else is 6236 // passed left-justified. 6237 if (Size==1 || Size==2) { 6238 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 6239 if (GPR_idx != NumGPRs) { 6240 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 6241 MachinePointerInfo(), VT); 6242 MemOpChains.push_back(Load.getValue(1)); 6243 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6244 6245 ArgOffset += PtrByteSize; 6246 } else { 6247 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 6248 PtrOff.getValueType()); 6249 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 6250 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 6251 CallSeqStart, 6252 Flags, DAG, dl); 6253 ArgOffset += PtrByteSize; 6254 } 6255 continue; 6256 } 6257 // Copy entire object into memory. 
There are cases where gcc-generated 6258 // code assumes it is there, even if it could be put entirely into 6259 // registers. (This is not what the doc says.) 6260 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 6261 CallSeqStart, 6262 Flags, DAG, dl); 6263 6264 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 6265 // copy the pieces of the object that fit into registers from the 6266 // parameter save area. 6267 for (unsigned j=0; j<Size; j+=PtrByteSize) { 6268 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 6269 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 6270 if (GPR_idx != NumGPRs) { 6271 SDValue Load = 6272 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo()); 6273 MemOpChains.push_back(Load.getValue(1)); 6274 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6275 ArgOffset += PtrByteSize; 6276 } else { 6277 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 6278 break; 6279 } 6280 } 6281 continue; 6282 } 6283 6284 switch (Arg.getSimpleValueType().SimpleTy) { 6285 default: llvm_unreachable("Unexpected ValueType for argument!"); 6286 case MVT::i1: 6287 case MVT::i32: 6288 case MVT::i64: 6289 if (GPR_idx != NumGPRs) { 6290 if (Arg.getValueType() == MVT::i1) 6291 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg); 6292 6293 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 6294 } else { 6295 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6296 isPPC64, isTailCall, false, MemOpChains, 6297 TailCallArguments, dl); 6298 } 6299 ArgOffset += PtrByteSize; 6300 break; 6301 case MVT::f32: 6302 case MVT::f64: 6303 if (FPR_idx != NumFPRs) { 6304 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 6305 6306 if (isVarArg) { 6307 SDValue Store = 6308 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6309 MemOpChains.push_back(Store); 6310 6311 // Float varargs are always shadowed in available integer registers 6312 if (GPR_idx != NumGPRs) { 6313 SDValue Load = 6314 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo()); 6315 MemOpChains.push_back(Load.getValue(1)); 6316 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6317 } 6318 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 6319 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 6320 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 6321 SDValue Load = 6322 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo()); 6323 MemOpChains.push_back(Load.getValue(1)); 6324 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6325 } 6326 } else { 6327 // If we have any FPRs remaining, we may also have GPRs remaining. 6328 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 6329 // GPRs. 6330 if (GPR_idx != NumGPRs) 6331 ++GPR_idx; 6332 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 6333 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 6334 ++GPR_idx; 6335 } 6336 } else 6337 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6338 isPPC64, isTailCall, false, MemOpChains, 6339 TailCallArguments, dl); 6340 if (isPPC64) 6341 ArgOffset += 8; 6342 else 6343 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 6344 break; 6345 case MVT::v4f32: 6346 case MVT::v4i32: 6347 case MVT::v8i16: 6348 case MVT::v16i8: 6349 if (isVarArg) { 6350 // These go aligned on the stack, or in the corresponding R registers 6351 // when within range. 
The Darwin PPC ABI doc claims they also go in
        // V registers; in fact gcc does this only for arguments that are
        // prototyped, not for those that match the '...'. We do it for all
        // arguments; this seems to work.
        while (ArgOffset % 16 != 0) {
          ArgOffset += PtrByteSize;
          if (GPR_idx != NumGPRs)
            GPR_idx++;
        }
        // We could elide this store in the case where the object fits
        // entirely in R registers. Maybe later.
        PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
                             DAG.getConstant(ArgOffset, dl, PtrVT));
        SDValue Store =
            DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
        MemOpChains.push_back(Store);
        if (VR_idx != NumVRs) {
          SDValue Load =
              DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
        }
        ArgOffset += 16;
        for (unsigned i = 0; i < 16; i += PtrByteSize) {
          if (GPR_idx == NumGPRs)
            break;
          SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
                                   DAG.getConstant(i, dl, PtrVT));
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        }
        break;
      }

      // Non-varargs Altivec params generally go in registers, but have
      // stack space allocated at the end.
      if (VR_idx != NumVRs) {
        // Doesn't have GPR space allocated.
        RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
      } else if (nAltivecParamsAtEnd == 0) {
        // We are emitting Altivec params in order.
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         isPPC64, isTailCall, true, MemOpChains,
                         TailCallArguments, dl);
        ArgOffset += 16;
      }
      break;
    }
  }
  // If all Altivec parameters fit in registers, as they usually do,
  // they get stack space following the non-Altivec parameters. We
  // don't track this here because nobody below needs it.
  // If there are more Altivec parameters than fit in registers, emit
  // the stores here.
  if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
    unsigned j = 0;
    // Offset is aligned; skip the first 12 params, which go in V registers.
    ArgOffset = ((ArgOffset+15)/16)*16;
    ArgOffset += 12*16;
    for (unsigned i = 0; i != NumOps; ++i) {
      SDValue Arg = OutVals[i];
      EVT ArgType = Outs[i].VT;
      if (ArgType == MVT::v4f32 || ArgType == MVT::v4i32 ||
          ArgType == MVT::v8i16 || ArgType == MVT::v16i8) {
        if (++j > NumVRs) {
          SDValue PtrOff;
          // We are emitting Altivec params in order.
          LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                           isPPC64, isTailCall, true, MemOpChains,
                           TailCallArguments, dl);
          ArgOffset += 16;
        }
      }
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // On Darwin, R12 must contain the address of an indirect callee. This does
  // not mean the MTCTR instruction must use R12; it's easier to model this as
  // an extra parameter, so do that.
  if (!isTailCall &&
      !isFunctionGlobalAddress(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee) &&
      !isBLACompatibleAddress(Callee, DAG))
    RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ?
PPC::X12 : 6440 PPC::R12), Callee)); 6441 6442 // Build a sequence of copy-to-reg nodes chained together with token chain 6443 // and flag operands which copy the outgoing args into the appropriate regs. 6444 SDValue InFlag; 6445 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 6446 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 6447 RegsToPass[i].second, InFlag); 6448 InFlag = Chain.getValue(1); 6449 } 6450 6451 if (isTailCall) 6452 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 6453 TailCallArguments); 6454 6455 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, 6456 /* unused except on PPC64 ELFv1 */ false, DAG, 6457 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 6458 NumBytes, Ins, InVals, CS); 6459 } 6460 6461 bool 6462 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 6463 MachineFunction &MF, bool isVarArg, 6464 const SmallVectorImpl<ISD::OutputArg> &Outs, 6465 LLVMContext &Context) const { 6466 SmallVector<CCValAssign, 16> RVLocs; 6467 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 6468 return CCInfo.CheckReturn( 6469 Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 6470 ? RetCC_PPC_Cold 6471 : RetCC_PPC); 6472 } 6473 6474 SDValue 6475 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 6476 bool isVarArg, 6477 const SmallVectorImpl<ISD::OutputArg> &Outs, 6478 const SmallVectorImpl<SDValue> &OutVals, 6479 const SDLoc &dl, SelectionDAG &DAG) const { 6480 SmallVector<CCValAssign, 16> RVLocs; 6481 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 6482 *DAG.getContext()); 6483 CCInfo.AnalyzeReturn(Outs, 6484 (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 6485 ? RetCC_PPC_Cold 6486 : RetCC_PPC); 6487 6488 SDValue Flag; 6489 SmallVector<SDValue, 4> RetOps(1, Chain); 6490 6491 // Copy the result values into the output registers. 6492 for (unsigned i = 0; i != RVLocs.size(); ++i) { 6493 CCValAssign &VA = RVLocs[i]; 6494 assert(VA.isRegLoc() && "Can only return in registers!"); 6495 6496 SDValue Arg = OutVals[i]; 6497 6498 switch (VA.getLocInfo()) { 6499 default: llvm_unreachable("Unknown loc info!"); 6500 case CCValAssign::Full: break; 6501 case CCValAssign::AExt: 6502 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 6503 break; 6504 case CCValAssign::ZExt: 6505 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 6506 break; 6507 case CCValAssign::SExt: 6508 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 6509 break; 6510 } 6511 6512 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 6513 Flag = Chain.getValue(1); 6514 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 6515 } 6516 6517 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 6518 const MCPhysReg *I = 6519 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); 6520 if (I) { 6521 for (; *I; ++I) { 6522 6523 if (PPC::G8RCRegClass.contains(*I)) 6524 RetOps.push_back(DAG.getRegister(*I, MVT::i64)); 6525 else if (PPC::F8RCRegClass.contains(*I)) 6526 RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64))); 6527 else if (PPC::CRRCRegClass.contains(*I)) 6528 RetOps.push_back(DAG.getRegister(*I, MVT::i1)); 6529 else if (PPC::VRRCRegClass.contains(*I)) 6530 RetOps.push_back(DAG.getRegister(*I, MVT::Other)); 6531 else 6532 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 6533 } 6534 } 6535 6536 RetOps[0] = Chain; // Update chain. 6537 6538 // Add the flag if we have it. 
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
}

SDValue
PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc dl(Op);

  // Get the correct type for integers.
  EVT IntVT = Op.getValueType();

  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
  // Build a DYNAREAOFFSET node.
  SDValue Ops[2] = {Chain, FPSIdx};
  SDVTList VTs = DAG.getVTList(IntVT);
  return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
}

SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
                                             SelectionDAG &DAG) const {
  // When we pop the dynamic allocation we need to restore the SP link.
  SDLoc dl(Op);

  // Get the correct type for pointers.
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // Construct the stack pointer operand.
  bool isPPC64 = Subtarget.isPPC64();
  unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
  SDValue StackPtr = DAG.getRegister(SP, PtrVT);

  // Get the operands for the STACKRESTORE.
  SDValue Chain = Op.getOperand(0);
  SDValue SaveSP = Op.getOperand(1);

  // Load the old link SP.
  SDValue LoadLinkSP =
      DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());

  // Restore the stack pointer.
  Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);

  // Store the old link SP.
  return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
}

SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = Subtarget.isPPC64();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Get the current return address save index.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int RASI = FI->getReturnAddrSaveIndex();

  // If the return address save index hasn't been defined yet.
  if (!RASI) {
    // Find out the fixed offset of the return address save area.
    int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
    // Allocate the frame index for the return address save area.
    RASI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, LROffset,
                                               false);
    // Save the result.
    FI->setReturnAddrSaveIndex(RASI);
  }
  return DAG.getFrameIndex(RASI, PtrVT);
}

SDValue
PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = Subtarget.isPPC64();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Get the current frame pointer save index. The users of this index will be
  // primarily DYNALLOC instructions.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int FPSI = FI->getFramePointerSaveIndex();

  // If the frame pointer save index hasn't been defined yet.
  if (!FPSI) {
    // Find out the fixed offset of the frame pointer save area.
    int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
    // Allocate the frame index for the frame pointer save area.
    FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, FPOffset,
                                               true);
    // Save the result.
6630 FI->setFramePointerSaveIndex(FPSI); 6631 } 6632 return DAG.getFrameIndex(FPSI, PtrVT); 6633 } 6634 6635 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 6636 SelectionDAG &DAG) const { 6637 // Get the inputs. 6638 SDValue Chain = Op.getOperand(0); 6639 SDValue Size = Op.getOperand(1); 6640 SDLoc dl(Op); 6641 6642 // Get the correct type for pointers. 6643 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6644 // Negate the size. 6645 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT, 6646 DAG.getConstant(0, dl, PtrVT), Size); 6647 // Construct a node for the frame pointer save index. 6648 SDValue FPSIdx = getFramePointerFrameIndex(DAG); 6649 // Build a DYNALLOC node. 6650 SDValue Ops[3] = { Chain, NegSize, FPSIdx }; 6651 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other); 6652 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops); 6653 } 6654 6655 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op, 6656 SelectionDAG &DAG) const { 6657 MachineFunction &MF = DAG.getMachineFunction(); 6658 6659 bool isPPC64 = Subtarget.isPPC64(); 6660 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6661 6662 int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false); 6663 return DAG.getFrameIndex(FI, PtrVT); 6664 } 6665 6666 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, 6667 SelectionDAG &DAG) const { 6668 SDLoc DL(Op); 6669 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL, 6670 DAG.getVTList(MVT::i32, MVT::Other), 6671 Op.getOperand(0), Op.getOperand(1)); 6672 } 6673 6674 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, 6675 SelectionDAG &DAG) const { 6676 SDLoc DL(Op); 6677 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other, 6678 Op.getOperand(0), Op.getOperand(1)); 6679 } 6680 6681 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 6682 if (Op.getValueType().isVector()) 6683 return LowerVectorLoad(Op, DAG); 6684 6685 assert(Op.getValueType() == MVT::i1 && 6686 "Custom lowering only for i1 loads"); 6687 6688 // First, load 8 bits into 32 bits, then truncate to 1 bit. 6689 6690 SDLoc dl(Op); 6691 LoadSDNode *LD = cast<LoadSDNode>(Op); 6692 6693 SDValue Chain = LD->getChain(); 6694 SDValue BasePtr = LD->getBasePtr(); 6695 MachineMemOperand *MMO = LD->getMemOperand(); 6696 6697 SDValue NewLD = 6698 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain, 6699 BasePtr, MVT::i8, MMO); 6700 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD); 6701 6702 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) }; 6703 return DAG.getMergeValues(Ops, dl); 6704 } 6705 6706 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 6707 if (Op.getOperand(1).getValueType().isVector()) 6708 return LowerVectorStore(Op, DAG); 6709 6710 assert(Op.getOperand(1).getValueType() == MVT::i1 && 6711 "Custom lowering only for i1 stores"); 6712 6713 // First, zero extend to 32 bits, then use a truncating store to 8 bits. 
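  // For example (a sketch of the resulting sequence): "store i1 %v" becomes a
  // ZERO_EXTEND of %v to the pointer-sized integer followed by a truncating
  // i8 store, mirroring the EXTLOAD + TRUNCATE pair that LowerLOAD uses above
  // for "load i1".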
6714 6715 SDLoc dl(Op); 6716 StoreSDNode *ST = cast<StoreSDNode>(Op); 6717 6718 SDValue Chain = ST->getChain(); 6719 SDValue BasePtr = ST->getBasePtr(); 6720 SDValue Value = ST->getValue(); 6721 MachineMemOperand *MMO = ST->getMemOperand(); 6722 6723 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()), 6724 Value); 6725 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO); 6726 } 6727 6728 // FIXME: Remove this once the ANDI glue bug is fixed: 6729 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 6730 assert(Op.getValueType() == MVT::i1 && 6731 "Custom lowering only for i1 results"); 6732 6733 SDLoc DL(Op); 6734 return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1, 6735 Op.getOperand(0)); 6736 } 6737 6738 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when 6739 /// possible. 6740 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 6741 // Not FP? Not a fsel. 6742 if (!Op.getOperand(0).getValueType().isFloatingPoint() || 6743 !Op.getOperand(2).getValueType().isFloatingPoint()) 6744 return Op; 6745 6746 // We might be able to do better than this under some circumstances, but in 6747 // general, fsel-based lowering of select is a finite-math-only optimization. 6748 // For more information, see section F.3 of the 2.06 ISA specification. 6749 if (!DAG.getTarget().Options.NoInfsFPMath || 6750 !DAG.getTarget().Options.NoNaNsFPMath) 6751 return Op; 6752 // TODO: Propagate flags from the select rather than global settings. 6753 SDNodeFlags Flags; 6754 Flags.setNoInfs(true); 6755 Flags.setNoNaNs(true); 6756 6757 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 6758 6759 EVT ResVT = Op.getValueType(); 6760 EVT CmpVT = Op.getOperand(0).getValueType(); 6761 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 6762 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3); 6763 SDLoc dl(Op); 6764 6765 // If the RHS of the comparison is a 0.0, we don't need to do the 6766 // subtraction at all. 6767 SDValue Sel1; 6768 if (isFloatingPointZero(RHS)) 6769 switch (CC) { 6770 default: break; // SETUO etc aren't handled by fsel. 
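    // Reminder of the primitive being targeted (a sketch): fsel FRT,FRA,FRC,FRB
    // computes FRT = (FRA >= 0.0) ? FRC : FRB, so each case below rewrites its
    // comparison into "something >= 0.0" form, swapping or negating operands
    // as needed.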
    case ISD::SETNE:
      std::swap(TV, FV);
      LLVM_FALLTHROUGH;
    case ISD::SETEQ:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
      if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
        Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
    case ISD::SETULT:
    case ISD::SETLT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
      LLVM_FALLTHROUGH;
    case ISD::SETOGE:
    case ISD::SETGE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
    case ISD::SETUGT:
    case ISD::SETGT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setgt
      LLVM_FALLTHROUGH;
    case ISD::SETOLE:
    case ISD::SETLE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
    }

  SDValue Cmp;
  switch (CC) {
  default: break;       // SETUO etc aren't handled by fsel.
  case ISD::SETNE:
    std::swap(TV, FV);
    LLVM_FALLTHROUGH;
  case ISD::SETEQ:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
    if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                       DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
  case ISD::SETULT:
  case ISD::SETLT:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
  case ISD::SETOGE:
  case ISD::SETGE:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
  case ISD::SETUGT:
  case ISD::SETGT:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
  case ISD::SETOLE:
  case ISD::SETLE:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
  }
  return Op;
}

void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
                                               SelectionDAG &DAG,
                                               const SDLoc &dl) const {
  assert(Op.getOperand(0).getValueType().isFloatingPoint());
  SDValue Src = Op.getOperand(0);
  if (Src.getValueType() ==
MVT::f32) 6852 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6853 6854 SDValue Tmp; 6855 switch (Op.getSimpleValueType().SimpleTy) { 6856 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6857 case MVT::i32: 6858 Tmp = DAG.getNode( 6859 Op.getOpcode() == ISD::FP_TO_SINT 6860 ? PPCISD::FCTIWZ 6861 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6862 dl, MVT::f64, Src); 6863 break; 6864 case MVT::i64: 6865 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6866 "i64 FP_TO_UINT is supported only with FPCVT"); 6867 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 6868 PPCISD::FCTIDUZ, 6869 dl, MVT::f64, Src); 6870 break; 6871 } 6872 6873 // Convert the FP value to an int value through memory. 6874 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 6875 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()); 6876 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 6877 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 6878 MachinePointerInfo MPI = 6879 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); 6880 6881 // Emit a store to the stack slot. 6882 SDValue Chain; 6883 if (i32Stack) { 6884 MachineFunction &MF = DAG.getMachineFunction(); 6885 MachineMemOperand *MMO = 6886 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 6887 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 6888 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 6889 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 6890 } else 6891 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI); 6892 6893 // Result is a load from the stack slot. If loading 4 bytes, make sure to 6894 // add in a bias on big endian. 6895 if (Op.getValueType() == MVT::i32 && !i32Stack) { 6896 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 6897 DAG.getConstant(4, dl, FIPtr.getValueType())); 6898 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4); 6899 } 6900 6901 RLI.Chain = Chain; 6902 RLI.Ptr = FIPtr; 6903 RLI.MPI = MPI; 6904 } 6905 6906 /// Custom lowers floating point to integer conversions to use 6907 /// the direct move instructions available in ISA 2.07 to avoid the 6908 /// need for load/store combinations. 6909 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 6910 SelectionDAG &DAG, 6911 const SDLoc &dl) const { 6912 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6913 SDValue Src = Op.getOperand(0); 6914 6915 if (Src.getValueType() == MVT::f32) 6916 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6917 6918 SDValue Tmp; 6919 switch (Op.getSimpleValueType().SimpleTy) { 6920 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6921 case MVT::i32: 6922 Tmp = DAG.getNode( 6923 Op.getOpcode() == ISD::FP_TO_SINT 6924 ? PPCISD::FCTIWZ 6925 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6926 dl, MVT::f64, Src); 6927 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp); 6928 break; 6929 case MVT::i64: 6930 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6931 "i64 FP_TO_UINT is supported only with FPCVT"); 6932 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? 
PPCISD::FCTIDZ : 6933 PPCISD::FCTIDUZ, 6934 dl, MVT::f64, Src); 6935 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp); 6936 break; 6937 } 6938 return Tmp; 6939 } 6940 6941 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 6942 const SDLoc &dl) const { 6943 6944 // FP to INT conversions are legal for f128. 6945 if (EnableQuadPrecision && (Op->getOperand(0).getValueType() == MVT::f128)) 6946 return Op; 6947 6948 // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on 6949 // PPC (the libcall is not available). 6950 if (Op.getOperand(0).getValueType() == MVT::ppcf128) { 6951 if (Op.getValueType() == MVT::i32) { 6952 if (Op.getOpcode() == ISD::FP_TO_SINT) { 6953 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 6954 MVT::f64, Op.getOperand(0), 6955 DAG.getIntPtrConstant(0, dl)); 6956 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 6957 MVT::f64, Op.getOperand(0), 6958 DAG.getIntPtrConstant(1, dl)); 6959 6960 // Add the two halves of the long double in round-to-zero mode. 6961 SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 6962 6963 // Now use a smaller FP_TO_SINT. 6964 return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res); 6965 } 6966 if (Op.getOpcode() == ISD::FP_TO_UINT) { 6967 const uint64_t TwoE31[] = {0x41e0000000000000LL, 0}; 6968 APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31)); 6969 SDValue Tmp = DAG.getConstantFP(APF, dl, MVT::ppcf128); 6970 // X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X 6971 // FIXME: generated code sucks. 6972 // TODO: Are there fast-math-flags to propagate to this FSUB? 6973 SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128, 6974 Op.getOperand(0), Tmp); 6975 True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True); 6976 True = DAG.getNode(ISD::ADD, dl, MVT::i32, True, 6977 DAG.getConstant(0x80000000, dl, MVT::i32)); 6978 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, 6979 Op.getOperand(0)); 6980 return DAG.getSelectCC(dl, Op.getOperand(0), Tmp, True, False, 6981 ISD::SETGE); 6982 } 6983 } 6984 6985 return SDValue(); 6986 } 6987 6988 if (Subtarget.hasDirectMove() && Subtarget.isPPC64()) 6989 return LowerFP_TO_INTDirectMove(Op, DAG, dl); 6990 6991 ReuseLoadInfo RLI; 6992 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6993 6994 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, 6995 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 6996 } 6997 6998 // We're trying to insert a regular store, S, and then a load, L. If the 6999 // incoming value, O, is a load, we might just be able to have our load use the 7000 // address used by O. However, we don't know if anything else will store to 7001 // that address before we can load from it. To prevent this situation, we need 7002 // to insert our load, L, into the chain as a peer of O. To do this, we give L 7003 // the same chain operand as O, we create a token factor from the chain results 7004 // of O and L, and we replace all uses of O's chain result with that token 7005 // factor (see spliceIntoChain below for this last part). 
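// A small picture of the splice (illustrative): if the original load O had
//
//   O.chain-result ---> (O's chain users)
//
// then after canReuseLoadAddress + spliceIntoChain we have
//
//   TokenFactor(O.chain-result, L.chain-result) ---> (O's chain users)
//
// with our new load L sharing O's input chain, so no store can be scheduled
// between O and L.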
bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
                                            ReuseLoadInfo &RLI,
                                            SelectionDAG &DAG,
                                            ISD::LoadExtType ET) const {
  SDLoc dl(Op);
  if (ET == ISD::NON_EXTLOAD &&
      (Op.getOpcode() == ISD::FP_TO_UINT ||
       Op.getOpcode() == ISD::FP_TO_SINT) &&
      isOperationLegalOrCustom(Op.getOpcode(),
                               Op.getOperand(0).getValueType())) {

    LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
    return true;
  }

  LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
  if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
      LD->isNonTemporal())
    return false;
  if (LD->getMemoryVT() != MemVT)
    return false;

  RLI.Ptr = LD->getBasePtr();
  if (LD->isIndexed() && !LD->getOffset().isUndef()) {
    assert(LD->getAddressingMode() == ISD::PRE_INC &&
           "Non-pre-inc AM on PPC?");
    RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
                          LD->getOffset());
  }

  RLI.Chain = LD->getChain();
  RLI.MPI = LD->getPointerInfo();
  RLI.IsDereferenceable = LD->isDereferenceable();
  RLI.IsInvariant = LD->isInvariant();
  RLI.Alignment = LD->getAlignment();
  RLI.AAInfo = LD->getAAInfo();
  RLI.Ranges = LD->getRanges();

  RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
  return true;
}

// Given the head of the old chain, ResChain, insert a token factor containing
// it and NewResChain, and make users of ResChain now be users of that token
// factor.
// TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
                                        SDValue NewResChain,
                                        SelectionDAG &DAG) const {
  if (!ResChain)
    return;

  SDLoc dl(NewResChain);

  SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                           NewResChain, DAG.getUNDEF(MVT::Other));
  assert(TF.getNode() != NewResChain.getNode() &&
         "A new TF really is required here");

  DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
  DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
}

/// Analyze the profitability of a direct move: prefer a float load over an
/// int load plus a direct move when there is no integer use of the int load.
bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
  SDNode *Origin = Op.getOperand(0).getNode();
  if (Origin->getOpcode() != ISD::LOAD)
    return true;

  // If there is no LXSIBZX/LXSIHZX (e.g. on Power8), prefer the direct move
  // if the memory size is 1 or 2 bytes.
  MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
  if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
    return true;

  for (SDNode::use_iterator UI = Origin->use_begin(),
                            UE = Origin->use_end();
       UI != UE; ++UI) {

    // Only look at the users of the loaded value.
    if (UI.getUse().get().getResNo() != 0)
      continue;

    if (UI->getOpcode() != ISD::SINT_TO_FP &&
        UI->getOpcode() != ISD::UINT_TO_FP)
      return true;
  }

  return false;
}

/// Custom lowers integer to floating point conversions to use
/// the direct move instructions available in ISA 2.07 to avoid the
/// need for load/store combinations.
SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
                                                    SelectionDAG &DAG,
                                                    const SDLoc &dl) const {
  assert((Op.getValueType() == MVT::f32 ||
          Op.getValueType() == MVT::f64) &&
         "Invalid floating point type as target of conversion");
  assert(Subtarget.hasFPCVT() &&
         "Int to FP conversions with direct moves require FPCVT");
  SDValue FP;
  SDValue Src = Op.getOperand(0);
  bool SinglePrec = Op.getValueType() == MVT::f32;
  bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
  bool Signed = Op.getOpcode() == ISD::SINT_TO_FP;
  unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) :
                             (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU);

  if (WordInt) {
    FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ,
                     dl, MVT::f64, Src);
    FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
  } else {
    FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src);
    FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
  }

  return FP;
}

SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);

  // Conversions to f128 are legal.
  if (EnableQuadPrecision && (Op.getValueType() == MVT::f128))
    return Op;

  if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) {
    if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64)
      return SDValue();

    SDValue Value = Op.getOperand(0);
    // The values are now known to be -1 (false) or 1 (true). To convert this
    // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by
    // 0.5). This can be done with an fma and the 0.5 constant:
    // (V+1.0)*0.5 = 0.5*V+0.5
    Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

    SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);

    Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

    if (Op.getValueType() != MVT::v4f64)
      Value = DAG.getNode(ISD::FP_ROUND, dl,
                          Op.getValueType(), Value,
                          DAG.getIntPtrConstant(1, dl));
    return Value;
  }

  // Don't handle ppc_fp128 here; let it be lowered to a libcall.
  if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
    return SDValue();

  if (Op.getOperand(0).getValueType() == MVT::i1)
    return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
                       DAG.getConstantFP(1.0, dl, Op.getValueType()),
                       DAG.getConstantFP(0.0, dl, Op.getValueType()));

  // If we have direct moves, we can do the conversion entirely and skip the
  // store/load; however, without FPCVT we can't do most conversions.
  if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
      Subtarget.isPPC64() && Subtarget.hasFPCVT())
    return LowerINT_TO_FPDirectMove(Op, DAG, dl);

  assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
         "UINT_TO_FP is supported only with FPCVT");

  // If we have FCFIDS, then use it when converting to single-precision.
  // Otherwise, convert to double-precision and then round.
  unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                       ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
                                                            : PPCISD::FCFIDS)
                       : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
                                                            : PPCISD::FCFID);
  MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                  ?
MVT::f32 7187 : MVT::f64; 7188 7189 if (Op.getOperand(0).getValueType() == MVT::i64) { 7190 SDValue SINT = Op.getOperand(0); 7191 // When converting to single-precision, we actually need to convert 7192 // to double-precision first and then round to single-precision. 7193 // To avoid double-rounding effects during that operation, we have 7194 // to prepare the input operand. Bits that might be truncated when 7195 // converting to double-precision are replaced by a bit that won't 7196 // be lost at this stage, but is below the single-precision rounding 7197 // position. 7198 // 7199 // However, if -enable-unsafe-fp-math is in effect, accept double 7200 // rounding to avoid the extra overhead. 7201 if (Op.getValueType() == MVT::f32 && 7202 !Subtarget.hasFPCVT() && 7203 !DAG.getTarget().Options.UnsafeFPMath) { 7204 7205 // Twiddle input to make sure the low 11 bits are zero. (If this 7206 // is the case, we are guaranteed the value will fit into the 53 bit 7207 // mantissa of an IEEE double-precision value without rounding.) 7208 // If any of those low 11 bits were not zero originally, make sure 7209 // bit 12 (value 2048) is set instead, so that the final rounding 7210 // to single-precision gets the correct result. 7211 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 7212 SINT, DAG.getConstant(2047, dl, MVT::i64)); 7213 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 7214 Round, DAG.getConstant(2047, dl, MVT::i64)); 7215 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 7216 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 7217 Round, DAG.getConstant(-2048, dl, MVT::i64)); 7218 7219 // However, we cannot use that value unconditionally: if the magnitude 7220 // of the input value is small, the bit-twiddling we did above might 7221 // end up visibly changing the output. Fortunately, in that case, we 7222 // don't need to twiddle bits since the original input will convert 7223 // exactly to double-precision floating-point already. Therefore, 7224 // construct a conditional to use the original value if the top 11 7225 // bits are all sign-bit copies, and use the rounded value computed 7226 // above otherwise. 
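      // Tiny worked example of the twiddle above (illustrative): for
      // SINT = 4097,
      //   SINT & 2047   = 1
      //   + 2047        = 2048   (the sticky bit, value 2048, becomes set)
      //   | SINT        = 6145
      //   & ~2047       = 6144 = 4096 | 2048
      // so the low 11 bits collapse into the single value-2048 bit, which
      // converts to f64 exactly and steers the final round to f32 correctly.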
7227 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 7228 SINT, DAG.getConstant(53, dl, MVT::i32)); 7229 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 7230 Cond, DAG.getConstant(1, dl, MVT::i64)); 7231 Cond = DAG.getSetCC(dl, MVT::i32, 7232 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT); 7233 7234 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 7235 } 7236 7237 ReuseLoadInfo RLI; 7238 SDValue Bits; 7239 7240 MachineFunction &MF = DAG.getMachineFunction(); 7241 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 7242 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, 7243 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 7244 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 7245 } else if (Subtarget.hasLFIWAX() && 7246 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) { 7247 MachineMemOperand *MMO = 7248 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 7249 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7250 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7251 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl, 7252 DAG.getVTList(MVT::f64, MVT::Other), 7253 Ops, MVT::i32, MMO); 7254 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 7255 } else if (Subtarget.hasFPCVT() && 7256 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) { 7257 MachineMemOperand *MMO = 7258 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 7259 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7260 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7261 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl, 7262 DAG.getVTList(MVT::f64, MVT::Other), 7263 Ops, MVT::i32, MMO); 7264 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 7265 } else if (((Subtarget.hasLFIWAX() && 7266 SINT.getOpcode() == ISD::SIGN_EXTEND) || 7267 (Subtarget.hasFPCVT() && 7268 SINT.getOpcode() == ISD::ZERO_EXTEND)) && 7269 SINT.getOperand(0).getValueType() == MVT::i32) { 7270 MachineFrameInfo &MFI = MF.getFrameInfo(); 7271 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7272 7273 int FrameIdx = MFI.CreateStackObject(4, 4, false); 7274 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7275 7276 SDValue Store = 7277 DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx, 7278 MachinePointerInfo::getFixedStack( 7279 DAG.getMachineFunction(), FrameIdx)); 7280 7281 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 7282 "Expected an i32 store"); 7283 7284 RLI.Ptr = FIdx; 7285 RLI.Chain = Store; 7286 RLI.MPI = 7287 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7288 RLI.Alignment = 4; 7289 7290 MachineMemOperand *MMO = 7291 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 7292 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7293 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7294 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ? 7295 PPCISD::LFIWZX : PPCISD::LFIWAX, 7296 dl, DAG.getVTList(MVT::f64, MVT::Other), 7297 Ops, MVT::i32, MMO); 7298 } else 7299 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 7300 7301 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 7302 7303 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 7304 FP = DAG.getNode(ISD::FP_ROUND, dl, 7305 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 7306 return FP; 7307 } 7308 7309 assert(Op.getOperand(0).getValueType() == MVT::i32 && 7310 "Unhandled INT_TO_FP type in custom expander!"); 7311 // Since we only generate this in 64-bit mode, we can take advantage of 7312 // 64-bit registers. 
  // In particular, sign extend the input value into the
  // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
  // then lfd it and fcfid it.
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  SDValue Ld;
  if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
    ReuseLoadInfo RLI;
    bool ReusingLoad;
    if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI,
                                            DAG))) {
      int FrameIdx = MFI.CreateStackObject(4, 4, false);
      SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

      SDValue Store =
          DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
                       MachinePointerInfo::getFixedStack(
                           DAG.getMachineFunction(), FrameIdx));

      assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
             "Expected an i32 store");

      RLI.Ptr = FIdx;
      RLI.Chain = Store;
      RLI.MPI =
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
      RLI.Alignment = 4;
    }

    MachineMemOperand *MMO =
        MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
                                RLI.Alignment, RLI.AAInfo, RLI.Ranges);
    SDValue Ops[] = { RLI.Chain, RLI.Ptr };
    Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ?
                                 PPCISD::LFIWZX : PPCISD::LFIWAX,
                                 dl, DAG.getVTList(MVT::f64, MVT::Other),
                                 Ops, MVT::i32, MMO);
    if (ReusingLoad)
      spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
  } else {
    assert(Subtarget.isPPC64() &&
           "i32->FP without LFIWAX supported only on PPC64");

    int FrameIdx = MFI.CreateStackObject(8, 8, false);
    SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

    SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64,
                                Op.getOperand(0));

    // STD the extended value into the stack slot.
    SDValue Store = DAG.getStore(
        DAG.getEntryNode(), dl, Ext64, FIdx,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));

    // Load the value as a double.
    Ld = DAG.getLoad(
        MVT::f64, dl, Store, FIdx,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
  }

  // FCFID it and return it.
  SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
  if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
    FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
                     DAG.getIntPtrConstant(0, dl));
  return FP;
}

SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  /*
   The rounding mode is in bits 30:31 of FPSCR, and has the following
   settings:
     00 Round to nearest
     01 Round to 0
     10 Round to +inf
     11 Round to -inf

   FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

   To perform the conversion, we do:
     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
   Checking each case: 00 -> 0^1 = 1, 01 -> 1^1 = 0, 10 -> 2^0 = 2,
   and 11 -> 3^0 = 3, which is exactly the mapping required above.
  */

  MachineFunction &MF = DAG.getMachineFunction();
  EVT VT = Op.getValueType();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Save FP Control Word to register
  EVT NodeTys[] = {
    MVT::f64,    // return register
    MVT::Glue    // unused in this context
  };
  SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);

  // Save FP register to stack slot
  int SSFI = MF.getFrameInfo().CreateStackObject(8, 8, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, StackSlot,
                               MachinePointerInfo());

  // Load FP Control Word from low 32 bits of stack slot.
  SDValue Four = DAG.getConstant(4, dl, PtrVT);
  SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
  SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo());

  // Transform as necessary
  SDValue CWD1 =
    DAG.getNode(ISD::AND, dl, MVT::i32,
                CWD, DAG.getConstant(3, dl, MVT::i32));
  SDValue CWD2 =
    DAG.getNode(ISD::SRL, dl, MVT::i32,
                DAG.getNode(ISD::AND, dl, MVT::i32,
                            DAG.getNode(ISD::XOR, dl, MVT::i32,
                                        CWD, DAG.getConstant(3, dl, MVT::i32)),
                            DAG.getConstant(3, dl, MVT::i32)),
                DAG.getConstant(1, dl, MVT::i32));

  SDValue RetVal =
    DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);

  return DAG.getNode((VT.getSizeInBits() < 16 ?
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
}

SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  SDLoc dl(Op);
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SHL!");

  // Expand into a bunch of logical ops.  Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
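  // For example (values illustrative): with BitWidth = 32, the PPCISD shift
  // nodes yield zero for oversized amounts rather than wrapping modulo 32.
  // So for Amt = 40, Tmp2 and Tmp3 are both zero and we get
  // OutHi = Lo << 8, OutLo = 0; for Amt = 8, Tmp6 is zero (its amount,
  // Amt - BitWidth, is treated as oversized) and we get
  // OutHi = (Hi << 8) | (Lo >> 24), OutLo = Lo << 8.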
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  EVT AmtVT = Amt.getValueType();

  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                             DAG.getConstant(BitWidth, dl, AmtVT), Amt);
  SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
  SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
  SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
                             DAG.getConstant(-BitWidth, dl, AmtVT));
  SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
  SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
  SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
  SDValue OutOps[] = { OutLo, OutHi };
  return DAG.getMergeValues(OutOps, dl);
}

SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned BitWidth = VT.getSizeInBits();
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SRL!");

  // Expand into a bunch of logical ops.  Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  EVT AmtVT = Amt.getValueType();

  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                             DAG.getConstant(BitWidth, dl, AmtVT), Amt);
  SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
  SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
  SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
                             DAG.getConstant(-BitWidth, dl, AmtVT));
  SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
  SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
  SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
  SDValue OutOps[] = { OutLo, OutHi };
  return DAG.getMergeValues(OutOps, dl);
}

SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SRA!");

  // Expand into a bunch of logical ops, followed by a select_cc.
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  EVT AmtVT = Amt.getValueType();

  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                             DAG.getConstant(BitWidth, dl, AmtVT), Amt);
  SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
  SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
  SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
                             DAG.getConstant(-BitWidth, dl, AmtVT));
  SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
  SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
  SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
                                  Tmp4, Tmp6, ISD::SETLE);
  SDValue OutOps[] = { OutLo, OutHi };
  return DAG.getMergeValues(OutOps, dl);
}

//===----------------------------------------------------------------------===//
// Vector related lowering.
//

/// BuildSplatI - Build a canonical splati of Val with an element size of
/// SplatSize.  Cast the result to VT.
static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
                           SelectionDAG &DAG, const SDLoc &dl) {
  assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");

  static const MVT VTys[] = { // canonical VT to use for each size.
    MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
  };

  EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];

  // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
  if (Val == -1)
    SplatSize = 1;

  EVT CanonicalVT = VTys[SplatSize-1];

  // Build a canonical splat for this value.
  return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
}

/// BuildIntrinsicOp - Return a unary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
                                const SDLoc &dl, EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = Op.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), Op);
}

/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
                                SelectionDAG &DAG, const SDLoc &dl,
                                EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = LHS.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
}

/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
                                SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
                                EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = Op0.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
}

/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
/// amount.  The result has the specified value type.
static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
                           SelectionDAG &DAG, const SDLoc &dl) {
  // Force LHS/RHS to be the right type.
  LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
  RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);

  int Ops[16];
  for (unsigned i = 0; i != 16; ++i)
    Ops[i] = i + Amt;
  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
  return DAG.getNode(ISD::BITCAST, dl, VT, T);
}

/// Do we have an efficient pattern in a .td file for this node?
///
/// \param V - pointer to the BuildVectorSDNode being matched
/// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
/// \param HasP8Vector - does this subtarget have Power8 vector support?
///
/// There are some patterns where it is beneficial to keep a BUILD_VECTOR
/// node as a BUILD_VECTOR node rather than expanding it. The patterns where
/// the opposite is true (expansion is beneficial) are:
/// - The node builds a vector out of integers that are not 32 or 64-bits
/// - The node builds a vector out of constants
/// - The node is a "load-and-splat"
/// In all other cases, we will choose to keep the BUILD_VECTOR.
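/// For example: a v2f64 node built from two distinct in-register doubles is
/// kept, since a .td pattern can assemble the vector directly, while a node
/// that splats a single loaded value is expanded so it can be selected as a
/// load-and-splat.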
static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
                                            bool HasDirectMove,
                                            bool HasP8Vector) {
  EVT VecVT = V->getValueType(0);
  bool RightType = VecVT == MVT::v2f64 ||
    (HasP8Vector && VecVT == MVT::v4f32) ||
    (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
  if (!RightType)
    return false;

  bool IsSplat = true;
  bool IsLoad = false;
  SDValue Op0 = V->getOperand(0);

  // This function is called in a block that confirms the node is not a
  // constant splat. So a constant BUILD_VECTOR here means the vector is
  // built out of different constants.
  if (V->isConstant())
    return false;
  for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
    if (V->getOperand(i).isUndef())
      return false;
    // We want to expand nodes that represent load-and-splat even if the
    // loaded value is a floating point truncation or conversion to int.
    if (V->getOperand(i).getOpcode() == ISD::LOAD ||
        (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
        (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
        (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
      IsLoad = true;
    // If the operands are different or the input is not a load and has more
    // uses than just this BV node, then it isn't a splat.
    if (V->getOperand(i) != Op0 ||
        (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
      IsSplat = false;
  }
  return !(IsSplat && IsLoad);
}

// If this is a case we can't handle, return null and let the default
// expansion code take care of it.  If we CAN select this case, and if it
// selects to a single instruction, return Op.  Otherwise, if we can codegen
// this case more efficiently than a constant pool load, lower it to the
// sequence of ops that should be used.
SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc dl(Op);
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");

  if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) {
    // We first build an i32 vector, load it into a QPX register,
    // then convert it to a floating-point vector and compare it
    // to a zero vector to get the boolean result.
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    int FrameIdx = MFI.CreateStackObject(16, 16, false);
    MachinePointerInfo PtrInfo =
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

    assert(BVN->getNumOperands() == 4 &&
           "BUILD_VECTOR for v4i1 does not have 4 operands");

    bool IsConst = true;
    for (unsigned i = 0; i < 4; ++i) {
      if (BVN->getOperand(i).isUndef()) continue;
      if (!isa<ConstantSDNode>(BVN->getOperand(i))) {
        IsConst = false;
        break;
      }
    }

    if (IsConst) {
      Constant *One =
          ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0);
      Constant *NegOne =
          ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0);

      Constant *CV[4];
      for (unsigned i = 0; i < 4; ++i) {
        if (BVN->getOperand(i).isUndef())
          CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext()));
        else if (isNullConstant(BVN->getOperand(i)))
          CV[i] = NegOne;
        else
          CV[i] = One;
      }

      Constant *CP = ConstantVector::get(CV);
      SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()),
                                          16 /* alignment */);

      SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
      SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other});
      return DAG.getMemIntrinsicNode(
          PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    }

    SmallVector<SDValue, 4> Stores;
    for (unsigned i = 0; i < 4; ++i) {
      if (BVN->getOperand(i).isUndef()) continue;

      unsigned Offset = 4*i;
      SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
      Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

      unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize();
      if (StoreSize > 4) {
        Stores.push_back(
            DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx,
                              PtrInfo.getWithOffset(Offset), MVT::i32));
      } else {
        SDValue StoreValue = BVN->getOperand(i);
        if (StoreSize < 4)
          StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue);

        Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx,
                                      PtrInfo.getWithOffset(Offset)));
      }
    }

    SDValue StoreChain;
    if (!Stores.empty())
      StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
    else
      StoreChain = DAG.getEntryNode();

    // Now load from v4i32 into the QPX register; this will extend it to
    // v4i64 but not yet convert it to a floating point. Nevertheless, this
    // is typed as v4f64 because the QPX register integer states are not
    // explicitly represented.

    SDValue Ops[] = {StoreChain,
                     DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32),
                     FIdx};
    SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other});

    SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN,
        dl, VTs, Ops, MVT::v4i32, PtrInfo);
    LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
        DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32),
        LoadedVect);

    SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64);

    return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ);
  }

  // All other QPX vectors are handled by generic code.
  if (Subtarget.hasQPX())
    return SDValue();

  // Check if this is a splat of a constant value.
  APInt APSplatBits, APSplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
                            HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
      SplatBitSize > 32) {
    // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be
    // lowered to VSX instructions under certain conditions.
    // Without VSX, there is no pattern more efficient than expanding the node.
    if (Subtarget.hasVSX() &&
        haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
                                        Subtarget.hasP8Vector()))
      return Op;
    return SDValue();
  }

  unsigned SplatBits = APSplatBits.getZExtValue();
  unsigned SplatUndef = APSplatUndef.getZExtValue();
  unsigned SplatSize = SplatBitSize / 8;

  // First, handle single instruction cases.

  // All zeros?
  if (SplatBits == 0) {
    // Canonicalize all zero vectors to be v4i32.
    if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
      SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
      Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
    }
    return Op;
  }

  // We have XXSPLTIB for constant splats one byte wide.
  if (Subtarget.hasP9Vector() && SplatSize == 1) {
    // This is a splat of 1-byte elements with some elements potentially undef.
    // Rather than trying to match undef in the SDAG patterns, ensure that all
    // elements are the same constant.
    if (HasAnyUndefs || ISD::isBuildVectorAllOnes(BVN)) {
      SmallVector<SDValue, 16> Ops(16, DAG.getConstant(SplatBits,
                                                       dl, MVT::i32));
      SDValue NewBV = DAG.getBuildVector(MVT::v16i8, dl, Ops);
      if (Op.getValueType() != MVT::v16i8)
        return DAG.getBitcast(Op.getValueType(), NewBV);
      return NewBV;
    }

    // BuildVectorSDNode::isConstantSplat() is actually pretty smart. It'll
    // detect that constant splats like v8i16: 0xABAB are really just splats
    // of a 1-byte constant. In this case, we need to convert the node to a
    // splat of v16i8 and a bitcast.
    if (Op.getValueType() != MVT::v16i8)
      return DAG.getBitcast(Op.getValueType(),
                            DAG.getConstant(SplatBits, dl, MVT::v16i8));

    return Op;
  }

  // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
  int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
                     (32-SplatBitSize));
  if (SextVal >= -16 && SextVal <= 15)
    return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);

  // Two instruction sequences.

  // If this value is in the range [-32,30] and is even, use:
  //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
  // If this value is in the range [17,31] and is odd, use:
  //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
  // If this value is in the range [-31,-17] and is odd, use:
  //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
  // Note the last two are three-instruction sequences.
  if (SextVal >= -32 && SextVal <= 31) {
    // To avoid having these optimizations undone by constant folding,
    // we convert to a pseudo that will be expanded later into one of
    // the above forms.
    SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
    EVT VT = (SplatSize == 1 ? MVT::v16i8 :
              (SplatSize == 2 ?
               MVT::v8i16 : MVT::v4i32));
    SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
    SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
    if (VT == Op.getValueType())
      return RetVal;
    else
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
  }

  // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
  // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
  // for fneg/fabs.
  if (SplatSize == 4 && SplatBits == (0x7FFFFFFF & ~SplatUndef)) {
    // Make -1 and vspltisw -1:
    SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);

    // Make the VSLW intrinsic, computing 0x8000_0000.
    SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                   OnesV, DAG, dl);

    // xor by OnesV to invert it.
    Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
  }

  // Check to see if this is a wide variety of vsplti*, binop self cases.
  static const signed char SplatCsts[] = {
    -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
    -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
  };

  for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous (e.g. formation of 0x8000_0000).
    int i = SplatCsts[idx];

    // Figure out what shift amount will be used by altivec if shifted by i in
    // this splat size.
    unsigned TypeShiftAmt = i & (SplatBitSize-1);

    // vsplti + shl self.
    if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
        Intrinsic::ppc_altivec_vslw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + srl self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
        Intrinsic::ppc_altivec_vsrw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + sra self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
        Intrinsic::ppc_altivec_vsraw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + rol self.
    if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                         ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
        Intrinsic::ppc_altivec_vrlw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // t = vsplti c, result = vsldoi t, t, 1
    if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 2
    if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 3
    if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
  }

  return SDValue();
}

/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
                                      SDValue RHS, SelectionDAG &DAG,
                                      const SDLoc &dl) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);

  enum {
    OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
    OP_VMRGHW,
    OP_VMRGLW,
    OP_VSPLTISW0,
    OP_VSPLTISW1,
    OP_VSPLTISW2,
    OP_VSPLTISW3,
    OP_VSLDOI4,
    OP_VSLDOI8,
    OP_VSLDOI12
  };

  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }

  SDValue OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);

  int ShufIdxs[16];
  switch (OpNum) {
  default: llvm_unreachable("Unknown i32 permute!");
  case OP_VMRGHW:
    ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
    ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
    ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
    ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
    break;
  case OP_VMRGLW:
    ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
    ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
    ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
    ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
    break;
  case OP_VSPLTISW0:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+0;
    break;
  case OP_VSPLTISW1:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+4;
    break;
  case OP_VSPLTISW2:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+8;
    break;
  case OP_VSPLTISW3:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+12;
    break;
  case OP_VSLDOI4:
    return
        BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI8:
    return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI12:
    return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
  }
  EVT VT = OpLHS.getValueType();
  OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
  OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
  return DAG.getNode(ISD::BITCAST, dl, VT, T);
}

/// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
/// by the VINSERTB instruction introduced in ISA 3.0, else just return default
/// SDValue.
SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
                                           SelectionDAG &DAG) const {
  const unsigned BytesInVector = 16;
  bool IsLE = Subtarget.isLittleEndian();
  SDLoc dl(N);
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);
  unsigned ShiftElts = 0, InsertAtByte = 0;
  bool Swap = false;

  // Shifts required to get the byte we want at element 7.
  unsigned LittleEndianShifts[] = {8, 7, 6, 5, 4, 3, 2, 1,
                                   0, 15, 14, 13, 12, 11, 10, 9};
  unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
                                1, 2, 3, 4, 5, 6, 7, 8};

  ArrayRef<int> Mask = N->getMask();
  int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};

  // For each mask element, find out if we're just inserting something
  // from V2 into V1 or vice versa.
  // Possible permutations inserting an element from V2 into V1:
  //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
  //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
  //   ...
  //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
  // Inserting from V1 into V2 will be similar, except mask range will be
  // [16,31].

  bool FoundCandidate = false;
  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
  // Go through the mask of bytes to find an element that's being moved
  // from one vector to the other.
  for (unsigned i = 0; i < BytesInVector; ++i) {
    unsigned CurrentElement = Mask[i];
    // If the 2nd operand is undefined, we should only look for element 7 in
    // the Mask.
    if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
      continue;

    bool OtherElementsInOrder = true;
    // Examine the other elements in the Mask to see if they're in original
    // order.
    for (unsigned j = 0; j < BytesInVector; ++j) {
      if (j == i)
        continue;
      // If CurrentElement is from V1 [0,15], then we expect the rest of the
      // Mask to be from V2 [16,31] and vice versa.  Unless the 2nd operand is
      // undefined, in which case we always assume we're picking from the 1st
      // operand.
      int MaskOffset =
          (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
      if (Mask[j] != OriginalOrder[j] + MaskOffset) {
        OtherElementsInOrder = false;
        break;
      }
    }
    // If the other elements are in original order, we record the number of
    // shifts we need to get the element we want into element 7.  Also record
    // which byte in the vector we should insert into.
    if (OtherElementsInOrder) {
      // If the 2nd operand is undefined, we assume no shifts and no swapping.
      if (V2.isUndef()) {
        ShiftElts = 0;
        Swap = false;
      } else {
        // We only need the last 4 bits for shifts because the operands will
        // be swapped if CurrentElement is >= 2^4.
        ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
                         : BigEndianShifts[CurrentElement & 0xF];
        Swap = CurrentElement < BytesInVector;
      }
      InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
      FoundCandidate = true;
      break;
    }
  }

  if (!FoundCandidate)
    return SDValue();

  // Candidate found, construct the proper SDAG sequence with VINSERTB,
  // optionally with VECSHL if shift is required.
  if (Swap)
    std::swap(V1, V2);
  if (V2.isUndef())
    V2 = V1;
  if (ShiftElts) {
    SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
                              DAG.getConstant(ShiftElts, dl, MVT::i32));
    return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
                       DAG.getConstant(InsertAtByte, dl, MVT::i32));
  }
  return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
                     DAG.getConstant(InsertAtByte, dl, MVT::i32));
}

/// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
/// by the VINSERTH instruction introduced in ISA 3.0, else just return default
/// SDValue.
SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
                                           SelectionDAG &DAG) const {
  const unsigned NumHalfWords = 8;
  const unsigned BytesInVector = NumHalfWords * 2;
  // Check that the shuffle is on half-words.
  if (!isNByteElemShuffleMask(N, 2, 1))
    return SDValue();

  bool IsLE = Subtarget.isLittleEndian();
  SDLoc dl(N);
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);
  unsigned ShiftElts = 0, InsertAtByte = 0;
  bool Swap = false;

  // Shifts required to get the half-word we want at element 3.
  unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
  unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};

  uint32_t Mask = 0;
  uint32_t OriginalOrderLow = 0x1234567;
  uint32_t OriginalOrderHigh = 0x89ABCDEF;
  // Now we look at mask elements 0,2,4,6,8,10,12,14.  Pack the mask into a
  // 32-bit space, only needing 4-bit nibbles per element.
  for (unsigned i = 0; i < NumHalfWords; ++i) {
    unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
    Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
  }

  // For each mask element, find out if we're just inserting something
  // from V2 into V1 or vice versa.  Possible permutations inserting an element
  // from V2 into V1:
  //   X, 1, 2, 3, 4, 5, 6, 7
  //   0, X, 2, 3, 4, 5, 6, 7
  //   0, 1, X, 3, 4, 5, 6, 7
  //   0, 1, 2, X, 4, 5, 6, 7
  //   0, 1, 2, 3, X, 5, 6, 7
  //   0, 1, 2, 3, 4, X, 6, 7
  //   0, 1, 2, 3, 4, 5, X, 7
  //   0, 1, 2, 3, 4, 5, 6, X
  // Inserting from V1 into V2 will be similar, except mask range will be
  // [8,15].

  bool FoundCandidate = false;
  // Go through the mask of half-words to find an element that's being moved
  // from one vector to the other.
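  // For example (illustrative): the half-word mask <0, 1, 2, 3, 12, 5, 6, 7>
  // keeps V1 in order except for element 4, which is taken from half-word 4
  // of V2 (mask values 8-15 index into V2), so it can be handled by a single
  // insert of that half-word into V1.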
  for (unsigned i = 0; i < NumHalfWords; ++i) {
    unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
    uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
    uint32_t MaskOtherElts = ~(0xF << MaskShift);
    uint32_t TargetOrder = 0x0;

    // If both vector operands for the shuffle are the same vector, the mask
    // will contain only elements from the first one and the second one will
    // be undef.
    if (V2.isUndef()) {
      ShiftElts = 0;
      unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
      TargetOrder = OriginalOrderLow;
      Swap = false;
      // Skip if this is not the correct element, or if the mask of the other
      // elements does not match our expected order.
      if (MaskOneElt == VINSERTHSrcElem &&
          (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
        InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
        FoundCandidate = true;
        break;
      }
    } else { // If both operands are defined.
      // Target order is [8,15] if the current mask is between [0,7].
      TargetOrder =
          (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
      // Skip if the mask of the other elements does not match our expected
      // order.
      if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
        // We only need the last 3 bits for the number of shifts.
        ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
                         : BigEndianShifts[MaskOneElt & 0x7];
        InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
        Swap = MaskOneElt < NumHalfWords;
        FoundCandidate = true;
        break;
      }
    }
  }

  if (!FoundCandidate)
    return SDValue();

  // Candidate found, construct the proper SDAG sequence with VINSERTH,
  // optionally with VECSHL if shift is required.
  if (Swap)
    std::swap(V1, V2);
  if (V2.isUndef())
    V2 = V1;
  SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
  if (ShiftElts) {
    // Double ShiftElts because we're left shifting on v16i8 type.
    SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
                              DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
    SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
    SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
                              DAG.getConstant(InsertAtByte, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
  }
  SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
  SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
                            DAG.getConstant(InsertAtByte, dl, MVT::i32));
  return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
}

/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
/// is a shuffle we can handle in a single instruction, return it.  Otherwise,
/// return the code it can be lowered into.  Worst case, it can always be
/// lowered into a vperm.
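/// The cases below are tried roughly from cheapest to most general: single
/// ISA 3.0 insert/shift instructions, fixed Altivec/VSX permutations, the
/// perfect-shuffle table for shuffles of 4-byte elements, and finally a
/// vperm with a constant-pool mask.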
SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  EVT VT = Op.getValueType();
  bool isLittleEndian = Subtarget.isLittleEndian();

  unsigned ShiftElts, InsertAtByte;
  bool Swap = false;
  if (Subtarget.hasP9Vector() &&
      PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
                           isLittleEndian)) {
    if (Swap)
      std::swap(V1, V2);
    SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
    SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
    if (ShiftElts) {
      SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
                                DAG.getConstant(ShiftElts, dl, MVT::i32));
      SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
                                DAG.getConstant(InsertAtByte, dl, MVT::i32));
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
    }
    SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
                              DAG.getConstant(InsertAtByte, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
  }

  if (Subtarget.hasP9Altivec()) {
    SDValue NewISDNode;
    if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
      return NewISDNode;

    if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
      return NewISDNode;
  }

  if (Subtarget.hasVSX() &&
      PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
    if (Swap)
      std::swap(V1, V2);
    SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
    SDValue Conv2 =
        DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);

    SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
                              DAG.getConstant(ShiftElts, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
  }

  if (Subtarget.hasVSX() &&
      PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
    if (Swap)
      std::swap(V1, V2);
    SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
    SDValue Conv2 =
        DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ?
                    V1 : V2);

    SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
  }

  if (Subtarget.hasP9Vector()) {
    if (PPC::isXXBRHShuffleMask(SVOp)) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
      SDValue ReveHWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v8i16, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
    } else if (PPC::isXXBRWShuffleMask(SVOp)) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
      SDValue ReveWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v4i32, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
    } else if (PPC::isXXBRDShuffleMask(SVOp)) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
      SDValue ReveDWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
    } else if (PPC::isXXBRQShuffleMask(SVOp)) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
      SDValue ReveQWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v1i128, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
    }
  }

  if (Subtarget.hasVSX()) {
    if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
      int SplatIdx = PPC::getVSPLTImmediate(SVOp, 4, DAG);

      // If the source for the shuffle is a scalar_to_vector that came from a
      // 32-bit load, it will have used LXVWSX so we don't need to splat again.
      if (Subtarget.hasP9Vector() &&
          ((isLittleEndian && SplatIdx == 3) ||
           (!isLittleEndian && SplatIdx == 0))) {
        SDValue Src = V1.getOperand(0);
        if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR &&
            Src.getOperand(0).getOpcode() == ISD::LOAD &&
            Src.getOperand(0).hasOneUse())
          return V1;
      }
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
      SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
                                  DAG.getConstant(SplatIdx, dl, MVT::i32));
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
    }

    // Left shifts of 8 bytes are actually swaps. Convert accordingly.
    if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
      SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
    }
  }

  if (Subtarget.hasQPX()) {
    if (VT.getVectorNumElements() != 4)
      return SDValue();

    if (V2.isUndef()) V2 = V1;

    int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp);
    if (AlignIdx != -1) {
      return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2,
                         DAG.getConstant(AlignIdx, dl, MVT::i32));
    } else if (SVOp->isSplat()) {
      int SplatIdx = SVOp->getSplatIndex();
      if (SplatIdx >= 4) {
        std::swap(V1, V2);
        SplatIdx -= 4;
      }

      return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1,
                         DAG.getConstant(SplatIdx, dl, MVT::i32));
    }

    // Lower this into a qvgpci/qvfperm pair.

    // Compute the qvgpci literal
    unsigned idx = 0;
    for (unsigned i = 0; i < 4; ++i) {
      int m = SVOp->getMaskElt(i);
      unsigned mm = m >= 0 ?
                        (unsigned) m : i;
      idx |= mm << (3-i)*3;
    }

    SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64,
                             DAG.getConstant(idx, dl, MVT::i32));
    return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3);
  }

  // Cases that are handled by instructions that take permute immediates
  // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
  // selected by the instruction selector.
  if (V2.isUndef()) {
    if (PPC::isSplatShuffleMask(SVOp, 1) ||
        PPC::isSplatShuffleMask(SVOp, 2) ||
        PPC::isSplatShuffleMask(SVOp, 4) ||
        PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
        PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
        PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
        PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
        PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
        PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
        PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
        PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
        PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
        (Subtarget.hasP8Altivec() && (
          PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
          PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
          PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
      return Op;
    }
  }

  // Altivec has a variety of "shuffle immediates" that take two vector inputs
  // and produce a fixed permutation.  If any of these match, do not lower to
  // VPERM.
  unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
  if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
      PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
      PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
      PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
      PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
      PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
      (Subtarget.hasP8Altivec() && (
        PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
        PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
        PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
    return Op;

  // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
  // perfect shuffle table to emit an optimal matching sequence.
  ArrayRef<int> PermMask = SVOp->getMask();

  unsigned PFIndexes[4];
  bool isFourElementShuffle = true;
  for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
    unsigned EltNo = 8;   // Start out undef.
    for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
      if (PermMask[i*4+j] < 0)
        continue;   // Undef, ignore it.

      unsigned ByteSource = PermMask[i*4+j];
      if ((ByteSource & 3) != j) {
        isFourElementShuffle = false;
        break;
      }

      if (EltNo == 8) {
        EltNo = ByteSource/4;
      } else if (EltNo != ByteSource/4) {
        isFourElementShuffle = false;
        break;
      }
    }
    PFIndexes[i] = EltNo;
  }

  // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
  // perfect shuffle vector to determine if it is cost effective to do this as
  // discrete instructions, or whether we should use a vperm.
  // For now, we skip this for little endian until such time as we have a
  // little-endian perfect shuffle table.
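  // A note on the encoding (for reference): each of the four 4-byte result
  // elements comes from one of eight source elements or is undef (encoded as
  // 8), so the four PFIndexes values are combined as base-9 digits to form
  // the table index computed below.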
  if (isFourElementShuffle && !isLittleEndian) {
    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];

    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    // Determining when to avoid vperm is tricky.  Many things affect the cost
    // of vperm, particularly how many times the perm mask needs to be
    // computed.  For example, if the perm mask can be hoisted out of a loop
    // or is already used (perhaps because there are multiple permutes with
    // the same shuffle mask?) the vperm has a cost of 1.  OTOH, hoisting the
    // permute mask out of the loop requires an extra register.
    //
    // As a compromise, we only emit discrete instructions if the shuffle can
    // be generated in 3 or fewer operations.  When we have loop information
    // available, if this block is within a loop, we should avoid using vperm
    // for 3-operation perms and use a constant pool load instead.
    if (Cost < 3)
      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
  }

  // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
  // vector that will get spilled to the constant pool.
  if (V2.isUndef()) V2 = V1;

  // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
  // that it is in input element units, not in bytes.  Convert now.

  // For little endian, the order of the input vectors is reversed, and
  // the permutation mask is complemented with respect to 31.  This is
  // necessary to produce proper semantics with the big-endian-biased vperm
  // instruction.
  EVT EltVT = V1.getValueType().getVectorElementType();
  unsigned BytesPerElement = EltVT.getSizeInBits()/8;

  SmallVector<SDValue, 16> ResultMask;
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
    unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];

    for (unsigned j = 0; j != BytesPerElement; ++j)
      if (isLittleEndian)
        ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
                                             dl, MVT::i32));
      else
        ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
                                             MVT::i32));
  }

  SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
  if (isLittleEndian)
    return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
                       V2, V1, VPermMask);
  else
    return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
                       V1, V2, VPermMask);
}

/// getVectorCompareInfo - Given an intrinsic, return false if it is not a
/// vector comparison.  If it is, return true and fill in CompareOpc/isDot
/// with information about the intrinsic.
static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
                                 bool &isDot, const PPCSubtarget &Subtarget) {
  unsigned IntrinsicID =
      cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
  CompareOpc = -1;
  isDot = false;
  switch (IntrinsicID) {
  default:
    return false;
  // Comparison predicates.
  case Intrinsic::ppc_altivec_vcmpbfp_p:
    CompareOpc = 966;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpeqfp_p:
    CompareOpc = 198;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequb_p:
    CompareOpc = 6;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequh_p:
    CompareOpc = 70;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequw_p:
    CompareOpc = 134;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequd_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 199;
      isDot = true;
    } else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpneb_p:
  case Intrinsic::ppc_altivec_vcmpneh_p:
  case Intrinsic::ppc_altivec_vcmpnew_p:
  case Intrinsic::ppc_altivec_vcmpnezb_p:
  case Intrinsic::ppc_altivec_vcmpnezh_p:
  case Intrinsic::ppc_altivec_vcmpnezw_p:
    if (Subtarget.hasP9Altivec()) {
      switch (IntrinsicID) {
      default:
        llvm_unreachable("Unknown comparison intrinsic.");
      case Intrinsic::ppc_altivec_vcmpneb_p:
        CompareOpc = 7;
        break;
      case Intrinsic::ppc_altivec_vcmpneh_p:
        CompareOpc = 71;
        break;
      case Intrinsic::ppc_altivec_vcmpnew_p:
        CompareOpc = 135;
        break;
      case Intrinsic::ppc_altivec_vcmpnezb_p:
        CompareOpc = 263;
        break;
      case Intrinsic::ppc_altivec_vcmpnezh_p:
        CompareOpc = 327;
        break;
      case Intrinsic::ppc_altivec_vcmpnezw_p:
        CompareOpc = 391;
        break;
      }
      isDot = true;
    } else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgefp_p:
    CompareOpc = 454;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtfp_p:
    CompareOpc = 710;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsb_p:
    CompareOpc = 774;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsh_p:
    CompareOpc = 838;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsw_p:
    CompareOpc = 902;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsd_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 967;
      isDot = true;
    } else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgtub_p:
    CompareOpc = 518;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuh_p:
    CompareOpc = 582;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuw_p:
    CompareOpc = 646;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtud_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 711;
      isDot = true;
    } else
      return false;
    break;

  // VSX predicate comparisons use the same infrastructure
  case Intrinsic::ppc_vsx_xvcmpeqdp_p:
  case Intrinsic::ppc_vsx_xvcmpgedp_p:
  case Intrinsic::ppc_vsx_xvcmpgtdp_p:
  case Intrinsic::ppc_vsx_xvcmpeqsp_p:
  case Intrinsic::ppc_vsx_xvcmpgesp_p:
  case Intrinsic::ppc_vsx_xvcmpgtsp_p:
    if (Subtarget.hasVSX()) {
      switch (IntrinsicID) {
      case Intrinsic::ppc_vsx_xvcmpeqdp_p:
        CompareOpc = 99;
        break;
      case Intrinsic::ppc_vsx_xvcmpgedp_p:
        CompareOpc = 115;
        break;
      case Intrinsic::ppc_vsx_xvcmpgtdp_p:
        CompareOpc = 107;
        break;
      case Intrinsic::ppc_vsx_xvcmpeqsp_p:
        CompareOpc = 67;
        break;
      case Intrinsic::ppc_vsx_xvcmpgesp_p:
        CompareOpc = 83;
        break;
      case Intrinsic::ppc_vsx_xvcmpgtsp_p:
        CompareOpc = 75;
        break;
      }
      isDot = true;
    } else
      return false;
    break;

  // Normal Comparisons.
  case Intrinsic::ppc_altivec_vcmpbfp:
    CompareOpc = 966;
    break;
  case Intrinsic::ppc_altivec_vcmpeqfp:
    CompareOpc = 198;
    break;
  case Intrinsic::ppc_altivec_vcmpequb:
    CompareOpc = 6;
    break;
  case Intrinsic::ppc_altivec_vcmpequh:
    CompareOpc = 70;
    break;
  case Intrinsic::ppc_altivec_vcmpequw:
    CompareOpc = 134;
    break;
  case Intrinsic::ppc_altivec_vcmpequd:
    if (Subtarget.hasP8Altivec())
      CompareOpc = 199;
    else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpneb:
  case Intrinsic::ppc_altivec_vcmpneh:
  case Intrinsic::ppc_altivec_vcmpnew:
  case Intrinsic::ppc_altivec_vcmpnezb:
  case Intrinsic::ppc_altivec_vcmpnezh:
  case Intrinsic::ppc_altivec_vcmpnezw:
    if (Subtarget.hasP9Altivec())
      switch (IntrinsicID) {
      default:
        llvm_unreachable("Unknown comparison intrinsic.");
      case Intrinsic::ppc_altivec_vcmpneb:
        CompareOpc = 7;
        break;
      case Intrinsic::ppc_altivec_vcmpneh:
        CompareOpc = 71;
        break;
      case Intrinsic::ppc_altivec_vcmpnew:
        CompareOpc = 135;
        break;
      case Intrinsic::ppc_altivec_vcmpnezb:
        CompareOpc = 263;
        break;
      case Intrinsic::ppc_altivec_vcmpnezh:
        CompareOpc = 327;
        break;
      case Intrinsic::ppc_altivec_vcmpnezw:
        CompareOpc = 391;
        break;
      }
    else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgefp:
    CompareOpc = 454;
    break;
  case Intrinsic::ppc_altivec_vcmpgtfp:
    CompareOpc = 710;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsb:
    CompareOpc = 774;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsh:
    CompareOpc = 838;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsw:
    CompareOpc = 902;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsd:
    if (Subtarget.hasP8Altivec())
      CompareOpc = 967;
    else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgtub:
    CompareOpc = 518;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuh:
    CompareOpc = 582;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuw:
    CompareOpc = 646;
    break;
  case Intrinsic::ppc_altivec_vcmpgtud:
    if (Subtarget.hasP8Altivec())
      CompareOpc = 711;
    else
      return false;
    break;
  }
  return true;
}

/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
/// lower, do it, otherwise return null.
SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                   SelectionDAG &DAG) const {
  unsigned IntrinsicID =
      cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  SDLoc dl(Op);

  if (IntrinsicID == Intrinsic::thread_pointer) {
    // Reads the thread pointer register, used for __builtin_thread_pointer.
    if (Subtarget.isPPC64())
      return DAG.getRegister(PPC::X13, MVT::i64);
    return DAG.getRegister(PPC::R2, MVT::i32);
  }

  // We are looking for absolute values here.
8800 // The idea is to try to fit one of two patterns: 8801 // max (a, (0-a)) OR max ((0-a), a) 8802 if (Subtarget.hasP9Vector() && 8803 (IntrinsicID == Intrinsic::ppc_altivec_vmaxsw || 8804 IntrinsicID == Intrinsic::ppc_altivec_vmaxsh || 8805 IntrinsicID == Intrinsic::ppc_altivec_vmaxsb)) { 8806 SDValue V1 = Op.getOperand(1); 8807 SDValue V2 = Op.getOperand(2); 8808 if (V1.getSimpleValueType() == V2.getSimpleValueType() && 8809 (V1.getSimpleValueType() == MVT::v4i32 || 8810 V1.getSimpleValueType() == MVT::v8i16 || 8811 V1.getSimpleValueType() == MVT::v16i8)) { 8812 if ( V1.getOpcode() == ISD::SUB && 8813 ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) && 8814 V1.getOperand(1) == V2 ) { 8815 // Generate the abs instruction with the operands 8816 return DAG.getNode(ISD::ABS, dl, V2.getValueType(),V2); 8817 } 8818 8819 if ( V2.getOpcode() == ISD::SUB && 8820 ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) && 8821 V2.getOperand(1) == V1 ) { 8822 // Generate the abs instruction with the operands 8823 return DAG.getNode(ISD::ABS, dl, V1.getValueType(),V1); 8824 } 8825 } 8826 } 8827 8828 // If this is a lowered altivec predicate compare, CompareOpc is set to the 8829 // opcode number of the comparison. 8830 int CompareOpc; 8831 bool isDot; 8832 if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget)) 8833 return SDValue(); // Don't custom lower most intrinsics. 8834 8835 // If this is a non-dot comparison, make the VCMP node and we are done. 8836 if (!isDot) { 8837 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 8838 Op.getOperand(1), Op.getOperand(2), 8839 DAG.getConstant(CompareOpc, dl, MVT::i32)); 8840 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 8841 } 8842 8843 // Create the PPCISD altivec 'dot' comparison node. 8844 SDValue Ops[] = { 8845 Op.getOperand(2), // LHS 8846 Op.getOperand(3), // RHS 8847 DAG.getConstant(CompareOpc, dl, MVT::i32) 8848 }; 8849 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue }; 8850 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 8851 8852 // Now that we have the comparison, emit a copy from the CR to a GPR. 8853 // This is flagged to the above dot comparison. 8854 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32, 8855 DAG.getRegister(PPC::CR6, MVT::i32), 8856 CompNode.getValue(1)); 8857 8858 // Unpack the result based on how the target uses it. 8859 unsigned BitNo; // Bit # of CR6. 8860 bool InvertBit; // Invert result? 8861 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 8862 default: // Can't happen, don't crash on invalid number though. 8863 case 0: // Return the value of the EQ bit of CR6. 8864 BitNo = 0; InvertBit = false; 8865 break; 8866 case 1: // Return the inverted value of the EQ bit of CR6. 8867 BitNo = 0; InvertBit = true; 8868 break; 8869 case 2: // Return the value of the LT bit of CR6. 8870 BitNo = 2; InvertBit = false; 8871 break; 8872 case 3: // Return the inverted value of the LT bit of CR6. 8873 BitNo = 2; InvertBit = true; 8874 break; 8875 } 8876 8877 // Shift the bit into the low position. 8878 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags, 8879 DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32)); 8880 // Isolate the bit. 8881 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags, 8882 DAG.getConstant(1, dl, MVT::i32)); 8883 8884 // If we are supposed to, toggle the bit. 
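// Worked example of the unpack sequence (illustrative): for predicate 1, the
// inverted EQ bit, BitNo == 0 and the SRL amount is 8 - (3 - 0) == 5. After
// MFOCRF the CR6 field occupies bits 7..4 of the GPR (LT at bit 7, EQ at
// bit 5), so the shift-and-mask isolates the bit and the XOR below flips it.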
8885 if (InvertBit) 8886 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags, 8887 DAG.getConstant(1, dl, MVT::i32)); 8888 return Flags; 8889 } 8890 8891 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op, 8892 SelectionDAG &DAG) const { 8893 // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to 8894 // the beginning of the argument list. 8895 int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1; 8896 SDLoc DL(Op); 8897 switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) { 8898 case Intrinsic::ppc_cfence: { 8899 assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument."); 8900 assert(Subtarget.isPPC64() && "Only 64-bit is supported for now."); 8901 return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other, 8902 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, 8903 Op.getOperand(ArgStart + 1)), 8904 Op.getOperand(0)), 8905 0); 8906 } 8907 default: 8908 break; 8909 } 8910 return SDValue(); 8911 } 8912 8913 SDValue PPCTargetLowering::LowerREM(SDValue Op, SelectionDAG &DAG) const { 8914 // Check for a DIV with the same operands as this REM. 8915 for (auto UI : Op.getOperand(1)->uses()) { 8916 if ((Op.getOpcode() == ISD::SREM && UI->getOpcode() == ISD::SDIV) || 8917 (Op.getOpcode() == ISD::UREM && UI->getOpcode() == ISD::UDIV)) 8918 if (UI->getOperand(0) == Op.getOperand(0) && 8919 UI->getOperand(1) == Op.getOperand(1)) 8920 return SDValue(); 8921 } 8922 return Op; 8923 } 8924 8925 // Lower scalar BSWAP64 to xxbrd. 8926 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const { 8927 SDLoc dl(Op); 8928 // MTVSRDD 8929 Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0), 8930 Op.getOperand(0)); 8931 // XXBRD 8932 Op = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Op); 8933 // MFVSRD 8934 int VectorIndex = 0; 8935 if (Subtarget.isLittleEndian()) 8936 VectorIndex = 1; 8937 Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op, 8938 DAG.getTargetConstant(VectorIndex, dl, MVT::i32)); 8939 return Op; 8940 } 8941 8942 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be 8943 // compared to a value that is atomically loaded (atomic loads zero-extend). 8944 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, 8945 SelectionDAG &DAG) const { 8946 assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP && 8947 "Expecting an atomic compare-and-swap here."); 8948 SDLoc dl(Op); 8949 auto *AtomicNode = cast<AtomicSDNode>(Op.getNode()); 8950 EVT MemVT = AtomicNode->getMemoryVT(); 8951 if (MemVT.getSizeInBits() >= 32) 8952 return Op; 8953 8954 SDValue CmpOp = Op.getOperand(2); 8955 // If this is already correctly zero-extended, leave it alone. 8956 auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits()); 8957 if (DAG.MaskedValueIsZero(CmpOp, HighBits)) 8958 return Op; 8959 8960 // Clear the high bits of the compare operand. 8961 unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1; 8962 SDValue NewCmpOp = 8963 DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp, 8964 DAG.getConstant(MaskVal, dl, MVT::i32)); 8965 8966 // Replace the existing compare operand with the properly zero-extended one. 8967 SmallVector<SDValue, 4> Ops; 8968 for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++) 8969 Ops.push_back(AtomicNode->getOperand(i)); 8970 Ops[2] = NewCmpOp; 8971 MachineMemOperand *MMO = AtomicNode->getMemOperand(); 8972 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other); 8973 auto NodeTy = 8974 (MemVT == MVT::i8) ? 
PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16; 8975 return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO); 8976 } 8977 8978 SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, 8979 SelectionDAG &DAG) const { 8980 SDLoc dl(Op); 8981 // For v2i64 (VSX), we can pattern match the v2i32 case (using fp <-> int 8982 // instructions), but for smaller types, we need to first extend up to v2i32 8983 // before going any farther. 8984 if (Op.getValueType() == MVT::v2i64) { 8985 EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 8986 if (ExtVT != MVT::v2i32) { 8987 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)); 8988 Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op, 8989 DAG.getValueType(EVT::getVectorVT(*DAG.getContext(), 8990 ExtVT.getVectorElementType(), 4))); 8991 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op); 8992 Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op, 8993 DAG.getValueType(MVT::v2i32)); 8994 } 8995 8996 return Op; 8997 } 8998 8999 return SDValue(); 9000 } 9001 9002 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, 9003 SelectionDAG &DAG) const { 9004 SDLoc dl(Op); 9005 // Create a stack slot that is 16-byte aligned. 9006 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 9007 int FrameIdx = MFI.CreateStackObject(16, 16, false); 9008 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 9009 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 9010 9011 // Store the input value into Value#0 of the stack slot. 9012 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 9013 MachinePointerInfo()); 9014 // Load it out. 9015 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo()); 9016 } 9017 9018 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, 9019 SelectionDAG &DAG) const { 9020 assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && 9021 "Should only be called for ISD::INSERT_VECTOR_ELT"); 9022 9023 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 9024 // We have legal lowering for constant indices but not for variable ones. 9025 if (!C) 9026 return SDValue(); 9027 9028 EVT VT = Op.getValueType(); 9029 SDLoc dl(Op); 9030 SDValue V1 = Op.getOperand(0); 9031 SDValue V2 = Op.getOperand(1); 9032 // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types. 9033 if (VT == MVT::v8i16 || VT == MVT::v16i8) { 9034 SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2); 9035 unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8; 9036 unsigned InsertAtElement = C->getZExtValue(); 9037 unsigned InsertAtByte = InsertAtElement * BytesInEachElement; 9038 if (Subtarget.isLittleEndian()) { 9039 InsertAtByte = (16 - BytesInEachElement) - InsertAtByte; 9040 } 9041 return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz, 9042 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9043 } 9044 return Op; 9045 } 9046 9047 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, 9048 SelectionDAG &DAG) const { 9049 SDLoc dl(Op); 9050 SDNode *N = Op.getNode(); 9051 9052 assert(N->getOperand(0).getValueType() == MVT::v4i1 && 9053 "Unknown extract_vector_elt type"); 9054 9055 SDValue Value = N->getOperand(0); 9056 9057 // The first part of this is like the store lowering except that we don't 9058 // need to track the chain. 9059 9060 // The values are now known to be -1 (false) or 1 (true). To convert this 9061 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
9062 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 9063 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 9064 9065 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 9066 // understand how to form the extending load. 9067 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 9068 9069 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 9070 9071 // Now convert to an integer and store. 9072 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 9073 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 9074 Value); 9075 9076 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 9077 int FrameIdx = MFI.CreateStackObject(16, 16, false); 9078 MachinePointerInfo PtrInfo = 9079 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 9080 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 9081 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 9082 9083 SDValue StoreChain = DAG.getEntryNode(); 9084 SDValue Ops[] = {StoreChain, 9085 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32), 9086 Value, FIdx}; 9087 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other); 9088 9089 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 9090 dl, VTs, Ops, MVT::v4i32, PtrInfo); 9091 9092 // Extract the value requested. 9093 unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 9094 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 9095 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 9096 9097 SDValue IntVal = 9098 DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset)); 9099 9100 if (!Subtarget.useCRBits()) 9101 return IntVal; 9102 9103 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal); 9104 } 9105 9106 /// Lowering for QPX v4i1 loads 9107 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op, 9108 SelectionDAG &DAG) const { 9109 SDLoc dl(Op); 9110 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode()); 9111 SDValue LoadChain = LN->getChain(); 9112 SDValue BasePtr = LN->getBasePtr(); 9113 9114 if (Op.getValueType() == MVT::v4f64 || 9115 Op.getValueType() == MVT::v4f32) { 9116 EVT MemVT = LN->getMemoryVT(); 9117 unsigned Alignment = LN->getAlignment(); 9118 9119 // If this load is properly aligned, then it is legal. 
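// (For example, an underaligned v4f64 load, i.e. Alignment < 32, the store
// size of v4f64, is scalarized below into four stride-8 f64 loads whose
// chains merge in a TokenFactor; when the memory type is narrower, say v4f32
// loaded as v4f64, ScalarVT != ScalarMemVT and the extending-load path runs.)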
9120 if (Alignment >= MemVT.getStoreSize()) 9121 return Op; 9122 9123 EVT ScalarVT = Op.getValueType().getScalarType(), 9124 ScalarMemVT = MemVT.getScalarType(); 9125 unsigned Stride = ScalarMemVT.getStoreSize(); 9126 9127 SDValue Vals[4], LoadChains[4]; 9128 for (unsigned Idx = 0; Idx < 4; ++Idx) { 9129 SDValue Load; 9130 if (ScalarVT != ScalarMemVT) 9131 Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain, 9132 BasePtr, 9133 LN->getPointerInfo().getWithOffset(Idx * Stride), 9134 ScalarMemVT, MinAlign(Alignment, Idx * Stride), 9135 LN->getMemOperand()->getFlags(), LN->getAAInfo()); 9136 else 9137 Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr, 9138 LN->getPointerInfo().getWithOffset(Idx * Stride), 9139 MinAlign(Alignment, Idx * Stride), 9140 LN->getMemOperand()->getFlags(), LN->getAAInfo()); 9141 9142 if (Idx == 0 && LN->isIndexed()) { 9143 assert(LN->getAddressingMode() == ISD::PRE_INC && 9144 "Unknown addressing mode on vector load"); 9145 Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(), 9146 LN->getAddressingMode()); 9147 } 9148 9149 Vals[Idx] = Load; 9150 LoadChains[Idx] = Load.getValue(1); 9151 9152 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 9153 DAG.getConstant(Stride, dl, 9154 BasePtr.getValueType())); 9155 } 9156 9157 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 9158 SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals); 9159 9160 if (LN->isIndexed()) { 9161 SDValue RetOps[] = { Value, Vals[0].getValue(1), TF }; 9162 return DAG.getMergeValues(RetOps, dl); 9163 } 9164 9165 SDValue RetOps[] = { Value, TF }; 9166 return DAG.getMergeValues(RetOps, dl); 9167 } 9168 9169 assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower"); 9170 assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported"); 9171 9172 // To lower v4i1 from a byte array, we load the byte elements of the 9173 // vector and then reuse the BUILD_VECTOR logic. 9174 9175 SDValue VectElmts[4], VectElmtChains[4]; 9176 for (unsigned i = 0; i < 4; ++i) { 9177 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 9178 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 9179 9180 VectElmts[i] = DAG.getExtLoad( 9181 ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx, 9182 LN->getPointerInfo().getWithOffset(i), MVT::i8, 9183 /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo()); 9184 VectElmtChains[i] = VectElmts[i].getValue(1); 9185 } 9186 9187 LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains); 9188 SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts); 9189 9190 SDValue RVals[] = { Value, LoadChain }; 9191 return DAG.getMergeValues(RVals, dl); 9192 } 9193 9194 /// Lowering for QPX v4i1 stores 9195 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op, 9196 SelectionDAG &DAG) const { 9197 SDLoc dl(Op); 9198 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode()); 9199 SDValue StoreChain = SN->getChain(); 9200 SDValue BasePtr = SN->getBasePtr(); 9201 SDValue Value = SN->getValue(); 9202 9203 if (Value.getValueType() == MVT::v4f64 || 9204 Value.getValueType() == MVT::v4f32) { 9205 EVT MemVT = SN->getMemoryVT(); 9206 unsigned Alignment = SN->getAlignment(); 9207 9208 // If this store is properly aligned, then it is legal. 
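// (Mirror of the load case above: an underaligned v4f64 store is split below
// into four stride-8 element stores chained by a TokenFactor, and a
// truncating v4f64-to-v4f32 store takes the getTruncStore path because
// ScalarVT and ScalarMemVT differ.)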
9209 if (Alignment >= MemVT.getStoreSize()) 9210 return Op; 9211 9212 EVT ScalarVT = Value.getValueType().getScalarType(), 9213 ScalarMemVT = MemVT.getScalarType(); 9214 unsigned Stride = ScalarMemVT.getStoreSize(); 9215 9216 SDValue Stores[4]; 9217 for (unsigned Idx = 0; Idx < 4; ++Idx) { 9218 SDValue Ex = DAG.getNode( 9219 ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value, 9220 DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout()))); 9221 SDValue Store; 9222 if (ScalarVT != ScalarMemVT) 9223 Store = 9224 DAG.getTruncStore(StoreChain, dl, Ex, BasePtr, 9225 SN->getPointerInfo().getWithOffset(Idx * Stride), 9226 ScalarMemVT, MinAlign(Alignment, Idx * Stride), 9227 SN->getMemOperand()->getFlags(), SN->getAAInfo()); 9228 else 9229 Store = DAG.getStore(StoreChain, dl, Ex, BasePtr, 9230 SN->getPointerInfo().getWithOffset(Idx * Stride), 9231 MinAlign(Alignment, Idx * Stride), 9232 SN->getMemOperand()->getFlags(), SN->getAAInfo()); 9233 9234 if (Idx == 0 && SN->isIndexed()) { 9235 assert(SN->getAddressingMode() == ISD::PRE_INC && 9236 "Unknown addressing mode on vector store"); 9237 Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(), 9238 SN->getAddressingMode()); 9239 } 9240 9241 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 9242 DAG.getConstant(Stride, dl, 9243 BasePtr.getValueType())); 9244 Stores[Idx] = Store; 9245 } 9246 9247 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 9248 9249 if (SN->isIndexed()) { 9250 SDValue RetOps[] = { TF, Stores[0].getValue(1) }; 9251 return DAG.getMergeValues(RetOps, dl); 9252 } 9253 9254 return TF; 9255 } 9256 9257 assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported"); 9258 assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower"); 9259 9260 // The values are now known to be -1 (false) or 1 (true). To convert this 9261 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 9262 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 9263 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 9264 9265 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 9266 // understand how to form the extending load. 9267 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 9268 9269 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 9270 9271 // Now convert to an integer and store. 9272 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 9273 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 9274 Value); 9275 9276 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 9277 int FrameIdx = MFI.CreateStackObject(16, 16, false); 9278 MachinePointerInfo PtrInfo = 9279 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 9280 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 9281 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 9282 9283 SDValue Ops[] = {StoreChain, 9284 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32), 9285 Value, FIdx}; 9286 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other); 9287 9288 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 9289 dl, VTs, Ops, MVT::v4i32, PtrInfo); 9290 9291 // Move data into the byte array. 
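// Illustrative end-to-end flow for the v4i1 store lowered below: QBFLT turns
// the +/-1.0 boolean lanes into v4f64, the FMA maps -1.0/1.0 onto 0.0/1.0,
// qvfctiwu converts those to 0/1 words, qvstfiw spills the words to the
// 16-byte stack slot, and the two loops that follow reload each word and
// truncating-store it as one byte of the in-memory v4i1.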
9292 SDValue Loads[4], LoadChains[4]; 9293 for (unsigned i = 0; i < 4; ++i) { 9294 unsigned Offset = 4*i; 9295 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 9296 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 9297 9298 Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx, 9299 PtrInfo.getWithOffset(Offset)); 9300 LoadChains[i] = Loads[i].getValue(1); 9301 } 9302 9303 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 9304 9305 SDValue Stores[4]; 9306 for (unsigned i = 0; i < 4; ++i) { 9307 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 9308 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 9309 9310 Stores[i] = DAG.getTruncStore( 9311 StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i), 9312 MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(), 9313 SN->getAAInfo()); 9314 } 9315 9316 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 9317 9318 return StoreChain; 9319 } 9320 9321 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 9322 SDLoc dl(Op); 9323 if (Op.getValueType() == MVT::v4i32) { 9324 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 9325 9326 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl); 9327 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt. 9328 9329 SDValue RHSSwap = // = vrlw RHS, 16 9330 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 9331 9332 // Shrinkify inputs to v8i16. 9333 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS); 9334 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS); 9335 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap); 9336 9337 // Low parts multiplied together, generating 32-bit results (we ignore the 9338 // top parts). 9339 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 9340 LHS, RHS, DAG, dl, MVT::v4i32); 9341 9342 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 9343 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 9344 // Shift the high parts up 16 bits. 9345 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 9346 Neg16, DAG, dl); 9347 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 9348 } else if (Op.getValueType() == MVT::v8i16) { 9349 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 9350 9351 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl); 9352 9353 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 9354 LHS, RHS, Zero, DAG, dl); 9355 } else if (Op.getValueType() == MVT::v16i8) { 9356 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 9357 bool isLittleEndian = Subtarget.isLittleEndian(); 9358 9359 // Multiply the even 8-bit parts, producing 16-bit sums. 9360 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 9361 LHS, RHS, DAG, dl, MVT::v8i16); 9362 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts); 9363 9364 // Multiply the odd 8-bit parts, producing 16-bit sums. 9365 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 9366 LHS, RHS, DAG, dl, MVT::v8i16); 9367 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts); 9368 9369 // Merge the results together. Because vmuleub and vmuloub are 9370 // instructions with a big-endian bias, we must reverse the 9371 // element numbering and reverse the meaning of "odd" and "even" 9372 // when generating little endian code. 
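// Concretely, the loop below builds these masks (illustrative):
//   little-endian: {0,16, 2,18, 4,20, 6,22, 8,24, 10,26, 12,28, 14,30}
//                  applied to (OddParts, EvenParts);
//   big-endian:    {1,17, 3,19, 5,21, 7,23, 9,25, 11,27, 13,29, 15,31}
//                  applied to (EvenParts, OddParts);
// each picks the low byte of every 16-bit product in lane order.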
9373 int Ops[16]; 9374 for (unsigned i = 0; i != 8; ++i) { 9375 if (isLittleEndian) { 9376 Ops[i*2 ] = 2*i; 9377 Ops[i*2+1] = 2*i+16; 9378 } else { 9379 Ops[i*2 ] = 2*i+1; 9380 Ops[i*2+1] = 2*i+1+16; 9381 } 9382 } 9383 if (isLittleEndian) 9384 return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops); 9385 else 9386 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 9387 } else { 9388 llvm_unreachable("Unknown mul to lower!"); 9389 } 9390 } 9391 9392 /// LowerOperation - Provide custom lowering hooks for some operations. 9393 /// 9394 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 9395 switch (Op.getOpcode()) { 9396 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 9397 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 9398 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 9399 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 9400 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 9401 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 9402 case ISD::SETCC: return LowerSETCC(Op, DAG); 9403 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 9404 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 9405 9406 // Variable argument lowering. 9407 case ISD::VASTART: return LowerVASTART(Op, DAG); 9408 case ISD::VAARG: return LowerVAARG(Op, DAG); 9409 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 9410 9411 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG); 9412 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 9413 case ISD::GET_DYNAMIC_AREA_OFFSET: 9414 return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG); 9415 9416 // Exception handling lowering. 9417 case ISD::EH_DWARF_CFA: return LowerEH_DWARF_CFA(Op, DAG); 9418 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 9419 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 9420 9421 case ISD::LOAD: return LowerLOAD(Op, DAG); 9422 case ISD::STORE: return LowerSTORE(Op, DAG); 9423 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); 9424 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 9425 case ISD::FP_TO_UINT: 9426 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, SDLoc(Op)); 9427 case ISD::UINT_TO_FP: 9428 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 9429 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 9430 9431 // Lower 64-bit shifts. 9432 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 9433 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 9434 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 9435 9436 // Vector-related lowering. 9437 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 9438 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 9439 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 9440 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 9441 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG); 9442 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 9443 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 9444 case ISD::MUL: return LowerMUL(Op, DAG); 9445 9446 // For counter-based loop handling. 9447 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 9448 9449 // Frame & Return address. 
9450 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 9451 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 9452 9453 case ISD::INTRINSIC_VOID: 9454 return LowerINTRINSIC_VOID(Op, DAG); 9455 case ISD::SREM: 9456 case ISD::UREM: 9457 return LowerREM(Op, DAG); 9458 case ISD::BSWAP: 9459 return LowerBSWAP(Op, DAG); 9460 case ISD::ATOMIC_CMP_SWAP: 9461 return LowerATOMIC_CMP_SWAP(Op, DAG); 9462 } 9463 } 9464 9465 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 9466 SmallVectorImpl<SDValue>&Results, 9467 SelectionDAG &DAG) const { 9468 SDLoc dl(N); 9469 switch (N->getOpcode()) { 9470 default: 9471 llvm_unreachable("Do not know how to custom type legalize this operation!"); 9472 case ISD::READCYCLECOUNTER: { 9473 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 9474 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0)); 9475 9476 Results.push_back(RTB); 9477 Results.push_back(RTB.getValue(1)); 9478 Results.push_back(RTB.getValue(2)); 9479 break; 9480 } 9481 case ISD::INTRINSIC_W_CHAIN: { 9482 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 9483 Intrinsic::ppc_is_decremented_ctr_nonzero) 9484 break; 9485 9486 assert(N->getValueType(0) == MVT::i1 && 9487 "Unexpected result type for CTR decrement intrinsic"); 9488 EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), 9489 N->getValueType(0)); 9490 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 9491 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 9492 N->getOperand(1)); 9493 9494 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt)); 9495 Results.push_back(NewInt.getValue(1)); 9496 break; 9497 } 9498 case ISD::VAARG: { 9499 if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64()) 9500 return; 9501 9502 EVT VT = N->getValueType(0); 9503 9504 if (VT == MVT::i64) { 9505 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG); 9506 9507 Results.push_back(NewNode); 9508 Results.push_back(NewNode.getValue(1)); 9509 } 9510 return; 9511 } 9512 case ISD::FP_TO_SINT: 9513 case ISD::FP_TO_UINT: 9514 // LowerFP_TO_INT() can only handle f32 and f64. 
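// (ppcf128 is deliberately punted: leaving Results empty makes the common
// legalizer fall back to its own expansion. For f32/f64 sources the call
// below emits the usual FCTIWZ/FCTIDZ-based sequence and pushes its result.)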
9515 if (N->getOperand(0).getValueType() == MVT::ppcf128) 9516 return; 9517 Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl)); 9518 return; 9519 } 9520 } 9521 9522 //===----------------------------------------------------------------------===// 9523 // Other Lowering Code 9524 //===----------------------------------------------------------------------===// 9525 9526 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) { 9527 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 9528 Function *Func = Intrinsic::getDeclaration(M, Id); 9529 return Builder.CreateCall(Func, {}); 9530 } 9531 9532 // The mappings for emitLeading/TrailingFence is taken from 9533 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html 9534 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder, 9535 Instruction *Inst, 9536 AtomicOrdering Ord) const { 9537 if (Ord == AtomicOrdering::SequentiallyConsistent) 9538 return callIntrinsic(Builder, Intrinsic::ppc_sync); 9539 if (isReleaseOrStronger(Ord)) 9540 return callIntrinsic(Builder, Intrinsic::ppc_lwsync); 9541 return nullptr; 9542 } 9543 9544 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder, 9545 Instruction *Inst, 9546 AtomicOrdering Ord) const { 9547 if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) { 9548 // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and 9549 // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html 9550 // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification. 9551 if (isa<LoadInst>(Inst) && Subtarget.isPPC64()) 9552 return Builder.CreateCall( 9553 Intrinsic::getDeclaration( 9554 Builder.GetInsertBlock()->getParent()->getParent(), 9555 Intrinsic::ppc_cfence, {Inst->getType()}), 9556 {Inst}); 9557 // FIXME: Can use isync for rmw operation. 9558 return callIntrinsic(Builder, Intrinsic::ppc_lwsync); 9559 } 9560 return nullptr; 9561 } 9562 9563 MachineBasicBlock * 9564 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB, 9565 unsigned AtomicSize, 9566 unsigned BinOpcode, 9567 unsigned CmpOpcode, 9568 unsigned CmpPred) const { 9569 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 9570 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 9571 9572 auto LoadMnemonic = PPC::LDARX; 9573 auto StoreMnemonic = PPC::STDCX; 9574 switch (AtomicSize) { 9575 default: 9576 llvm_unreachable("Unexpected size of atomic entity"); 9577 case 1: 9578 LoadMnemonic = PPC::LBARX; 9579 StoreMnemonic = PPC::STBCX; 9580 assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4"); 9581 break; 9582 case 2: 9583 LoadMnemonic = PPC::LHARX; 9584 StoreMnemonic = PPC::STHCX; 9585 assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4"); 9586 break; 9587 case 4: 9588 LoadMnemonic = PPC::LWARX; 9589 StoreMnemonic = PPC::STWCX; 9590 break; 9591 case 8: 9592 LoadMnemonic = PPC::LDARX; 9593 StoreMnemonic = PPC::STDCX; 9594 break; 9595 } 9596 9597 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 9598 MachineFunction *F = BB->getParent(); 9599 MachineFunction::iterator It = ++BB->getIterator(); 9600 9601 unsigned dest = MI.getOperand(0).getReg(); 9602 unsigned ptrA = MI.getOperand(1).getReg(); 9603 unsigned ptrB = MI.getOperand(2).getReg(); 9604 unsigned incr = MI.getOperand(3).getReg(); 9605 DebugLoc dl = MI.getDebugLoc(); 9606 9607 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 9608 MachineBasicBlock *loop2MBB = 9609 CmpOpcode ? 
F->CreateMachineBasicBlock(LLVM_BB) : nullptr; 9610 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 9611 F->insert(It, loopMBB); 9612 if (CmpOpcode) 9613 F->insert(It, loop2MBB); 9614 F->insert(It, exitMBB); 9615 exitMBB->splice(exitMBB->begin(), BB, 9616 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 9617 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 9618 9619 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9620 unsigned TmpReg = (!BinOpcode) ? incr : 9621 RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass 9622 : &PPC::GPRCRegClass); 9623 9624 // thisMBB: 9625 // ... 9626 // fallthrough --> loopMBB 9627 BB->addSuccessor(loopMBB); 9628 9629 // loopMBB: 9630 // l[wd]arx dest, ptr 9631 // add r0, dest, incr 9632 // st[wd]cx. r0, ptr 9633 // bne- loopMBB 9634 // fallthrough --> exitMBB 9635 9636 // For max/min... 9637 // loopMBB: 9638 // l[wd]arx dest, ptr 9639 // cmpl?[wd] incr, dest 9640 // bgt exitMBB 9641 // loop2MBB: 9642 // st[wd]cx. dest, ptr 9643 // bne- loopMBB 9644 // fallthrough --> exitMBB 9645 9646 BB = loopMBB; 9647 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 9648 .addReg(ptrA).addReg(ptrB); 9649 if (BinOpcode) 9650 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 9651 if (CmpOpcode) { 9652 // Signed comparisons of byte or halfword values must be sign-extended. 9653 if (CmpOpcode == PPC::CMPW && AtomicSize < 4) { 9654 unsigned ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 9655 BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH), 9656 ExtReg).addReg(dest); 9657 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 9658 .addReg(incr).addReg(ExtReg); 9659 } else 9660 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 9661 .addReg(incr).addReg(dest); 9662 9663 BuildMI(BB, dl, TII->get(PPC::BCC)) 9664 .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB); 9665 BB->addSuccessor(loop2MBB); 9666 BB->addSuccessor(exitMBB); 9667 BB = loop2MBB; 9668 } 9669 BuildMI(BB, dl, TII->get(StoreMnemonic)) 9670 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 9671 BuildMI(BB, dl, TII->get(PPC::BCC)) 9672 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 9673 BB->addSuccessor(loopMBB); 9674 BB->addSuccessor(exitMBB); 9675 9676 // exitMBB: 9677 // ... 9678 BB = exitMBB; 9679 return BB; 9680 } 9681 9682 MachineBasicBlock * 9683 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr &MI, 9684 MachineBasicBlock *BB, 9685 bool is8bit, // operation 9686 unsigned BinOpcode, 9687 unsigned CmpOpcode, 9688 unsigned CmpPred) const { 9689 // If we support part-word atomic mnemonics, just use them 9690 if (Subtarget.hasPartwordAtomics()) 9691 return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, 9692 CmpOpcode, CmpPred); 9693 9694 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 9695 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 9696 // In 64 bit mode we have to use 64 bits for addresses, even though the 9697 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 9698 // registers without caring whether they're 32 or 64, but here we're 9699 // doing actual arithmetic on the addresses. 9700 bool is64bit = Subtarget.isPPC64(); 9701 bool isLittleEndian = Subtarget.isLittleEndian(); 9702 unsigned ZeroReg = is64bit ? 
PPC::ZERO8 : PPC::ZERO; 9703 9704 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 9705 MachineFunction *F = BB->getParent(); 9706 MachineFunction::iterator It = ++BB->getIterator(); 9707 9708 unsigned dest = MI.getOperand(0).getReg(); 9709 unsigned ptrA = MI.getOperand(1).getReg(); 9710 unsigned ptrB = MI.getOperand(2).getReg(); 9711 unsigned incr = MI.getOperand(3).getReg(); 9712 DebugLoc dl = MI.getDebugLoc(); 9713 9714 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 9715 MachineBasicBlock *loop2MBB = 9716 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr; 9717 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 9718 F->insert(It, loopMBB); 9719 if (CmpOpcode) 9720 F->insert(It, loop2MBB); 9721 F->insert(It, exitMBB); 9722 exitMBB->splice(exitMBB->begin(), BB, 9723 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 9724 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 9725 9726 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9727 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 9728 : &PPC::GPRCRegClass; 9729 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 9730 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 9731 unsigned ShiftReg = 9732 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC); 9733 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC); 9734 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 9735 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 9736 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 9737 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 9738 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC); 9739 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 9740 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 9741 unsigned Ptr1Reg; 9742 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC); 9743 9744 // thisMBB: 9745 // ... 9746 // fallthrough --> loopMBB 9747 BB->addSuccessor(loopMBB); 9748 9749 // The 4-byte load must be aligned, while a char or short may be 9750 // anywhere in the word. Hence all this nasty bookkeeping code. 9751 // add ptr1, ptrA, ptrB [copy if ptrA==0] 9752 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 9753 // xori shift, shift1, 24 [16] 9754 // rlwinm ptr, ptr1, 0, 0, 29 9755 // slw incr2, incr, shift 9756 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 9757 // slw mask, mask2, shift 9758 // loopMBB: 9759 // lwarx tmpDest, ptr 9760 // add tmp, tmpDest, incr2 9761 // andc tmp2, tmpDest, mask 9762 // and tmp3, tmp, mask 9763 // or tmp4, tmp3, tmp2 9764 // stwcx. tmp4, ptr 9765 // bne- loopMBB 9766 // fallthrough --> exitMBB 9767 // srw dest, tmpDest, shift 9768 if (ptrA != ZeroReg) { 9769 Ptr1Reg = RegInfo.createVirtualRegister(RC); 9770 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 9771 .addReg(ptrA).addReg(ptrB); 9772 } else { 9773 Ptr1Reg = ptrB; 9774 } 9775 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 9776 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 9777 if (!isLittleEndian) 9778 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 9779 .addReg(Shift1Reg).addImm(is8bit ? 
24 : 16); 9780 if (is64bit) 9781 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 9782 .addReg(Ptr1Reg).addImm(0).addImm(61); 9783 else 9784 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 9785 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 9786 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg) 9787 .addReg(incr).addReg(ShiftReg); 9788 if (is8bit) 9789 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 9790 else { 9791 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 9792 BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535); 9793 } 9794 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 9795 .addReg(Mask2Reg).addReg(ShiftReg); 9796 9797 BB = loopMBB; 9798 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 9799 .addReg(ZeroReg).addReg(PtrReg); 9800 if (BinOpcode) 9801 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 9802 .addReg(Incr2Reg).addReg(TmpDestReg); 9803 BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg) 9804 .addReg(TmpDestReg).addReg(MaskReg); 9805 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg) 9806 .addReg(TmpReg).addReg(MaskReg); 9807 if (CmpOpcode) { 9808 // For unsigned comparisons, we can directly compare the shifted values. 9809 // For signed comparisons we shift and sign extend. 9810 unsigned SReg = RegInfo.createVirtualRegister(RC); 9811 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), SReg) 9812 .addReg(TmpDestReg).addReg(MaskReg); 9813 unsigned ValueReg = SReg; 9814 unsigned CmpReg = Incr2Reg; 9815 if (CmpOpcode == PPC::CMPW) { 9816 ValueReg = RegInfo.createVirtualRegister(RC); 9817 BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg) 9818 .addReg(SReg).addReg(ShiftReg); 9819 unsigned ValueSReg = RegInfo.createVirtualRegister(RC); 9820 BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg) 9821 .addReg(ValueReg); 9822 ValueReg = ValueSReg; 9823 CmpReg = incr; 9824 } 9825 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 9826 .addReg(CmpReg).addReg(ValueReg); 9827 BuildMI(BB, dl, TII->get(PPC::BCC)) 9828 .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB); 9829 BB->addSuccessor(loop2MBB); 9830 BB->addSuccessor(exitMBB); 9831 BB = loop2MBB; 9832 } 9833 BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg) 9834 .addReg(Tmp3Reg).addReg(Tmp2Reg); 9835 BuildMI(BB, dl, TII->get(PPC::STWCX)) 9836 .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg); 9837 BuildMI(BB, dl, TII->get(PPC::BCC)) 9838 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 9839 BB->addSuccessor(loopMBB); 9840 BB->addSuccessor(exitMBB); 9841 9842 // exitMBB: 9843 // ... 
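// Worked example of the bookkeeping above (big-endian, 8-bit operand at byte
// offset 1 of its word): rlwinm produces shift1 == 8, xori 24 yields
// shift == 16, so incr2 and mask cover bits 23..16 of the aligned word; the
// srw at the head of exitMBB below moves the updated byte back down to dest.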
9844 BB = exitMBB; 9845 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg) 9846 .addReg(ShiftReg); 9847 return BB; 9848 } 9849 9850 llvm::MachineBasicBlock * 9851 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI, 9852 MachineBasicBlock *MBB) const { 9853 DebugLoc DL = MI.getDebugLoc(); 9854 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 9855 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 9856 9857 MachineFunction *MF = MBB->getParent(); 9858 MachineRegisterInfo &MRI = MF->getRegInfo(); 9859 9860 const BasicBlock *BB = MBB->getBasicBlock(); 9861 MachineFunction::iterator I = ++MBB->getIterator(); 9862 9863 // Memory Reference 9864 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin(); 9865 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end(); 9866 9867 unsigned DstReg = MI.getOperand(0).getReg(); 9868 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 9869 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!"); 9870 unsigned mainDstReg = MRI.createVirtualRegister(RC); 9871 unsigned restoreDstReg = MRI.createVirtualRegister(RC); 9872 9873 MVT PVT = getPointerTy(MF->getDataLayout()); 9874 assert((PVT == MVT::i64 || PVT == MVT::i32) && 9875 "Invalid Pointer Size!"); 9876 // For v = setjmp(buf), we generate 9877 // 9878 // thisMBB: 9879 // SjLjSetup mainMBB 9880 // bl mainMBB 9881 // v_restore = 1 9882 // b sinkMBB 9883 // 9884 // mainMBB: 9885 // buf[LabelOffset] = LR 9886 // v_main = 0 9887 // 9888 // sinkMBB: 9889 // v = phi(main, restore) 9890 // 9891 9892 MachineBasicBlock *thisMBB = MBB; 9893 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 9894 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 9895 MF->insert(I, mainMBB); 9896 MF->insert(I, sinkMBB); 9897 9898 MachineInstrBuilder MIB; 9899 9900 // Transfer the remainder of BB and its successor edges to sinkMBB. 9901 sinkMBB->splice(sinkMBB->begin(), MBB, 9902 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 9903 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 9904 9905 // Note that the structure of the jmp_buf used here is not compatible 9906 // with that used by libc, and is not designed to be. Specifically, it 9907 // stores only those 'reserved' registers that LLVM does not otherwise 9908 // understand how to spill. Also, by convention, by the time this 9909 // intrinsic is called, Clang has already stored the frame address in the 9910 // first slot of the buffer and stack address in the third. Following the 9911 // X86 target code, we'll store the jump address in the second slot. We also 9912 // need to save the TOC pointer (R2) to handle jumps between shared 9913 // libraries, and that will be stored in the fourth slot. The thread 9914 // identifier (R13) is not affected. 9915 9916 // thisMBB: 9917 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 9918 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 9919 const int64_t BPOffset = 4 * PVT.getStoreSize(); 9920 9921 // Prepare IP either in reg. 9922 const TargetRegisterClass *PtrRC = getRegClassFor(PVT); 9923 unsigned LabelReg = MRI.createVirtualRegister(PtrRC); 9924 unsigned BufReg = MI.getOperand(1).getReg(); 9925 9926 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) { 9927 setUsesTOCBasePtr(*MBB->getParent()); 9928 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD)) 9929 .addReg(PPC::X2) 9930 .addImm(TOCOffset) 9931 .addReg(BufReg); 9932 MIB.setMemRefs(MMOBegin, MMOEnd); 9933 } 9934 9935 // Naked functions never have a base pointer, and so we use r1. 
For all 9936 // other functions, this decision must be delayed until during PEI. 9937 unsigned BaseReg; 9938 if (MF->getFunction().hasFnAttribute(Attribute::Naked)) 9939 BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1; 9940 else 9941 BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP; 9942 9943 MIB = BuildMI(*thisMBB, MI, DL, 9944 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW)) 9945 .addReg(BaseReg) 9946 .addImm(BPOffset) 9947 .addReg(BufReg); 9948 MIB.setMemRefs(MMOBegin, MMOEnd); 9949 9950 // Setup 9951 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB); 9952 MIB.addRegMask(TRI->getNoPreservedMask()); 9953 9954 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1); 9955 9956 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup)) 9957 .addMBB(mainMBB); 9958 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB); 9959 9960 thisMBB->addSuccessor(mainMBB, BranchProbability::getZero()); 9961 thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne()); 9962 9963 // mainMBB: 9964 // mainDstReg = 0 9965 MIB = 9966 BuildMI(mainMBB, DL, 9967 TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg); 9968 9969 // Store IP 9970 if (Subtarget.isPPC64()) { 9971 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD)) 9972 .addReg(LabelReg) 9973 .addImm(LabelOffset) 9974 .addReg(BufReg); 9975 } else { 9976 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW)) 9977 .addReg(LabelReg) 9978 .addImm(LabelOffset) 9979 .addReg(BufReg); 9980 } 9981 9982 MIB.setMemRefs(MMOBegin, MMOEnd); 9983 9984 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0); 9985 mainMBB->addSuccessor(sinkMBB); 9986 9987 // sinkMBB: 9988 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 9989 TII->get(PPC::PHI), DstReg) 9990 .addReg(mainDstReg).addMBB(mainMBB) 9991 .addReg(restoreDstReg).addMBB(thisMBB); 9992 9993 MI.eraseFromParent(); 9994 return sinkMBB; 9995 } 9996 9997 MachineBasicBlock * 9998 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI, 9999 MachineBasicBlock *MBB) const { 10000 DebugLoc DL = MI.getDebugLoc(); 10001 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 10002 10003 MachineFunction *MF = MBB->getParent(); 10004 MachineRegisterInfo &MRI = MF->getRegInfo(); 10005 10006 // Memory Reference 10007 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin(); 10008 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end(); 10009 10010 MVT PVT = getPointerTy(MF->getDataLayout()); 10011 assert((PVT == MVT::i64 || PVT == MVT::i32) && 10012 "Invalid Pointer Size!"); 10013 10014 const TargetRegisterClass *RC = 10015 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 10016 unsigned Tmp = MRI.createVirtualRegister(RC); 10017 // Since FP is only updated here but NOT referenced, it's treated as GPR. 10018 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; 10019 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; 10020 unsigned BP = 10021 (PVT == MVT::i64) 10022 ? PPC::X30 10023 : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29 10024 : PPC::R30); 10025 10026 MachineInstrBuilder MIB; 10027 10028 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 10029 const int64_t SPOffset = 2 * PVT.getStoreSize(); 10030 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 10031 const int64_t BPOffset = 4 * PVT.getStoreSize(); 10032 10033 unsigned BufReg = MI.getOperand(0).getReg(); 10034 10035 // Reload FP (the jumped-to function may not have had a 10036 // frame pointer, and if so, then its r31 will be restored 10037 // as necessary). 
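// Slot layout assumed by the offsets above and below (PVT == i64, so 8-byte
// slots): [0] frame address (stored by Clang), [8] IP (LabelOffset), [16] SP
// (SPOffset), [24] TOC (TOCOffset), [32] BP (BPOffset). The reloads that
// follow read FP, IP, SP, BP and finally TOC back from those slots.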
10038 if (PVT == MVT::i64) { 10039 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP) 10040 .addImm(0) 10041 .addReg(BufReg); 10042 } else { 10043 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP) 10044 .addImm(0) 10045 .addReg(BufReg); 10046 } 10047 MIB.setMemRefs(MMOBegin, MMOEnd); 10048 10049 // Reload IP 10050 if (PVT == MVT::i64) { 10051 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp) 10052 .addImm(LabelOffset) 10053 .addReg(BufReg); 10054 } else { 10055 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp) 10056 .addImm(LabelOffset) 10057 .addReg(BufReg); 10058 } 10059 MIB.setMemRefs(MMOBegin, MMOEnd); 10060 10061 // Reload SP 10062 if (PVT == MVT::i64) { 10063 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP) 10064 .addImm(SPOffset) 10065 .addReg(BufReg); 10066 } else { 10067 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP) 10068 .addImm(SPOffset) 10069 .addReg(BufReg); 10070 } 10071 MIB.setMemRefs(MMOBegin, MMOEnd); 10072 10073 // Reload BP 10074 if (PVT == MVT::i64) { 10075 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP) 10076 .addImm(BPOffset) 10077 .addReg(BufReg); 10078 } else { 10079 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP) 10080 .addImm(BPOffset) 10081 .addReg(BufReg); 10082 } 10083 MIB.setMemRefs(MMOBegin, MMOEnd); 10084 10085 // Reload TOC 10086 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) { 10087 setUsesTOCBasePtr(*MBB->getParent()); 10088 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) 10089 .addImm(TOCOffset) 10090 .addReg(BufReg); 10091 10092 MIB.setMemRefs(MMOBegin, MMOEnd); 10093 } 10094 10095 // Jump 10096 BuildMI(*MBB, MI, DL, 10097 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp); 10098 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR)); 10099 10100 MI.eraseFromParent(); 10101 return MBB; 10102 } 10103 10104 MachineBasicBlock * 10105 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, 10106 MachineBasicBlock *BB) const { 10107 if (MI.getOpcode() == TargetOpcode::STACKMAP || 10108 MI.getOpcode() == TargetOpcode::PATCHPOINT) { 10109 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() && 10110 MI.getOpcode() == TargetOpcode::PATCHPOINT) { 10111 // Call lowering should have added an r2 operand to indicate a dependence 10112 // on the TOC base pointer value. It can't however, because there is no 10113 // way to mark the dependence as implicit there, and so the stackmap code 10114 // will confuse it with a regular operand. Instead, add the dependence 10115 // here. 10116 setUsesTOCBasePtr(*BB->getParent()); 10117 MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true)); 10118 } 10119 10120 return emitPatchPoint(MI, BB); 10121 } 10122 10123 if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 || 10124 MI.getOpcode() == PPC::EH_SjLj_SetJmp64) { 10125 return emitEHSjLjSetJmp(MI, BB); 10126 } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 || 10127 MI.getOpcode() == PPC::EH_SjLj_LongJmp64) { 10128 return emitEHSjLjLongJmp(MI, BB); 10129 } 10130 10131 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 10132 10133 // To "insert" these instructions we actually have to insert their 10134 // control-flow patterns. 
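// For instance, a SELECT_CC pseudo expands into the diamond built below:
//   thisMBB:  bcc pred, crN, sinkMBB
//   copy0MBB: (fallthrough, the false path)
//   sinkMBB:  dst = PHI [FalseValue, copy0MBB], [TrueValue, thisMBB]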
10135 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 10136 MachineFunction::iterator It = ++BB->getIterator(); 10137 10138 MachineFunction *F = BB->getParent(); 10139 10140 if (MI.getOpcode() == PPC::SELECT_CC_I4 || 10141 MI.getOpcode() == PPC::SELECT_CC_I8 || 10142 MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8) { 10143 SmallVector<MachineOperand, 2> Cond; 10144 if (MI.getOpcode() == PPC::SELECT_CC_I4 || 10145 MI.getOpcode() == PPC::SELECT_CC_I8) 10146 Cond.push_back(MI.getOperand(4)); 10147 else 10148 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET)); 10149 Cond.push_back(MI.getOperand(1)); 10150 10151 DebugLoc dl = MI.getDebugLoc(); 10152 TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond, 10153 MI.getOperand(2).getReg(), MI.getOperand(3).getReg()); 10154 } else if (MI.getOpcode() == PPC::SELECT_CC_I4 || 10155 MI.getOpcode() == PPC::SELECT_CC_I8 || 10156 MI.getOpcode() == PPC::SELECT_CC_F4 || 10157 MI.getOpcode() == PPC::SELECT_CC_F8 || 10158 MI.getOpcode() == PPC::SELECT_CC_QFRC || 10159 MI.getOpcode() == PPC::SELECT_CC_QSRC || 10160 MI.getOpcode() == PPC::SELECT_CC_QBRC || 10161 MI.getOpcode() == PPC::SELECT_CC_VRRC || 10162 MI.getOpcode() == PPC::SELECT_CC_VSFRC || 10163 MI.getOpcode() == PPC::SELECT_CC_VSSRC || 10164 MI.getOpcode() == PPC::SELECT_CC_VSRC || 10165 MI.getOpcode() == PPC::SELECT_I4 || 10166 MI.getOpcode() == PPC::SELECT_I8 || 10167 MI.getOpcode() == PPC::SELECT_F4 || 10168 MI.getOpcode() == PPC::SELECT_F8 || 10169 MI.getOpcode() == PPC::SELECT_QFRC || 10170 MI.getOpcode() == PPC::SELECT_QSRC || 10171 MI.getOpcode() == PPC::SELECT_QBRC || 10172 MI.getOpcode() == PPC::SELECT_VRRC || 10173 MI.getOpcode() == PPC::SELECT_VSFRC || 10174 MI.getOpcode() == PPC::SELECT_VSSRC || 10175 MI.getOpcode() == PPC::SELECT_VSRC) { 10176 // The incoming instruction knows the destination vreg to set, the 10177 // condition code register to branch on, the true/false values to 10178 // select between, and a branch opcode to use. 10179 10180 // thisMBB: 10181 // ... 10182 // TrueVal = ... 10183 // cmpTY ccX, r1, r2 10184 // bCC copy1MBB 10185 // fallthrough --> copy0MBB 10186 MachineBasicBlock *thisMBB = BB; 10187 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 10188 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 10189 DebugLoc dl = MI.getDebugLoc(); 10190 F->insert(It, copy0MBB); 10191 F->insert(It, sinkMBB); 10192 10193 // Transfer the remainder of BB and its successor edges to sinkMBB. 10194 sinkMBB->splice(sinkMBB->begin(), BB, 10195 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10196 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 10197 10198 // Next, add the true and fallthrough blocks as its successors. 
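// (copy0MBB is reached only when the conditional branch below falls through,
// i.e. the false path; sinkMBB is reached on both paths and hosts the PHI
// that merges the true and false values.)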
10199 BB->addSuccessor(copy0MBB); 10200 BB->addSuccessor(sinkMBB); 10201 10202 if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 || 10203 MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 || 10204 MI.getOpcode() == PPC::SELECT_QFRC || 10205 MI.getOpcode() == PPC::SELECT_QSRC || 10206 MI.getOpcode() == PPC::SELECT_QBRC || 10207 MI.getOpcode() == PPC::SELECT_VRRC || 10208 MI.getOpcode() == PPC::SELECT_VSFRC || 10209 MI.getOpcode() == PPC::SELECT_VSSRC || 10210 MI.getOpcode() == PPC::SELECT_VSRC) { 10211 BuildMI(BB, dl, TII->get(PPC::BC)) 10212 .addReg(MI.getOperand(1).getReg()) 10213 .addMBB(sinkMBB); 10214 } else { 10215 unsigned SelectPred = MI.getOperand(4).getImm(); 10216 BuildMI(BB, dl, TII->get(PPC::BCC)) 10217 .addImm(SelectPred) 10218 .addReg(MI.getOperand(1).getReg()) 10219 .addMBB(sinkMBB); 10220 } 10221 10222 // copy0MBB: 10223 // %FalseValue = ... 10224 // # fallthrough to sinkMBB 10225 BB = copy0MBB; 10226 10227 // Update machine-CFG edges 10228 BB->addSuccessor(sinkMBB); 10229 10230 // sinkMBB: 10231 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 10232 // ... 10233 BB = sinkMBB; 10234 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg()) 10235 .addReg(MI.getOperand(3).getReg()) 10236 .addMBB(copy0MBB) 10237 .addReg(MI.getOperand(2).getReg()) 10238 .addMBB(thisMBB); 10239 } else if (MI.getOpcode() == PPC::ReadTB) { 10240 // To read the 64-bit time-base register on a 32-bit target, we read the 10241 // two halves. Should the counter have wrapped while it was being read, we 10242 // need to try again. 10243 // ... 10244 // readLoop: 10245 // mfspr Rx,TBU # load from TBU 10246 // mfspr Ry,TB # load from TB 10247 // mfspr Rz,TBU # load from TBU 10248 // cmpw crX,Rx,Rz # check if 'old'='new' 10249 // bne readLoop # branch if they're not equal 10250 // ... 10251 10252 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB); 10253 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 10254 DebugLoc dl = MI.getDebugLoc(); 10255 F->insert(It, readMBB); 10256 F->insert(It, sinkMBB); 10257 10258 // Transfer the remainder of BB and its successor edges to sinkMBB. 
10259 sinkMBB->splice(sinkMBB->begin(), BB, 10260 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10261 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 10262 10263 BB->addSuccessor(readMBB); 10264 BB = readMBB; 10265 10266 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10267 unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 10268 unsigned LoReg = MI.getOperand(0).getReg(); 10269 unsigned HiReg = MI.getOperand(1).getReg(); 10270 10271 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269); 10272 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268); 10273 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269); 10274 10275 unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 10276 10277 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg) 10278 .addReg(HiReg).addReg(ReadAgainReg); 10279 BuildMI(BB, dl, TII->get(PPC::BCC)) 10280 .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB); 10281 10282 BB->addSuccessor(readMBB); 10283 BB->addSuccessor(sinkMBB); 10284 } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 10285 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 10286 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 10287 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 10288 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 10289 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4); 10290 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 10291 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8); 10292 10293 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 10294 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 10295 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 10296 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 10297 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 10298 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND); 10299 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 10300 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8); 10301 10302 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 10303 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 10304 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 10305 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 10306 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 10307 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR); 10308 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 10309 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8); 10310 10311 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 10312 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 10313 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 10314 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 10315 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 10316 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR); 10317 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 10318 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8); 10319 10320 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 10321 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND); 10322 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 10323 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND); 10324 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 10325 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND); 10326 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 10327 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8); 10328 10329 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 10330 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 10331 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 10332 BB = 
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);

  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
           MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
           (Subtarget.hasPartwordAtomics() &&
            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
           (Subtarget.hasPartwordAtomics() &&
            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
    bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
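
    // Note on the expansion below (an explanatory comment, not part of the
    // original): the load-and-reserve instructions (l[bhwd]arx) establish a
    // reservation on the accessed address, and the matching store-conditional
    // (st[bhwd]cx.) succeeds -- setting CR0[EQ] -- only if that reservation
    // still holds. A failed store falls through the bne- and retries the
    // loop, which is what makes the compare-and-swap atomic.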
&& "No support partword atomics."); 10399 break; 10400 case PPC::ATOMIC_CMP_SWAP_I16: 10401 LoadMnemonic = PPC::LHARX; 10402 StoreMnemonic = PPC::STHCX; 10403 assert(Subtarget.hasPartwordAtomics() && "No support partword atomics."); 10404 break; 10405 case PPC::ATOMIC_CMP_SWAP_I32: 10406 LoadMnemonic = PPC::LWARX; 10407 StoreMnemonic = PPC::STWCX; 10408 break; 10409 case PPC::ATOMIC_CMP_SWAP_I64: 10410 LoadMnemonic = PPC::LDARX; 10411 StoreMnemonic = PPC::STDCX; 10412 break; 10413 } 10414 unsigned dest = MI.getOperand(0).getReg(); 10415 unsigned ptrA = MI.getOperand(1).getReg(); 10416 unsigned ptrB = MI.getOperand(2).getReg(); 10417 unsigned oldval = MI.getOperand(3).getReg(); 10418 unsigned newval = MI.getOperand(4).getReg(); 10419 DebugLoc dl = MI.getDebugLoc(); 10420 10421 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 10422 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 10423 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 10424 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 10425 F->insert(It, loop1MBB); 10426 F->insert(It, loop2MBB); 10427 F->insert(It, midMBB); 10428 F->insert(It, exitMBB); 10429 exitMBB->splice(exitMBB->begin(), BB, 10430 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10431 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 10432 10433 // thisMBB: 10434 // ... 10435 // fallthrough --> loopMBB 10436 BB->addSuccessor(loop1MBB); 10437 10438 // loop1MBB: 10439 // l[bhwd]arx dest, ptr 10440 // cmp[wd] dest, oldval 10441 // bne- midMBB 10442 // loop2MBB: 10443 // st[bhwd]cx. newval, ptr 10444 // bne- loopMBB 10445 // b exitBB 10446 // midMBB: 10447 // st[bhwd]cx. dest, ptr 10448 // exitBB: 10449 BB = loop1MBB; 10450 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 10451 .addReg(ptrA).addReg(ptrB); 10452 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0) 10453 .addReg(oldval).addReg(dest); 10454 BuildMI(BB, dl, TII->get(PPC::BCC)) 10455 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 10456 BB->addSuccessor(loop2MBB); 10457 BB->addSuccessor(midMBB); 10458 10459 BB = loop2MBB; 10460 BuildMI(BB, dl, TII->get(StoreMnemonic)) 10461 .addReg(newval).addReg(ptrA).addReg(ptrB); 10462 BuildMI(BB, dl, TII->get(PPC::BCC)) 10463 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 10464 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 10465 BB->addSuccessor(loop1MBB); 10466 BB->addSuccessor(exitMBB); 10467 10468 BB = midMBB; 10469 BuildMI(BB, dl, TII->get(StoreMnemonic)) 10470 .addReg(dest).addReg(ptrA).addReg(ptrB); 10471 BB->addSuccessor(exitMBB); 10472 10473 // exitMBB: 10474 // ... 10475 BB = exitMBB; 10476 } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 10477 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 10478 // We must use 64-bit registers for addresses when targeting 64-bit, 10479 // since we're actually doing arithmetic on them. Other registers 10480 // can be 32-bit. 
    bool is64bit = Subtarget.isPPC64();
    bool isLittleEndian = Subtarget.isLittleEndian();
    bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;

    unsigned dest = MI.getOperand(0).getReg();
    unsigned ptrA = MI.getOperand(1).getReg();
    unsigned ptrB = MI.getOperand(2).getReg();
    unsigned oldval = MI.getOperand(3).getReg();
    unsigned newval = MI.getOperand(4).getReg();
    DebugLoc dl = MI.getDebugLoc();

    MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, loop1MBB);
    F->insert(It, loop2MBB);
    F->insert(It, midMBB);
    F->insert(It, exitMBB);
    exitMBB->splice(exitMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    exitMBB->transferSuccessorsAndUpdatePHIs(BB);

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    const TargetRegisterClass *RC =
        is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
    unsigned PtrReg = RegInfo.createVirtualRegister(RC);
    unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
    unsigned ShiftReg =
        isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC);
    unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC);
    unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC);
    unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC);
    unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC);
    unsigned MaskReg = RegInfo.createVirtualRegister(RC);
    unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
    unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
    unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
    unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
    unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
    unsigned Ptr1Reg;
    unsigned TmpReg = RegInfo.createVirtualRegister(RC);
    unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
    // thisMBB:
    //   ...
    //   fallthrough --> loop1MBB
    BB->addSuccessor(loop1MBB);

    // The 4-byte load must be aligned, while a char or short may be
    // anywhere in the word. Hence all this nasty bookkeeping code:
    //   add ptr1, ptrA, ptrB [copy if ptrA==0]
    //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
    //   xori shift, shift1, 24 [16]
    //   rlwinm ptr, ptr1, 0, 0, 29
    //   slw newval2, newval, shift
    //   slw oldval2, oldval, shift
    //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
    //   slw mask, mask2, shift
    //   and newval3, newval2, mask
    //   and oldval3, oldval2, mask
    // loop1MBB:
    //   lwarx tmpDest, ptr
    //   and tmp, tmpDest, mask
    //   cmpw tmp, oldval3
    //   bne- midMBB
    // loop2MBB:
    //   andc tmp2, tmpDest, mask
    //   or tmp4, tmp2, newval3
    //   stwcx. tmp4, ptr
    //   bne- loop1MBB
    //   b exitMBB
    // midMBB:
    //   stwcx. tmpDest, ptr
    // exitMBB:
    //   srw dest, tmpDest, shift
    if (ptrA != ZeroReg) {
      Ptr1Reg = RegInfo.createVirtualRegister(RC);
      BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
        .addReg(ptrA).addReg(ptrB);
    } else {
      Ptr1Reg = ptrB;
    }
    BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
      .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
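    // Illustrative example (explanatory comment, not in the original): for a
    // byte at word offset 1 (EA & 3 == 1), the rlwinm above computes
    // shift1 = (EA & 3) * 8 = 8. On little-endian that is already the lane's
    // shift amount; on big-endian the xori below flips it to 24 ^ 8 = 16,
    // since byte 1 of a big-endian word occupies bits 23:16 of the register.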
    if (!isLittleEndian)
      BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
        .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
    if (is64bit)
      BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
        .addReg(Ptr1Reg).addImm(0).addImm(61);
    else
      BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
        .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
    BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
      .addReg(newval).addReg(ShiftReg);
    BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
      .addReg(oldval).addReg(ShiftReg);
    if (is8bit)
      BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
    else {
      BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
      BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
        .addReg(Mask3Reg).addImm(65535);
    }
    BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
      .addReg(Mask2Reg).addReg(ShiftReg);
    BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
      .addReg(NewVal2Reg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
      .addReg(OldVal2Reg).addReg(MaskReg);

    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
      .addReg(ZeroReg).addReg(PtrReg);
    BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
      .addReg(TmpDestReg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
      .addReg(TmpReg).addReg(OldVal3Reg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(midMBB);

    BB = loop2MBB;
    BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
      .addReg(TmpDestReg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
      .addReg(Tmp2Reg).addReg(NewVal3Reg);
    BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg)
      .addReg(ZeroReg).addReg(PtrReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
    BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
    BB->addSuccessor(loop1MBB);
    BB->addSuccessor(exitMBB);

    BB = midMBB;
    BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg)
      .addReg(ZeroReg).addReg(PtrReg);
    BB->addSuccessor(exitMBB);

    // exitMBB:
    //   ...
    BB = exitMBB;
    BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpReg)
      .addReg(ShiftReg);
  } else if (MI.getOpcode() == PPC::FADDrtz) {
    // This pseudo performs an FADD with rounding mode temporarily forced
    // to round-to-zero. We emit this via custom inserter since the FPSCR
    // is not modeled at the SelectionDAG level.
    unsigned Dest = MI.getOperand(0).getReg();
    unsigned Src1 = MI.getOperand(1).getReg();
    unsigned Src2 = MI.getOperand(2).getReg();
    DebugLoc dl = MI.getDebugLoc();

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);

    // Save FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);

    // Set rounding mode to round-to-zero.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);

    // Perform addition.
    BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);

    // Restore FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
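
    // In full, the expansion is roughly (an illustrative sketch of the
    // instructions built above, not taken from the original comments):
    //   mffs   MFFSReg           ; save FPSCR
    //   mtfsb1 31                ; \ set FPSCR[RN] to 0b01,
    //   mtfsb0 30                ; /  i.e. round toward zero
    //   fadd   Dest, Src1, Src2
    //   mtfsf  1, MFFSReg        ; restore the original rounding mode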
  } else if (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT ||
             MI.getOpcode() == PPC::ANDIo_1_GT_BIT ||
             MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
             MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) {
    unsigned Opcode = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
                       MI.getOpcode() == PPC::ANDIo_1_GT_BIT8)
                          ? PPC::ANDIo8
                          : PPC::ANDIo;
    bool isEQ = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT ||
                 MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8);

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    unsigned Dest = RegInfo.createVirtualRegister(
        Opcode == PPC::ANDIo ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);

    DebugLoc dl = MI.getDebugLoc();
    BuildMI(*BB, MI, dl, TII->get(Opcode), Dest)
        .addReg(MI.getOperand(1).getReg())
        .addImm(1);
    BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY),
            MI.getOperand(0).getReg())
        .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT);
  } else if (MI.getOpcode() == PPC::TCHECK_RET) {
    DebugLoc Dl = MI.getDebugLoc();
    MachineRegisterInfo &RegInfo = F->getRegInfo();
    unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
    BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
    return BB;
  } else {
    llvm_unreachable("Unexpected instr type to insert");
  }

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
  // For the estimates, convergence is quadratic, so we essentially double the
  // number of correct bits after every iteration. For both FRE and FRSQRTE,
  // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
  // this is 2^-14. IEEE float has 23 fraction bits and double has 52.
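  // Worked example (illustrative): starting from 5 correct bits, three
  // Newton-Raphson steps give 5 -> 10 -> 20 -> 40 bits, enough for float's
  // 23; double needs one more step (80 >= 52). With 2^-14 initial accuracy,
  // one step reaches 28 bits for float and two steps reach 56 for double,
  // matching the counts computed below.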
  int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
  if (VT.getScalarType() == MVT::f64)
    RefinementSteps++;
  return RefinementSteps;
}

SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                           int Enabled, int &RefinementSteps,
                                           bool &UseOneConstNR,
                                           bool Reciprocal) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
      (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
      (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
      (VT == MVT::v4f64 && Subtarget.hasQPX())) {
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);

    UseOneConstNR = true;
    return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
                                            int Enabled,
                                            int &RefinementSteps) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
      (VT == MVT::f64 && Subtarget.hasFRE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
      (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
      (VT == MVT::v4f64 && Subtarget.hasQPX())) {
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
    return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
  // Note: This functionality is used only when unsafe-fp-math is enabled, and
  // on cores with reciprocal estimates (which are used when unsafe-fp-math is
  // enabled for division), this functionality is redundant with the default
  // combiner logic (once the division -> reciprocal/multiply transformation
  // has taken place). As a result, this matters more for older cores than for
  // newer ones.

  // Combine multiple FDIVs with the same divisor into multiple FMULs by the
  // reciprocal if there are two or more FDIVs (for embedded cores with only
  // one FP pipeline) or three or more FDIVs (for generic OOO cores).
  switch (Subtarget.getDarwinDirective()) {
  default:
    return 3;
  case PPC::DIR_440:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
    return 2;
  }
}

// isConsecutiveLSLoc needs to work even if all adds have not yet been
// collapsed, and so we need to look through chains of them.
static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
                                      int64_t& Offset, SelectionDAG &DAG) {
  if (DAG.isBaseWithConstantOffset(Loc)) {
    Base = Loc.getOperand(0);
    Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();

    // The base might itself be a base plus an offset, and if so, accumulate
    // that as well.
    getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
  }
}
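
// For example (illustrative): given (add (add X, 8), 16), the first call
// records Base = (add X, 8) and Offset = 16, and the recursive call then
// rewrites Base = X while accumulating Offset = 24.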
static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
                               unsigned Bytes, int Dist,
                               SelectionDAG &DAG) {
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDValue BaseLoc = Base->getBasePtr();
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS  = MFI.getObjectSize(FI);
    int BFS = MFI.getObjectSize(BFI);
    if (FS != BFS || FS != (int)Bytes) return false;
    return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
  }

  SDValue Base1 = Loc, Base2 = BaseLoc;
  int64_t Offset1 = 0, Offset2 = 0;
  getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
  getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
  if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
    return true;

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const GlobalValue *GV1 = nullptr;
  const GlobalValue *GV2 = nullptr;
  Offset1 = 0;
  Offset2 = 0;
  bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
  bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist*Bytes);
  return false;
}
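
// For example (illustrative): with Bytes = 4 and Dist = 1, an access at
// Base + 4 is consecutive with the base access at Base + 0, whether the two
// addresses are matching stack slots, (add X, C) chains, or offsets from the
// same global -- the three cases checked above.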
// Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
// not enforce equality of the chain operands.
static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
                            unsigned Bytes, int Dist,
                            SelectionDAG &DAG) {
  if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
    EVT VT = LS->getMemoryVT();
    SDValue Loc = LS->getBasePtr();
    return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
  }

  if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
    EVT VT;
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    default: return false;
    case Intrinsic::ppc_qpx_qvlfd:
    case Intrinsic::ppc_qpx_qvlfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfs:
    case Intrinsic::ppc_qpx_qvlfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcd:
    case Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcs:
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    case Intrinsic::ppc_qpx_qvlfiwa:
    case Intrinsic::ppc_qpx_qvlfiwz:
    case Intrinsic::ppc_altivec_lvx:
    case Intrinsic::ppc_altivec_lvxl:
    case Intrinsic::ppc_vsx_lxvw4x:
    case Intrinsic::ppc_vsx_lxvw4x_be:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
    case Intrinsic::ppc_vsx_lxvd2x_be:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    }

    return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
  }

  if (N->getOpcode() == ISD::INTRINSIC_VOID) {
    EVT VT;
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    default: return false;
    case Intrinsic::ppc_qpx_qvstfd:
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    case Intrinsic::ppc_qpx_qvstfiw:
    case Intrinsic::ppc_qpx_qvstfiwa:
    case Intrinsic::ppc_altivec_stvx:
    case Intrinsic::ppc_altivec_stvxl:
    case Intrinsic::ppc_vsx_stxvw4x:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_vsx_stxvw4x_be:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x_be:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    }

    return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
  }

  return false;
}

// Return true if there is a nearby consecutive load to the one provided
// (regardless of alignment). We search up and down the chain, looking through
// token factors and other loads (but nothing else). As a result, a true result
// indicates that it is safe to create a new consecutive load adjacent to the
// load provided.
static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
  SDValue Chain = LD->getChain();
  EVT VT = LD->getMemoryVT();

  SmallSet<SDNode *, 16> LoadRoots;
  SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
  SmallSet<SDNode *, 16> Visited;

  // First, search up the chain, branching to follow all token-factor operands.
  // If we find a consecutive load, then we're done, otherwise, record all
  // nodes just above the top-level loads and token factors.
  while (!Queue.empty()) {
    SDNode *ChainNext = Queue.pop_back_val();
    if (!Visited.insert(ChainNext).second)
      continue;

    if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
      if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
        return true;

      if (!Visited.count(ChainLD->getChain().getNode()))
        Queue.push_back(ChainLD->getChain().getNode());
    } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
      for (const SDUse &O : ChainNext->ops())
        if (!Visited.count(O.getNode()))
          Queue.push_back(O.getNode());
    } else
      LoadRoots.insert(ChainNext);
  }
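
  // An illustrative picture of the two phases (explanatory sketch, not from
  // the original comments). Chain operands point from consumers to producers,
  // so for
  //
  //          Root            <- recorded in LoadRoots by the upward walk
  //         /    \
  //     load A  load B       <- reached again from Root's uses in phase two
  //         \    /
  //       TokenFactor
  //            |
  //           LD
  //
  // the first phase walks LD -> TokenFactor -> {A, B} -> Root, and the second
  // phase walks back down from Root through chain uses, checking each memory
  // node it passes for consecutiveness with LD.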
  // Second, search down the chain, starting from the top-level nodes recorded
  // in the first phase. These top-level nodes are the nodes just above all
  // loads and token factors. Starting with their uses, recursively look
  // through all loads (just the chain uses) and token factors to find a
  // consecutive load.
  Visited.clear();
  Queue.clear();

  for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
       IE = LoadRoots.end(); I != IE; ++I) {
    Queue.push_back(*I);

    while (!Queue.empty()) {
      SDNode *LoadRoot = Queue.pop_back_val();
      if (!Visited.insert(LoadRoot).second)
        continue;

      if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
        if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
          return true;

      for (SDNode::use_iterator UI = LoadRoot->use_begin(),
           UE = LoadRoot->use_end(); UI != UE; ++UI)
        if (((isa<MemSDNode>(*UI) &&
              cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
          Queue.push_back(*UI);
    }
  }

  return false;
}

/// This function is called when we have proved that a SETCC node can be
/// replaced by subtraction (and other supporting instructions) so that the
/// result of the comparison is kept in a GPR instead of a CR. This function is
/// purely for codegen purposes and has some flags to guide the codegen
/// process.
static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
                                     bool Swap, SDLoc &DL, SelectionDAG &DAG) {
  assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");

  // Zero extend the operands to the largest legal integer. Originally, they
  // must be of a strictly smaller size.
  auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
                         DAG.getConstant(Size, DL, MVT::i32));
  auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
                         DAG.getConstant(Size, DL, MVT::i32));

  // Swap if needed. Depends on the condition code.
  if (Swap)
    std::swap(Op0, Op1);

  // Subtract extended integers.
  auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);

  // Move the sign bit to the least significant position and zero out the rest.
  // Now the least significant bit carries the result of original comparison.
  auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
                             DAG.getConstant(Size - 1, DL, MVT::i32));
  auto Final = Shifted;

  // Complement the result if needed. Based on the condition code.
  if (Complement)
    Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
                        DAG.getConstant(1, DL, MVT::i64));

  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
}
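
// Worked example (illustrative): for (setcc i32 %a, %b, setult) with
// Size = 64, the difference of the zero-extended operands is negative exactly
// when %a < %b unsigned, so bit 63 of the subtraction holds the comparison
// result and the SRL by Size - 1 moves it down to bit 0. SETUGE uses the same
// subtraction followed by an XOR with 1 (Complement), while SETULE and SETUGT
// swap the operands first (Swap); see the dispatch in the caller below.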
SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  // Size of integers being compared has a critical role in the following
  // analysis, so we prefer to do this when all types are legal.
  if (!DCI.isAfterLegalizeDAG())
    return SDValue();

  // If all users of SETCC extend its value to a legal integer type,
  // then we replace SETCC with a subtraction.
  for (SDNode::use_iterator UI = N->use_begin(),
       UE = N->use_end(); UI != UE; ++UI) {
    if (UI->getOpcode() != ISD::ZERO_EXTEND)
      return SDValue();
  }

  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  auto OpSize = N->getOperand(0).getValueSizeInBits();

  unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();

  if (OpSize < Size) {
    switch (CC) {
    default: break;
    case ISD::SETULT:
      return generateEquivalentSub(N, Size, false, false, DL, DAG);
    case ISD::SETULE:
      return generateEquivalentSub(N, Size, true, true, DL, DAG);
    case ISD::SETUGT:
      return generateEquivalentSub(N, Size, false, true, DL, DAG);
    case ISD::SETUGE:
      return generateEquivalentSub(N, Size, true, false, DL, DAG);
    }
  }

  return SDValue();
}

SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
  // If we're tracking CR bits, we need to be careful that we don't have:
  //   trunc(binary-ops(zext(x), zext(y)))
  // or
  //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
  // such that we're unnecessarily moving things into GPRs when it would be
  // better to keep them in CR bits.

  // Note that trunc here can be an actual i1 trunc, or can be the effective
  // truncation that comes from a setcc or select_cc.
  if (N->getOpcode() == ISD::TRUNCATE &&
      N->getValueType(0) != MVT::i1)
    return SDValue();

  if (N->getOperand(0).getValueType() != MVT::i32 &&
      N->getOperand(0).getValueType() != MVT::i64)
    return SDValue();

  if (N->getOpcode() == ISD::SETCC ||
      N->getOpcode() == ISD::SELECT_CC) {
    // If we're looking at a comparison, then we need to make sure that the
    // high bits (all except for the first) don't affect the result.
    ISD::CondCode CC =
      cast<CondCodeSDNode>(N->getOperand(
        N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
    unsigned OpBits = N->getOperand(0).getValueSizeInBits();

    if (ISD::isSignedIntSetCC(CC)) {
      if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
          DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
        return SDValue();
    } else if (ISD::isUnsignedIntSetCC(CC)) {
      if (!DAG.MaskedValueIsZero(N->getOperand(0),
                                 APInt::getHighBitsSet(OpBits, OpBits-1)) ||
          !DAG.MaskedValueIsZero(N->getOperand(1),
                                 APInt::getHighBitsSet(OpBits, OpBits-1)))
        return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
                                             : SDValue());
    } else {
      // This is neither a signed nor an unsigned comparison, just make sure
      // that the high bits are equal.
      KnownBits Op1Known, Op2Known;
      DAG.computeKnownBits(N->getOperand(0), Op1Known);
      DAG.computeKnownBits(N->getOperand(1), Op2Known);

      // We don't really care about what is known about the first bit (if
      // anything), so clear it in all masks prior to comparing them.
      Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0);
      Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0);

      if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One)
        return SDValue();
    }
  }

  // We now know that the higher-order bits are irrelevant, we just need to
  // make sure that all of the intermediate operations are bit operations, and
  // all inputs are extensions.
  if (N->getOperand(0).getOpcode() != ISD::AND &&
      N->getOperand(0).getOpcode() != ISD::OR  &&
      N->getOperand(0).getOpcode() != ISD::XOR &&
      N->getOperand(0).getOpcode() != ISD::SELECT &&
      N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
      N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
      N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
      N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
      N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
    return SDValue();

  if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
      N->getOperand(1).getOpcode() != ISD::AND &&
      N->getOperand(1).getOpcode() != ISD::OR  &&
      N->getOperand(1).getOpcode() != ISD::XOR &&
      N->getOperand(1).getOpcode() != ISD::SELECT &&
      N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
      N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
      N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
      N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
      N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
    return SDValue();

  SmallVector<SDValue, 4> Inputs;
  SmallVector<SDValue, 8> BinOps, PromOps;
  SmallPtrSet<SDNode *, 16> Visited;

  for (unsigned i = 0; i < 2; ++i) {
    if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
          N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
          N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
          N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
        isa<ConstantSDNode>(N->getOperand(i)))
      Inputs.push_back(N->getOperand(i));
    else
      BinOps.push_back(N->getOperand(i));

    if (N->getOpcode() == ISD::TRUNCATE)
      break;
  }

  // Visit all inputs, collect all binary operations (and, or, xor and
  // select) that are all fed by extensions.
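  // Illustrative example (explanatory, not from the original comments): for
  // trunc(xor(zext(a), zext(b))) with i1 a and b, the xor is pushed onto
  // BinOps (and later PromOps), while the two zexts land in Inputs; the loop
  // below keeps expanding BinOps until only i1 extensions and constants
  // remain at the leaves, or aborts on anything else.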
  while (!BinOps.empty()) {
    SDValue BinOp = BinOps.back();
    BinOps.pop_back();

    if (!Visited.insert(BinOp.getNode()).second)
      continue;

    PromOps.push_back(BinOp);

    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
      // The condition of the select is not promoted.
      if (BinOp.getOpcode() == ISD::SELECT && i == 0)
        continue;
      if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
        continue;

      if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
            BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
            BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
           BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
          isa<ConstantSDNode>(BinOp.getOperand(i))) {
        Inputs.push_back(BinOp.getOperand(i));
      } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
                 BinOp.getOperand(i).getOpcode() == ISD::OR  ||
                 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
                 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
                 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
                 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
                 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
        BinOps.push_back(BinOp.getOperand(i));
      } else {
        // We have an input that is not an extension or another binary
        // operation; we'll abort this transformation.
        return SDValue();
      }
    }
  }

  // Make sure that this is a self-contained cluster of operations (which
  // is not quite the same thing as saying that everything has only one
  // use).
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
         UE = Inputs[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // Make sure that we're not going to promote the non-output-value
      // operand(s) or SELECT or SELECT_CC.
      // FIXME: Although we could sometimes handle this, and it does occur in
      // practice that one of the condition inputs to the select is also one of
      // the outputs, we currently can't deal with this.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == Inputs[i])
          return SDValue();
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == Inputs[i] ||
            User->getOperand(1) == Inputs[i])
          return SDValue();
      }
    }
  }

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
    for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
         UE = PromOps[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // Make sure that we're not going to promote the non-output-value
      // operand(s) or SELECT or SELECT_CC.
      // FIXME: Although we could sometimes handle this, and it does occur in
      // practice that one of the condition inputs to the select is also one of
      // the outputs, we currently can't deal with this.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == PromOps[i])
          return SDValue();
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == PromOps[i] ||
            User->getOperand(1) == PromOps[i])
          return SDValue();
      }
    }
  }
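
  // At this point (a summary note, not from the original comments): every
  // non-constant input and every intermediate operation is used only within
  // the cluster (or by N itself), so the nodes can be rewritten to i1 in
  // place without perturbing unrelated users.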
  // Replace all inputs with the extension operand.
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    // Constants may have users outside the cluster of to-be-promoted nodes,
    // and so we need to replace those as we do the promotions.
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;
    else
      DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
  }

  std::list<HandleSDNode> PromOpHandles;
  for (auto &PromOp : PromOps)
    PromOpHandles.emplace_back(PromOp);

  // Replace all operations (these are all the same, but have a different
  // (i1) return type). DAG.getNode will validate that the types of
  // a binary operator match, so go through the list in reverse so that
  // we've likely promoted both operands first. Any intermediate truncations or
  // extensions disappear.
  while (!PromOpHandles.empty()) {
    SDValue PromOp = PromOpHandles.back().getValue();
    PromOpHandles.pop_back();

    if (PromOp.getOpcode() == ISD::TRUNCATE ||
        PromOp.getOpcode() == ISD::SIGN_EXTEND ||
        PromOp.getOpcode() == ISD::ZERO_EXTEND ||
        PromOp.getOpcode() == ISD::ANY_EXTEND) {
      if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
          PromOp.getOperand(0).getValueType() != MVT::i1) {
        // The operand is not yet ready (see comment below).
        PromOpHandles.emplace_front(PromOp);
        continue;
      }

      SDValue RepValue = PromOp.getOperand(0);
      if (isa<ConstantSDNode>(RepValue))
        RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);

      DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
      continue;
    }

    unsigned C;
    switch (PromOp.getOpcode()) {
    default:             C = 0; break;
    case ISD::SELECT:    C = 1; break;
    case ISD::SELECT_CC: C = 2; break;
    }

    if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
         PromOp.getOperand(C).getValueType() != MVT::i1) ||
        (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
         PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
      // The to-be-promoted operands of this node have not yet been
      // promoted (this should be rare because we're going through the
      // list backward, but if one of the operands has several users in
      // this cluster of to-be-promoted nodes, it is possible).
      PromOpHandles.emplace_front(PromOp);
      continue;
    }

    SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
                                PromOp.getNode()->op_end());

    // If there are any constant inputs, make sure they're replaced now.
    for (unsigned i = 0; i < 2; ++i)
      if (isa<ConstantSDNode>(Ops[C+i]))
        Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);

    DAG.ReplaceAllUsesOfValueWith(PromOp,
      DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
  }

  // Now we're left with the initial truncation itself.
  if (N->getOpcode() == ISD::TRUNCATE)
    return N->getOperand(0);

  // Otherwise, this is a comparison. The operands to be compared have just
  // changed type (to i1), but everything else is the same.
  return SDValue(N, 0);
}

SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  // If we're tracking CR bits, we need to be careful that we don't have:
  //   zext(binary-ops(trunc(x), trunc(y)))
  // or
  //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
  // such that we're unnecessarily moving things into CR bits that can more
  // efficiently stay in GPRs. Note that if we're not certain that the high
  // bits are set as required by the final extension, we still may need to do
  // some masking to get the proper behavior.

  // This same functionality is important on PPC64 when dealing with
  // 32-to-64-bit extensions; these occur often when 32-bit values are used as
  // the return values of functions. Because it is so similar, it is handled
  // here as well.
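  // For example (illustrative): for zext(and(trunc(x), trunc(y))) where x
  // and y are i32, this combine can produce and(x, y) directly; if it cannot
  // prove that the high bits of x and y are already zero, it preserves the
  // semantics by emitting the masking AND built near the end of this
  // function.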
  if (N->getValueType(0) != MVT::i32 &&
      N->getValueType(0) != MVT::i64)
    return SDValue();

  if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
        (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
    return SDValue();

  if (N->getOperand(0).getOpcode() != ISD::AND &&
      N->getOperand(0).getOpcode() != ISD::OR  &&
      N->getOperand(0).getOpcode() != ISD::XOR &&
      N->getOperand(0).getOpcode() != ISD::SELECT &&
      N->getOperand(0).getOpcode() != ISD::SELECT_CC)
    return SDValue();

  SmallVector<SDValue, 4> Inputs;
  SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
  SmallPtrSet<SDNode *, 16> Visited;

  // Visit all inputs, collect all binary operations (and, or, xor and
  // select) that are all fed by truncations.
  while (!BinOps.empty()) {
    SDValue BinOp = BinOps.back();
    BinOps.pop_back();

    if (!Visited.insert(BinOp.getNode()).second)
      continue;

    PromOps.push_back(BinOp);

    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
      // The condition of the select is not promoted.
      if (BinOp.getOpcode() == ISD::SELECT && i == 0)
        continue;
      if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
        continue;

      if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
          isa<ConstantSDNode>(BinOp.getOperand(i))) {
        Inputs.push_back(BinOp.getOperand(i));
      } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
                 BinOp.getOperand(i).getOpcode() == ISD::OR  ||
                 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
        BinOps.push_back(BinOp.getOperand(i));
      } else {
        // We have an input that is not a truncation or another binary
        // operation; we'll abort this transformation.
        return SDValue();
      }
    }
  }

  // The operands of a select that must be truncated when the select is
  // promoted because the operand is actually part of the to-be-promoted set.
  DenseMap<SDNode *, EVT> SelectTruncOp[2];

  // Make sure that this is a self-contained cluster of operations (which
  // is not quite the same thing as saying that everything has only one
  // use).
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
         UE = Inputs[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // If we're going to promote the non-output-value operand(s) or SELECT or
      // SELECT_CC, record them for truncation.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == Inputs[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == Inputs[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
        if (User->getOperand(1) == Inputs[i])
          SelectTruncOp[1].insert(std::make_pair(User,
                                  User->getOperand(1).getValueType()));
      }
    }
  }

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
    for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
         UE = PromOps[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // If we're going to promote the non-output-value operand(s) or SELECT or
      // SELECT_CC, record them for truncation.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == PromOps[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == PromOps[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
        if (User->getOperand(1) == PromOps[i])
          SelectTruncOp[1].insert(std::make_pair(User,
                                  User->getOperand(1).getValueType()));
      }
    }
  }

  unsigned PromBits = N->getOperand(0).getValueSizeInBits();
  bool ReallyNeedsExt = false;
  if (N->getOpcode() != ISD::ANY_EXTEND) {
    // If all of the inputs are not already sign/zero extended, then
    // we'll still need to do that at the end.
    for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
      if (isa<ConstantSDNode>(Inputs[i]))
        continue;

      unsigned OpBits =
        Inputs[i].getOperand(0).getValueSizeInBits();
      assert(PromBits < OpBits && "Truncation not to a smaller bit count?");

      if ((N->getOpcode() == ISD::ZERO_EXTEND &&
           !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
                                  APInt::getHighBitsSet(OpBits,
                                                        OpBits-PromBits))) ||
          (N->getOpcode() == ISD::SIGN_EXTEND &&
           DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
             (OpBits-(PromBits-1)))) {
        ReallyNeedsExt = true;
        break;
      }
    }
  }
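
  // For example (illustrative): when zero-extending and every truncation
  // input is a value whose high OpBits - PromBits bits are provably zero,
  // ReallyNeedsExt stays false and the corrective code at the end of this
  // function is skipped; a single input that might carry stray high bits
  // forces the final mask (or, for sign extension, the shift pair).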
  // Replace all inputs, either with the truncation operand, or a
  // truncation or extension to the final output type.
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    // Constant inputs need to be replaced with the to-be-promoted nodes that
    // use them because they might have users outside of the cluster of
    // promoted nodes.
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    SDValue InSrc = Inputs[i].getOperand(0);
    if (Inputs[i].getValueType() == N->getValueType(0))
      DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
    else if (N->getOpcode() == ISD::SIGN_EXTEND)
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
        DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
    else if (N->getOpcode() == ISD::ZERO_EXTEND)
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
        DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
    else
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
        DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
  }

  std::list<HandleSDNode> PromOpHandles;
  for (auto &PromOp : PromOps)
    PromOpHandles.emplace_back(PromOp);

  // Replace all operations (these are all the same, but have a different
  // (promoted) return type). DAG.getNode will validate that the types of
  // a binary operator match, so go through the list in reverse so that
  // we've likely promoted both operands first.
  while (!PromOpHandles.empty()) {
    SDValue PromOp = PromOpHandles.back().getValue();
    PromOpHandles.pop_back();

    unsigned C;
    switch (PromOp.getOpcode()) {
    default:             C = 0; break;
    case ISD::SELECT:    C = 1; break;
    case ISD::SELECT_CC: C = 2; break;
    }

    if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
         PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
        (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
         PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
      // The to-be-promoted operands of this node have not yet been
      // promoted (this should be rare because we're going through the
      // list backward, but if one of the operands has several users in
      // this cluster of to-be-promoted nodes, it is possible).
      PromOpHandles.emplace_front(PromOp);
      continue;
    }

    // For SELECT and SELECT_CC nodes, we do a similar check for any
    // to-be-promoted comparison inputs.
    if (PromOp.getOpcode() == ISD::SELECT ||
        PromOp.getOpcode() == ISD::SELECT_CC) {
      if ((SelectTruncOp[0].count(PromOp.getNode()) &&
           PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
          (SelectTruncOp[1].count(PromOp.getNode()) &&
           PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
        PromOpHandles.emplace_front(PromOp);
        continue;
      }
    }

    SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
                                PromOp.getNode()->op_end());

    // If this node has constant inputs, then they'll need to be promoted here.
    for (unsigned i = 0; i < 2; ++i) {
      if (!isa<ConstantSDNode>(Ops[C+i]))
        continue;
      if (Ops[C+i].getValueType() == N->getValueType(0))
        continue;

      if (N->getOpcode() == ISD::SIGN_EXTEND)
        Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
      else if (N->getOpcode() == ISD::ZERO_EXTEND)
        Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
      else
        Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
    }

    // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
    // truncate them again to the original value type.
    if (PromOp.getOpcode() == ISD::SELECT ||
        PromOp.getOpcode() == ISD::SELECT_CC) {
      auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
      if (SI0 != SelectTruncOp[0].end())
        Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
      auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
      if (SI1 != SelectTruncOp[1].end())
        Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
    }

    DAG.ReplaceAllUsesOfValueWith(PromOp,
      DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
  }

  // Now we're left with the initial extension itself.
  if (!ReallyNeedsExt)
    return N->getOperand(0);

  // To zero extend, just mask off everything except for the first bit (in the
  // i1 case).
  if (N->getOpcode() == ISD::ZERO_EXTEND)
    return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
                       DAG.getConstant(APInt::getLowBitsSet(
                                         N->getValueSizeInBits(0), PromBits),
                                       dl, N->getValueType(0)));
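
  // For sign extension we use the classic shift pair below (a brief
  // explanatory note): shift the PromBits-wide value left so its top bit
  // becomes the register's sign bit, then arithmetic-shift it back down.
  // For example, for i1 -> i32 the shift amount is 31, and
  // shl x, 31; sra x, 31 replicates bit 0 across all 32 bits.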
  assert(N->getOpcode() == ISD::SIGN_EXTEND &&
         "Invalid extension type");
  EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
  SDValue ShiftCst =
    DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
  return DAG.getNode(
      ISD::SRA, dl, N->getValueType(0),
      DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
      ShiftCst);
}

/// Reduces the number of fp-to-int conversions when building a vector.
///
/// If this vector is built out of floating to integer conversions,
/// transform it to a vector built out of floating point values followed by a
/// single floating to integer conversion of the vector.
/// Namely  (build_vector (fptosi $A), (fptosi $B), ...)
/// becomes (fptosi (build_vector ($A, $B, ...)))
SDValue PPCTargetLowering::
combineElementTruncationToVectorTruncation(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         "Should be called with a BUILD_VECTOR node");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  SDValue FirstInput = N->getOperand(0);
  assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
         "The input operand must be an fp-to-int conversion.");

  // This combine happens after legalization so the fp_to_[su]i nodes are
  // already converted to PPCISD nodes.
  unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
  if (FirstConversion == PPCISD::FCTIDZ ||
      FirstConversion == PPCISD::FCTIDUZ ||
      FirstConversion == PPCISD::FCTIWZ ||
      FirstConversion == PPCISD::FCTIWUZ) {
    bool IsSplat = true;
    bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
      FirstConversion == PPCISD::FCTIWUZ;
    EVT SrcVT = FirstInput.getOperand(0).getValueType();
    SmallVector<SDValue, 4> Ops;
    EVT TargetVT = N->getValueType(0);
    for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
      if (N->getOperand(i).getOpcode() != PPCISD::MFVSR)
        return SDValue();
      unsigned NextConversion = N->getOperand(i).getOperand(0).getOpcode();
      if (NextConversion != FirstConversion)
        return SDValue();
      if (N->getOperand(i) != FirstInput)
        IsSplat = false;
    }

    // If this is a splat, we leave it as-is since there will be only a single
    // fp-to-int conversion followed by a splat of the integer. This is better
    // for 32-bit and smaller ints and neutral for 64-bit ints.
    if (IsSplat)
      return SDValue();

    // Now that we know we have the right type of node, get its operands.
    for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
      SDValue In = N->getOperand(i).getOperand(0);
      // For 32-bit values, we need to add an FP_ROUND node.
      if (Is32Bit) {
        if (In.isUndef())
          Ops.push_back(DAG.getUNDEF(SrcVT));
        else {
          SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
                                      MVT::f32, In.getOperand(0),
                                      DAG.getIntPtrConstant(1, dl));
          Ops.push_back(Trunc);
        }
      } else
        Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
    }

    unsigned Opcode;
    if (FirstConversion == PPCISD::FCTIDZ ||
        FirstConversion == PPCISD::FCTIWZ)
      Opcode = ISD::FP_TO_SINT;
    else
      Opcode = ISD::FP_TO_UINT;

    EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
    SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
    return DAG.getNode(Opcode, dl, TargetVT, BV);
  }
  return SDValue();
}

/// Reduce the number of loads when building a vector.
///
/// Building a vector out of multiple loads can be converted to a load
/// of the vector type if the loads are consecutive. If the loads are
/// consecutive but in descending order, a shuffle is added at the end
/// to reorder the vector.
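///
/// For example (illustrative): (build_vector (load a), (load a+4),
/// (load a+8), (load a+12)) becomes a single v4i32 load of a, while the
/// reversed operand order produces the same wide load followed by a
/// vector_shuffle with mask <3,2,1,0>.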
11766 if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
11767 return SDValue();
11768 }
11769
11770 assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
11771 "The loads cannot be both consecutive and reverse consecutive.");
11772
11773 SDValue FirstLoadOp =
11774 IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
11775 SDValue LastLoadOp =
11776 IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
11777 N->getOperand(N->getNumOperands()-1);
11778
11779 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
11780 LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
11781 if (InputsAreConsecutiveLoads) {
11782 assert(LD1 && "Input needs to be a LoadSDNode.");
11783 return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
11784 LD1->getBasePtr(), LD1->getPointerInfo(),
11785 LD1->getAlignment());
11786 }
11787 if (InputsAreReverseConsecutive) {
11788 assert(LDL && "Input needs to be a LoadSDNode.");
11789 SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
11790 LDL->getBasePtr(), LDL->getPointerInfo(),
11791 LDL->getAlignment());
11792 SmallVector<int, 16> Ops;
11793 for (int i = N->getNumOperands() - 1; i >= 0; i--)
11794 Ops.push_back(i);
11795
11796 return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
11797 DAG.getUNDEF(N->getValueType(0)), Ops);
11798 }
11799 return SDValue();
11800 }
11801
11802 // This function adds the required vector_shuffle needed to get
11803 // the elements of the vector extract in the correct position
11804 // as specified by the CorrectElems encoding.
11805 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
11806 SDValue Input, uint64_t Elems,
11807 uint64_t CorrectElems) {
11808 SDLoc dl(N);
11809
11810 unsigned NumElems = Input.getValueType().getVectorNumElements();
11811 SmallVector<int, 16> ShuffleMask(NumElems, -1);
11812
11813 // Knowing the element indices being extracted from the original
11814 // vector and the order in which they're being inserted, just put
11815 // them at element indices required for the instruction.
11816 for (unsigned i = 0; i < N->getNumOperands(); i++) {
11817 if (DAG.getDataLayout().isLittleEndian())
11818 ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
11819 else
11820 ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
11821 CorrectElems = CorrectElems >> 8;
11822 Elems = Elems >> 8;
11823 }
11824
11825 SDValue Shuffle =
11826 DAG.getVectorShuffle(Input.getValueType(), dl, Input,
11827 DAG.getUNDEF(Input.getValueType()), ShuffleMask);
11828
11829 EVT Ty = N->getValueType(0);
11830 SDValue BV = DAG.getNode(PPCISD::SExtVElems, dl, Ty, Shuffle);
11831 return BV;
11832 }
11833
11834 // Look for build vector patterns where input operands come from sign-
11835 // extended vector_extract elements of specific indices. If the correct indices
11836 // aren't used, add a vector shuffle to fix up the indices and create a new
11837 // PPCISD::SExtVElems node which selects the vector sign extend instructions
11838 // during instruction selection.
11839 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
11840 // This array encodes the indices that the vector sign extend instructions
11841 // extract from when extending from one type to another for both BE and LE.
11842 // The right nibble of each byte corresponds to the LE indices,
11843 // and the left nibble of each byte corresponds to the BE indices.
11844 // For example: 0x3074B8FC byte->word
11845 // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
11846 // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
11847 // For example: 0x000070F8 byte->double word
11848 // For LE: the allowed indices are: 0x0,0x8
11849 // For BE: the allowed indices are: 0x7,0xF
11850 uint64_t TargetElems[] = {
11851 0x3074B8FC, // b->w
11852 0x000070F8, // b->d
11853 0x10325476, // h->w
11854 0x00003074, // h->d
11855 0x00001032, // w->d
11856 };
11857
11858 uint64_t Elems = 0;
11859 int Index;
11860 SDValue Input;
11861
11862 auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
11863 if (!Op)
11864 return false;
11865 if (Op.getOpcode() != ISD::SIGN_EXTEND)
11866 return false;
11867
11868 SDValue Extract = Op.getOperand(0);
11869 if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
11870 return false;
11871
11872 ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
11873 if (!ExtOp)
11874 return false;
11875
11876 Index = ExtOp->getZExtValue();
11877 if (Input && Input != Extract.getOperand(0))
11878 return false;
11879
11880 if (!Input)
11881 Input = Extract.getOperand(0);
11882
11883 Elems = Elems << 8;
11884 Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
11885 Elems |= Index;
11886
11887 return true;
11888 };
11889
11890 // If the build vector operands aren't sign-extended vector extracts
11891 // of the same input vector, then return.
11892 for (unsigned i = 0; i < N->getNumOperands(); i++) {
11893 if (!isSExtOfVecExtract(N->getOperand(i))) {
11894 return SDValue();
11895 }
11896 }
11897
11898 // If the vector extract indices are not correct, add the appropriate
11899 // vector_shuffle.
11900 int TgtElemArrayIdx;
11901 int InputSize = Input.getValueType().getScalarSizeInBits();
11902 int OutputSize = N->getValueType(0).getScalarSizeInBits();
11903 if (InputSize + OutputSize == 40)
11904 TgtElemArrayIdx = 0;
11905 else if (InputSize + OutputSize == 72)
11906 TgtElemArrayIdx = 1;
11907 else if (InputSize + OutputSize == 48)
11908 TgtElemArrayIdx = 2;
11909 else if (InputSize + OutputSize == 80)
11910 TgtElemArrayIdx = 3;
11911 else if (InputSize + OutputSize == 96)
11912 TgtElemArrayIdx = 4;
11913 else
11914 return SDValue();
11915
11916 uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
11917 CorrectElems = DAG.getDataLayout().isLittleEndian()
11918 ? CorrectElems & 0x0F0F0F0F0F0F0F0F
11919 : CorrectElems & 0xF0F0F0F0F0F0F0F0;
11920 if (Elems != CorrectElems) {
11921 return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
11922 }
11923
11924 // Regular lowering will catch cases where a shuffle is not needed.
11925 return SDValue();
11926 }
11927
11928 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
11929 DAGCombinerInfo &DCI) const {
11930 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
11931 "Should be called with a BUILD_VECTOR node");
11932
11933 SelectionDAG &DAG = DCI.DAG;
11934 SDLoc dl(N);
11935
11936 if (!Subtarget.hasVSX())
11937 return SDValue();
11938
11939 // The target-independent DAG combiner will leave a build_vector of
11940 // float-to-int conversions intact. We can generate MUCH better code for
11941 // a float-to-int conversion of a vector of floats.
11942 SDValue FirstInput = N->getOperand(0);
11943 if (FirstInput.getOpcode() == PPCISD::MFVSR) {
11944 SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
11945 if (Reduced)
11946 return Reduced;
11947 }
11948
11949 // If we're building a vector out of consecutive loads, just load that
11950 // vector type.
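// (Illustrative, not from the original source:
//   (build_vector (f32 load p), (f32 load p+4),
//                 (f32 load p+8), (f32 load p+12))
// can become a single (v4f32 load p) here.)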
11951 SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
11952 if (Reduced)
11953 return Reduced;
11954
11955 // If we're building a vector out of extended elements from another vector,
11956 // we have P9 vector integer extend instructions.
11957 if (Subtarget.hasP9Altivec()) {
11958 Reduced = combineBVOfVecSExt(N, DAG);
11959 if (Reduced)
11960 return Reduced;
11961 }
11962
11963
11964 if (N->getValueType(0) != MVT::v2f64)
11965 return SDValue();
11966
11967 // Looking for:
11968 // (build_vector ([su]int_to_fp (extractelt 0)), ([su]int_to_fp (extractelt 1)))
11969 if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
11970 FirstInput.getOpcode() != ISD::UINT_TO_FP)
11971 return SDValue();
11972 if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
11973 N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
11974 return SDValue();
11975 if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
11976 return SDValue();
11977
11978 SDValue Ext1 = FirstInput.getOperand(0);
11979 SDValue Ext2 = N->getOperand(1).getOperand(0);
11980 if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
11981 Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
11982 return SDValue();
11983
11984 ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
11985 ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
11986 if (!Ext1Op || !Ext2Op)
11987 return SDValue();
11988 if (Ext1.getValueType() != MVT::i32 ||
11989 Ext2.getValueType() != MVT::i32)
11990 return SDValue();
11991 if (Ext1.getOperand(0) != Ext2.getOperand(0))
11992 return SDValue();
11993 int FirstElem = Ext1Op->getZExtValue();
11994 int SecondElem = Ext2Op->getZExtValue();
11995 int SubvecIdx;
11996 if (FirstElem == 0 && SecondElem == 1)
11997 SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
11998 else if (FirstElem == 2 && SecondElem == 3)
11999 SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
12000 else
12001 return SDValue();
12002
12003 SDValue SrcVec = Ext1.getOperand(0);
12004 auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
12005 PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
12006 return DAG.getNode(NodeType, dl, MVT::v2f64,
12007 SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
12008 }
12009
12010 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
12011 DAGCombinerInfo &DCI) const {
12012 assert((N->getOpcode() == ISD::SINT_TO_FP ||
12013 N->getOpcode() == ISD::UINT_TO_FP) &&
12014 "Need an int -> FP conversion node here");
12015
12016 if (useSoftFloat() || !Subtarget.has64BitSupport())
12017 return SDValue();
12018
12019 SelectionDAG &DAG = DCI.DAG;
12020 SDLoc dl(N);
12021 SDValue Op(N, 0);
12022
12023 // Don't handle ppc_fp128 here or conversions that are out-of-range capable
12024 // from the hardware.
12025 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
12026 return SDValue();
12027 if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
12028 Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
12029 return SDValue();
12030
12031 SDValue FirstOperand(Op.getOperand(0));
12032 bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
12033 (FirstOperand.getValueType() == MVT::i8 ||
12034 FirstOperand.getValueType() == MVT::i16);
12035 if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
12036 bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
12037 bool DstDouble = Op.getValueType() == MVT::f64;
12038 unsigned ConvOp = Signed ?
12039 (DstDouble ? PPCISD::FCFID : PPCISD::FCFIDS) :
12040 (DstDouble ?
PPCISD::FCFIDU : PPCISD::FCFIDUS);
12041 SDValue WidthConst =
12042 DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
12043 dl, false);
12044 LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
12045 SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
12046 SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
12047 DAG.getVTList(MVT::f64, MVT::Other),
12048 Ops, MVT::i8, LDN->getMemOperand());
12049
12050 // For signed conversion, we need to sign-extend the value in the VSR.
12051 if (Signed) {
12052 SDValue ExtOps[] = { Ld, WidthConst };
12053 SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
12054 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
12055 } else
12056 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
12057 }
12058
12059
12060 // For i32 intermediate values, unfortunately, the conversion functions
12061 // leave the upper 32 bits of the value undefined. Within the set of
12062 // scalar instructions, we have no method for zero- or sign-extending the
12063 // value. Thus, we cannot handle i32 intermediate values here.
12064 if (Op.getOperand(0).getValueType() == MVT::i32)
12065 return SDValue();
12066
12067 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
12068 "UINT_TO_FP is supported only with FPCVT");
12069
12070 // If we have FCFIDS, then use it when converting to single-precision.
12071 // Otherwise, convert to double-precision and then round.
12072 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
12073 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
12074 : PPCISD::FCFIDS)
12075 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
12076 : PPCISD::FCFID);
12077 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
12078 ? MVT::f32
12079 : MVT::f64;
12080
12081 // If we're converting from a float to an int and back to a float again,
12082 // then we don't need the store/load pair at all.
12083 if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
12084 Subtarget.hasFPCVT()) ||
12085 (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
12086 SDValue Src = Op.getOperand(0).getOperand(0);
12087 if (Src.getValueType() == MVT::f32) {
12088 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
12089 DCI.AddToWorklist(Src.getNode());
12090 } else if (Src.getValueType() != MVT::f64) {
12091 // Make sure that we don't pick up a ppc_fp128 source value.
12092 return SDValue();
12093 }
12094
12095 unsigned FCTOp =
12096 Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
12097 PPCISD::FCTIDUZ;
12098
12099 SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
12100 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
12101
12102 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
12103 FP = DAG.getNode(ISD::FP_ROUND, dl,
12104 MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
12105 DCI.AddToWorklist(FP.getNode());
12106 }
12107
12108 return FP;
12109 }
12110
12111 return SDValue();
12112 }
12113
12114 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
12115 // builtins) into loads with swaps.
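// (Illustrative sketch, not from the original source: on a little-endian
// subtarget without a non-permuting load, (v4i32 (load ptr)) becomes
// (bitcast (PPCISD::XXSWAPD (PPCISD::LXVD2X ptr))) below.)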
12116 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N, 12117 DAGCombinerInfo &DCI) const { 12118 SelectionDAG &DAG = DCI.DAG; 12119 SDLoc dl(N); 12120 SDValue Chain; 12121 SDValue Base; 12122 MachineMemOperand *MMO; 12123 12124 switch (N->getOpcode()) { 12125 default: 12126 llvm_unreachable("Unexpected opcode for little endian VSX load"); 12127 case ISD::LOAD: { 12128 LoadSDNode *LD = cast<LoadSDNode>(N); 12129 Chain = LD->getChain(); 12130 Base = LD->getBasePtr(); 12131 MMO = LD->getMemOperand(); 12132 // If the MMO suggests this isn't a load of a full vector, leave 12133 // things alone. For a built-in, we have to make the change for 12134 // correctness, so if there is a size problem that will be a bug. 12135 if (MMO->getSize() < 16) 12136 return SDValue(); 12137 break; 12138 } 12139 case ISD::INTRINSIC_W_CHAIN: { 12140 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 12141 Chain = Intrin->getChain(); 12142 // Similarly to the store case below, Intrin->getBasePtr() doesn't get 12143 // us what we want. Get operand 2 instead. 12144 Base = Intrin->getOperand(2); 12145 MMO = Intrin->getMemOperand(); 12146 break; 12147 } 12148 } 12149 12150 MVT VecTy = N->getValueType(0).getSimpleVT(); 12151 12152 // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is 12153 // aligned and the type is a vector with elements up to 4 bytes 12154 if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16) 12155 && VecTy.getScalarSizeInBits() <= 32 ) { 12156 return SDValue(); 12157 } 12158 12159 SDValue LoadOps[] = { Chain, Base }; 12160 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl, 12161 DAG.getVTList(MVT::v2f64, MVT::Other), 12162 LoadOps, MVT::v2f64, MMO); 12163 12164 DCI.AddToWorklist(Load.getNode()); 12165 Chain = Load.getValue(1); 12166 SDValue Swap = DAG.getNode( 12167 PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load); 12168 DCI.AddToWorklist(Swap.getNode()); 12169 12170 // Add a bitcast if the resulting load type doesn't match v2f64. 12171 if (VecTy != MVT::v2f64) { 12172 SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap); 12173 DCI.AddToWorklist(N.getNode()); 12174 // Package {bitcast value, swap's chain} to match Load's shape. 12175 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other), 12176 N, Swap.getValue(1)); 12177 } 12178 12179 return Swap; 12180 } 12181 12182 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for 12183 // builtins) into stores with swaps. 12184 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N, 12185 DAGCombinerInfo &DCI) const { 12186 SelectionDAG &DAG = DCI.DAG; 12187 SDLoc dl(N); 12188 SDValue Chain; 12189 SDValue Base; 12190 unsigned SrcOpnd; 12191 MachineMemOperand *MMO; 12192 12193 switch (N->getOpcode()) { 12194 default: 12195 llvm_unreachable("Unexpected opcode for little endian VSX store"); 12196 case ISD::STORE: { 12197 StoreSDNode *ST = cast<StoreSDNode>(N); 12198 Chain = ST->getChain(); 12199 Base = ST->getBasePtr(); 12200 MMO = ST->getMemOperand(); 12201 SrcOpnd = 1; 12202 // If the MMO suggests this isn't a store of a full vector, leave 12203 // things alone. For a built-in, we have to make the change for 12204 // correctness, so if there is a size problem that will be a bug. 12205 if (MMO->getSize() < 16) 12206 return SDValue(); 12207 break; 12208 } 12209 case ISD::INTRINSIC_VOID: { 12210 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 12211 Chain = Intrin->getChain(); 12212 // Intrin->getBasePtr() oddly does not get what we want. 
12213 Base = Intrin->getOperand(3);
12214 MMO = Intrin->getMemOperand();
12215 SrcOpnd = 2;
12216 break;
12217 }
12218 }
12219
12220 SDValue Src = N->getOperand(SrcOpnd);
12221 MVT VecTy = Src.getValueType().getSimpleVT();
12222
12223 // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
12224 // aligned and the type is a vector with elements of up to 4 bytes.
12225 if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16)
12226 && VecTy.getScalarSizeInBits() <= 32 ) {
12227 return SDValue();
12228 }
12229
12230 // All stores are done as v2f64, with a bitcast added when needed.
12231 if (VecTy != MVT::v2f64) {
12232 Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
12233 DCI.AddToWorklist(Src.getNode());
12234 }
12235
12236 SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
12237 DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
12238 DCI.AddToWorklist(Swap.getNode());
12239 Chain = Swap.getValue(1);
12240 SDValue StoreOps[] = { Chain, Swap, Base };
12241 SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
12242 DAG.getVTList(MVT::Other),
12243 StoreOps, VecTy, MMO);
12244 DCI.AddToWorklist(Store.getNode());
12245 return Store;
12246 }
12247
12248 // Handle DAG combine for STORE (FP_TO_INT F).
12249 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
12250 DAGCombinerInfo &DCI) const {
12251
12252 SelectionDAG &DAG = DCI.DAG;
12253 SDLoc dl(N);
12254 unsigned Opcode = N->getOperand(1).getOpcode();
12255
12256 assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
12257 && "Not a FP_TO_INT Instruction!");
12258
12259 SDValue Val = N->getOperand(1).getOperand(0);
12260 EVT Op1VT = N->getOperand(1).getValueType();
12261 EVT ResVT = Val.getValueType();
12262
12263 // Floating point types smaller than 32 bits are not legal on Power.
12264 if (ResVT.getScalarSizeInBits() < 32)
12265 return SDValue();
12266
12267 // Only perform the combine for conversion to i64/i32 or Power9 i16/i8.
12268 bool ValidTypeForStoreFltAsInt =
12269 (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
12270 (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
12271
12272 if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Altivec() ||
12273 cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
12274 return SDValue();
12275
12276 // Extend f32 values to f64.
12277 if (ResVT.getScalarSizeInBits() == 32) {
12278 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
12279 DCI.AddToWorklist(Val.getNode());
12280 }
12281
12282 // Set signed or unsigned conversion opcode.
12283 unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
12284 PPCISD::FP_TO_SINT_IN_VSR :
12285 PPCISD::FP_TO_UINT_IN_VSR;
12286
12287 Val = DAG.getNode(ConvOpcode,
12288 dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
12289 DCI.AddToWorklist(Val.getNode());
12290
12291 // Set number of bytes being converted.
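// (Illustrative: ByteSize below is 8 for an i64 store, 4 for i32,
// 2 for i16, and 1 for i8.)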
12292 unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
12293 SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
12294 DAG.getIntPtrConstant(ByteSize, dl, false),
12295 DAG.getValueType(Op1VT) };
12296
12297 Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
12298 DAG.getVTList(MVT::Other), Ops,
12299 cast<StoreSDNode>(N)->getMemoryVT(),
12300 cast<StoreSDNode>(N)->getMemOperand());
12301
12302 DCI.AddToWorklist(Val.getNode());
12303 return Val;
12304 }
12305
12306 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
12307 DAGCombinerInfo &DCI) const {
12308 SelectionDAG &DAG = DCI.DAG;
12309 SDLoc dl(N);
12310 switch (N->getOpcode()) {
12311 default: break;
12312 case ISD::SHL:
12313 return combineSHL(N, DCI);
12314 case ISD::SRA:
12315 return combineSRA(N, DCI);
12316 case ISD::SRL:
12317 return combineSRL(N, DCI);
12318 case PPCISD::SHL:
12319 if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
12320 return N->getOperand(0);
12321 break;
12322 case PPCISD::SRL:
12323 if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
12324 return N->getOperand(0);
12325 break;
12326 case PPCISD::SRA:
12327 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
12328 if (C->isNullValue() || // 0 >>s V -> 0.
12329 C->isAllOnesValue()) // -1 >>s V -> -1.
12330 return N->getOperand(0);
12331 }
12332 break;
12333 case ISD::SIGN_EXTEND:
12334 case ISD::ZERO_EXTEND:
12335 case ISD::ANY_EXTEND:
12336 return DAGCombineExtBoolTrunc(N, DCI);
12337 case ISD::TRUNCATE:
12338 case ISD::SETCC:
12339 case ISD::SELECT_CC:
12340 return DAGCombineTruncBoolExt(N, DCI);
12341 case ISD::SINT_TO_FP:
12342 case ISD::UINT_TO_FP:
12343 return combineFPToIntToFP(N, DCI);
12344 case ISD::STORE: {
12345
12346 EVT Op1VT = N->getOperand(1).getValueType();
12347 unsigned Opcode = N->getOperand(1).getOpcode();
12348
12349 if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
12350 SDValue Val = combineStoreFPToInt(N, DCI);
12351 if (Val)
12352 return Val;
12353 }
12354
12355 // Turn STORE (BSWAP) -> sthbrx/stwbrx.
12356 if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
12357 N->getOperand(1).getNode()->hasOneUse() &&
12358 (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
12359 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
12360
12361 // STBRX can only handle simple types.
12362 EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
12363 if (mVT.isExtended())
12364 break;
12365
12366 SDValue BSwapOp = N->getOperand(1).getOperand(0);
12367 // Do an any-extend to 32 bits if this is a half-word input.
12368 if (BSwapOp.getValueType() == MVT::i16)
12369 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
12370
12371 // If the type of the BSWAP operand is wider than the stored memory width,
12372 // it needs to be shifted right before the STBRX.
12373 if (Op1VT.bitsGT(mVT)) {
12374 int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
12375 BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
12376 DAG.getConstant(Shift, dl, MVT::i32));
12377 // Need to truncate if this is a bswap of i64 stored as i32/i16.
12378 if (Op1VT == MVT::i64)
12379 BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
12380 }
12381
12382 SDValue Ops[] = {
12383 N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
12384 };
12385 return
12386 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
12387 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
12388 cast<StoreSDNode>(N)->getMemOperand());
12389 }
12390
12391 // STORE Constant:i32<0> -> STORE<trunc to i32> Constant:i64<0>
12392 // so as to increase the chance of CSE'ing the constant construction.
12393 if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
12394 isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
12395 // The value needs to be sign-extended to 64 bits to handle negative values.
12396 EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
12397 uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
12398 MemVT.getSizeInBits());
12399 SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
12400
12401 // DAG.getTruncStore() can't be used here because it doesn't accept
12402 // the general (base + offset) addressing mode.
12403 // So we use UpdateNodeOperands and setTruncatingStore instead.
12404 DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
12405 N->getOperand(3));
12406 cast<StoreSDNode>(N)->setTruncatingStore(true);
12407 return SDValue(N, 0);
12408 }
12409
12410 // For little endian, VSX stores require generating xxswapd/stxvd2x.
12411 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
12412 if (Op1VT.isSimple()) {
12413 MVT StoreVT = Op1VT.getSimpleVT();
12414 if (Subtarget.needsSwapsForVSXMemOps() &&
12415 (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
12416 StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
12417 return expandVSXStoreForLE(N, DCI);
12418 }
12419 break;
12420 }
12421 case ISD::LOAD: {
12422 LoadSDNode *LD = cast<LoadSDNode>(N);
12423 EVT VT = LD->getValueType(0);
12424
12425 // For little endian, VSX loads require generating lxvd2x/xxswapd.
12426 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
12427 if (VT.isSimple()) {
12428 MVT LoadVT = VT.getSimpleVT();
12429 if (Subtarget.needsSwapsForVSXMemOps() &&
12430 (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
12431 LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
12432 return expandVSXLoadForLE(N, DCI);
12433 }
12434
12435 // We sometimes end up with a 64-bit integer load, from which we extract
12436 // two single-precision floating-point numbers. This happens with
12437 // std::complex<float>, and other similar structures, because of the way we
12438 // canonicalize structure copies. However, if we lack direct moves,
12439 // then the final bitcasts from the extracted integer values to the
12440 // floating-point numbers turn into store/load pairs. Even with direct moves,
12441 // just loading the two floating-point numbers is likely better.
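// (Illustrative sketch, not from the original source: the lambda below
// replaces the i64 load in the pattern it matches with two f32 loads at
// offsets 0 and 4 from the same base address, wired directly into the
// two bitcast users.)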
12442 auto ReplaceTwoFloatLoad = [&]() { 12443 if (VT != MVT::i64) 12444 return false; 12445 12446 if (LD->getExtensionType() != ISD::NON_EXTLOAD || 12447 LD->isVolatile()) 12448 return false; 12449 12450 // We're looking for a sequence like this: 12451 // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64 12452 // t16: i64 = srl t13, Constant:i32<32> 12453 // t17: i32 = truncate t16 12454 // t18: f32 = bitcast t17 12455 // t19: i32 = truncate t13 12456 // t20: f32 = bitcast t19 12457 12458 if (!LD->hasNUsesOfValue(2, 0)) 12459 return false; 12460 12461 auto UI = LD->use_begin(); 12462 while (UI.getUse().getResNo() != 0) ++UI; 12463 SDNode *Trunc = *UI++; 12464 while (UI.getUse().getResNo() != 0) ++UI; 12465 SDNode *RightShift = *UI; 12466 if (Trunc->getOpcode() != ISD::TRUNCATE) 12467 std::swap(Trunc, RightShift); 12468 12469 if (Trunc->getOpcode() != ISD::TRUNCATE || 12470 Trunc->getValueType(0) != MVT::i32 || 12471 !Trunc->hasOneUse()) 12472 return false; 12473 if (RightShift->getOpcode() != ISD::SRL || 12474 !isa<ConstantSDNode>(RightShift->getOperand(1)) || 12475 RightShift->getConstantOperandVal(1) != 32 || 12476 !RightShift->hasOneUse()) 12477 return false; 12478 12479 SDNode *Trunc2 = *RightShift->use_begin(); 12480 if (Trunc2->getOpcode() != ISD::TRUNCATE || 12481 Trunc2->getValueType(0) != MVT::i32 || 12482 !Trunc2->hasOneUse()) 12483 return false; 12484 12485 SDNode *Bitcast = *Trunc->use_begin(); 12486 SDNode *Bitcast2 = *Trunc2->use_begin(); 12487 12488 if (Bitcast->getOpcode() != ISD::BITCAST || 12489 Bitcast->getValueType(0) != MVT::f32) 12490 return false; 12491 if (Bitcast2->getOpcode() != ISD::BITCAST || 12492 Bitcast2->getValueType(0) != MVT::f32) 12493 return false; 12494 12495 if (Subtarget.isLittleEndian()) 12496 std::swap(Bitcast, Bitcast2); 12497 12498 // Bitcast has the second float (in memory-layout order) and Bitcast2 12499 // has the first one. 12500 12501 SDValue BasePtr = LD->getBasePtr(); 12502 if (LD->isIndexed()) { 12503 assert(LD->getAddressingMode() == ISD::PRE_INC && 12504 "Non-pre-inc AM on PPC?"); 12505 BasePtr = 12506 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 12507 LD->getOffset()); 12508 } 12509 12510 auto MMOFlags = 12511 LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile; 12512 SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr, 12513 LD->getPointerInfo(), LD->getAlignment(), 12514 MMOFlags, LD->getAAInfo()); 12515 SDValue AddPtr = 12516 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), 12517 BasePtr, DAG.getIntPtrConstant(4, dl)); 12518 SDValue FloatLoad2 = DAG.getLoad( 12519 MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr, 12520 LD->getPointerInfo().getWithOffset(4), 12521 MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo()); 12522 12523 if (LD->isIndexed()) { 12524 // Note that DAGCombine should re-form any pre-increment load(s) from 12525 // what is produced here if that makes sense. 12526 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr); 12527 } 12528 12529 DCI.CombineTo(Bitcast2, FloatLoad); 12530 DCI.CombineTo(Bitcast, FloatLoad2); 12531 12532 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 
2 : 1), 12533 SDValue(FloatLoad2.getNode(), 1)); 12534 return true; 12535 }; 12536 12537 if (ReplaceTwoFloatLoad()) 12538 return SDValue(N, 0); 12539 12540 EVT MemVT = LD->getMemoryVT(); 12541 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext()); 12542 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty); 12543 Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext()); 12544 unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy); 12545 if (LD->isUnindexed() && VT.isVector() && 12546 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) && 12547 // P8 and later hardware should just use LOAD. 12548 !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 || 12549 VT == MVT::v4i32 || VT == MVT::v4f32)) || 12550 (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) && 12551 LD->getAlignment() >= ScalarABIAlignment)) && 12552 LD->getAlignment() < ABIAlignment) { 12553 // This is a type-legal unaligned Altivec or QPX load. 12554 SDValue Chain = LD->getChain(); 12555 SDValue Ptr = LD->getBasePtr(); 12556 bool isLittleEndian = Subtarget.isLittleEndian(); 12557 12558 // This implements the loading of unaligned vectors as described in 12559 // the venerable Apple Velocity Engine overview. Specifically: 12560 // https://developer.apple.com/hardwaredrivers/ve/alignment.html 12561 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html 12562 // 12563 // The general idea is to expand a sequence of one or more unaligned 12564 // loads into an alignment-based permutation-control instruction (lvsl 12565 // or lvsr), a series of regular vector loads (which always truncate 12566 // their input address to an aligned address), and a series of 12567 // permutations. The results of these permutations are the requested 12568 // loaded values. The trick is that the last "extra" load is not taken 12569 // from the address you might suspect (sizeof(vector) bytes after the 12570 // last requested load), but rather sizeof(vector) - 1 bytes after the 12571 // last requested vector. The point of this is to avoid a page fault if 12572 // the base address happened to be aligned. This works because if the 12573 // base address is aligned, then adding less than a full vector length 12574 // will cause the last vector in the sequence to be (re)loaded. 12575 // Otherwise, the next vector will be fetched as you might suspect was 12576 // necessary. 12577 12578 // We might be able to reuse the permutation generation from 12579 // a different base address offset from this one by an aligned amount. 12580 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this 12581 // optimization later. 12582 Intrinsic::ID Intr, IntrLD, IntrPerm; 12583 MVT PermCntlTy, PermTy, LDTy; 12584 if (Subtarget.hasAltivec()) { 12585 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr : 12586 Intrinsic::ppc_altivec_lvsl; 12587 IntrLD = Intrinsic::ppc_altivec_lvx; 12588 IntrPerm = Intrinsic::ppc_altivec_vperm; 12589 PermCntlTy = MVT::v16i8; 12590 PermTy = MVT::v4i32; 12591 LDTy = MVT::v4i32; 12592 } else { 12593 Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld : 12594 Intrinsic::ppc_qpx_qvlpcls; 12595 IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd : 12596 Intrinsic::ppc_qpx_qvlfs; 12597 IntrPerm = Intrinsic::ppc_qpx_qvfperm; 12598 PermCntlTy = MVT::v4f64; 12599 PermTy = MVT::v4f64; 12600 LDTy = MemVT.getSimpleVT(); 12601 } 12602 12603 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy); 12604 12605 // Create the new MMO for the new base load. 
It is like the original MMO, 12606 // but represents an area in memory almost twice the vector size centered 12607 // on the original address. If the address is unaligned, we might start 12608 // reading up to (sizeof(vector)-1) bytes below the address of the 12609 // original unaligned load. 12610 MachineFunction &MF = DAG.getMachineFunction(); 12611 MachineMemOperand *BaseMMO = 12612 MF.getMachineMemOperand(LD->getMemOperand(), 12613 -(long)MemVT.getStoreSize()+1, 12614 2*MemVT.getStoreSize()-1); 12615 12616 // Create the new base load. 12617 SDValue LDXIntID = 12618 DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout())); 12619 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr }; 12620 SDValue BaseLoad = 12621 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 12622 DAG.getVTList(PermTy, MVT::Other), 12623 BaseLoadOps, LDTy, BaseMMO); 12624 12625 // Note that the value of IncOffset (which is provided to the next 12626 // load's pointer info offset value, and thus used to calculate the 12627 // alignment), and the value of IncValue (which is actually used to 12628 // increment the pointer value) are different! This is because we 12629 // require the next load to appear to be aligned, even though it 12630 // is actually offset from the base pointer by a lesser amount. 12631 int IncOffset = VT.getSizeInBits() / 8; 12632 int IncValue = IncOffset; 12633 12634 // Walk (both up and down) the chain looking for another load at the real 12635 // (aligned) offset (the alignment of the other load does not matter in 12636 // this case). If found, then do not use the offset reduction trick, as 12637 // that will prevent the loads from being later combined (as they would 12638 // otherwise be duplicates). 12639 if (!findConsecutiveLoad(LD, DAG)) 12640 --IncValue; 12641 12642 SDValue Increment = 12643 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout())); 12644 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 12645 12646 MachineMemOperand *ExtraMMO = 12647 MF.getMachineMemOperand(LD->getMemOperand(), 12648 1, 2*MemVT.getStoreSize()-1); 12649 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr }; 12650 SDValue ExtraLoad = 12651 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 12652 DAG.getVTList(PermTy, MVT::Other), 12653 ExtraLoadOps, LDTy, ExtraMMO); 12654 12655 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 12656 BaseLoad.getValue(1), ExtraLoad.getValue(1)); 12657 12658 // Because vperm has a big-endian bias, we must reverse the order 12659 // of the input vectors and complement the permute control vector 12660 // when generating little endian code. We have already handled the 12661 // latter by using lvsr instead of lvsl, so just reverse BaseLoad 12662 // and ExtraLoad here. 12663 SDValue Perm; 12664 if (isLittleEndian) 12665 Perm = BuildIntrinsicOp(IntrPerm, 12666 ExtraLoad, BaseLoad, PermCntl, DAG, dl); 12667 else 12668 Perm = BuildIntrinsicOp(IntrPerm, 12669 BaseLoad, ExtraLoad, PermCntl, DAG, dl); 12670 12671 if (VT != PermTy) 12672 Perm = Subtarget.hasAltivec() ? 12673 DAG.getNode(ISD::BITCAST, dl, VT, Perm) : 12674 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX 12675 DAG.getTargetConstant(1, dl, MVT::i64)); 12676 // second argument is 1 because this rounding 12677 // is always exact. 12678 12679 // The output of the permutation is our loaded result, the TokenFactor is 12680 // our new chain. 
12681 DCI.CombineTo(N, Perm, TF); 12682 return SDValue(N, 0); 12683 } 12684 } 12685 break; 12686 case ISD::INTRINSIC_WO_CHAIN: { 12687 bool isLittleEndian = Subtarget.isLittleEndian(); 12688 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 12689 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr 12690 : Intrinsic::ppc_altivec_lvsl); 12691 if ((IID == Intr || 12692 IID == Intrinsic::ppc_qpx_qvlpcld || 12693 IID == Intrinsic::ppc_qpx_qvlpcls) && 12694 N->getOperand(1)->getOpcode() == ISD::ADD) { 12695 SDValue Add = N->getOperand(1); 12696 12697 int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ? 12698 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */; 12699 12700 if (DAG.MaskedValueIsZero(Add->getOperand(1), 12701 APInt::getAllOnesValue(Bits /* alignment */) 12702 .zext(Add.getScalarValueSizeInBits()))) { 12703 SDNode *BasePtr = Add->getOperand(0).getNode(); 12704 for (SDNode::use_iterator UI = BasePtr->use_begin(), 12705 UE = BasePtr->use_end(); 12706 UI != UE; ++UI) { 12707 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 12708 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) { 12709 // We've found another LVSL/LVSR, and this address is an aligned 12710 // multiple of that one. The results will be the same, so use the 12711 // one we've just found instead. 12712 12713 return SDValue(*UI, 0); 12714 } 12715 } 12716 } 12717 12718 if (isa<ConstantSDNode>(Add->getOperand(1))) { 12719 SDNode *BasePtr = Add->getOperand(0).getNode(); 12720 for (SDNode::use_iterator UI = BasePtr->use_begin(), 12721 UE = BasePtr->use_end(); UI != UE; ++UI) { 12722 if (UI->getOpcode() == ISD::ADD && 12723 isa<ConstantSDNode>(UI->getOperand(1)) && 12724 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() - 12725 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) % 12726 (1ULL << Bits) == 0) { 12727 SDNode *OtherAdd = *UI; 12728 for (SDNode::use_iterator VI = OtherAdd->use_begin(), 12729 VE = OtherAdd->use_end(); VI != VE; ++VI) { 12730 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 12731 cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) { 12732 return SDValue(*VI, 0); 12733 } 12734 } 12735 } 12736 } 12737 } 12738 } 12739 } 12740 12741 break; 12742 case ISD::INTRINSIC_W_CHAIN: 12743 // For little endian, VSX loads require generating lxvd2x/xxswapd. 12744 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load. 12745 if (Subtarget.needsSwapsForVSXMemOps()) { 12746 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 12747 default: 12748 break; 12749 case Intrinsic::ppc_vsx_lxvw4x: 12750 case Intrinsic::ppc_vsx_lxvd2x: 12751 return expandVSXLoadForLE(N, DCI); 12752 } 12753 } 12754 break; 12755 case ISD::INTRINSIC_VOID: 12756 // For little endian, VSX stores require generating xxswapd/stxvd2x. 12757 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store. 12758 if (Subtarget.needsSwapsForVSXMemOps()) { 12759 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 12760 default: 12761 break; 12762 case Intrinsic::ppc_vsx_stxvw4x: 12763 case Intrinsic::ppc_vsx_stxvd2x: 12764 return expandVSXStoreForLE(N, DCI); 12765 } 12766 } 12767 break; 12768 case ISD::BSWAP: 12769 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 
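// (Illustrative: (i32 (bswap (i32 load ptr))) becomes a byte-reversed
// load (i32 (PPCISD::LBRX ptr)); an i16 result also needs the truncate
// inserted below.)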
12770 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && 12771 N->getOperand(0).hasOneUse() && 12772 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 || 12773 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && 12774 N->getValueType(0) == MVT::i64))) { 12775 SDValue Load = N->getOperand(0); 12776 LoadSDNode *LD = cast<LoadSDNode>(Load); 12777 // Create the byte-swapping load. 12778 SDValue Ops[] = { 12779 LD->getChain(), // Chain 12780 LD->getBasePtr(), // Ptr 12781 DAG.getValueType(N->getValueType(0)) // VT 12782 }; 12783 SDValue BSLoad = 12784 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl, 12785 DAG.getVTList(N->getValueType(0) == MVT::i64 ? 12786 MVT::i64 : MVT::i32, MVT::Other), 12787 Ops, LD->getMemoryVT(), LD->getMemOperand()); 12788 12789 // If this is an i16 load, insert the truncate. 12790 SDValue ResVal = BSLoad; 12791 if (N->getValueType(0) == MVT::i16) 12792 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad); 12793 12794 // First, combine the bswap away. This makes the value produced by the 12795 // load dead. 12796 DCI.CombineTo(N, ResVal); 12797 12798 // Next, combine the load away, we give it a bogus result value but a real 12799 // chain result. The result value is dead because the bswap is dead. 12800 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); 12801 12802 // Return N so it doesn't get rechecked! 12803 return SDValue(N, 0); 12804 } 12805 break; 12806 case PPCISD::VCMP: 12807 // If a VCMPo node already exists with exactly the same operands as this 12808 // node, use its result instead of this node (VCMPo computes both a CR6 and 12809 // a normal output). 12810 // 12811 if (!N->getOperand(0).hasOneUse() && 12812 !N->getOperand(1).hasOneUse() && 12813 !N->getOperand(2).hasOneUse()) { 12814 12815 // Scan all of the users of the LHS, looking for VCMPo's that match. 12816 SDNode *VCMPoNode = nullptr; 12817 12818 SDNode *LHSN = N->getOperand(0).getNode(); 12819 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 12820 UI != E; ++UI) 12821 if (UI->getOpcode() == PPCISD::VCMPo && 12822 UI->getOperand(1) == N->getOperand(1) && 12823 UI->getOperand(2) == N->getOperand(2) && 12824 UI->getOperand(0) == N->getOperand(0)) { 12825 VCMPoNode = *UI; 12826 break; 12827 } 12828 12829 // If there is no VCMPo node, or if the flag value has a single use, don't 12830 // transform this. 12831 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 12832 break; 12833 12834 // Look at the (necessarily single) use of the flag value. If it has a 12835 // chain, this transformation is more complex. Note that multiple things 12836 // could use the value result, which we should ignore. 12837 SDNode *FlagUser = nullptr; 12838 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 12839 FlagUser == nullptr; ++UI) { 12840 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 12841 SDNode *User = *UI; 12842 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 12843 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) { 12844 FlagUser = User; 12845 break; 12846 } 12847 } 12848 } 12849 12850 // If the user is a MFOCRF instruction, we know this is safe. 12851 // Otherwise we give up for right now. 
12852 if (FlagUser->getOpcode() == PPCISD::MFOCRF) 12853 return SDValue(VCMPoNode, 0); 12854 } 12855 break; 12856 case ISD::BRCOND: { 12857 SDValue Cond = N->getOperand(1); 12858 SDValue Target = N->getOperand(2); 12859 12860 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN && 12861 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() == 12862 Intrinsic::ppc_is_decremented_ctr_nonzero) { 12863 12864 // We now need to make the intrinsic dead (it cannot be instruction 12865 // selected). 12866 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0)); 12867 assert(Cond.getNode()->hasOneUse() && 12868 "Counter decrement has more than one use"); 12869 12870 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other, 12871 N->getOperand(0), Target); 12872 } 12873 } 12874 break; 12875 case ISD::BR_CC: { 12876 // If this is a branch on an altivec predicate comparison, lower this so 12877 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This 12878 // lowering is done pre-legalize, because the legalizer lowers the predicate 12879 // compare down to code that is difficult to reassemble. 12880 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 12881 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 12882 12883 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero 12884 // value. If so, pass-through the AND to get to the intrinsic. 12885 if (LHS.getOpcode() == ISD::AND && 12886 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN && 12887 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() == 12888 Intrinsic::ppc_is_decremented_ctr_nonzero && 12889 isa<ConstantSDNode>(LHS.getOperand(1)) && 12890 !isNullConstant(LHS.getOperand(1))) 12891 LHS = LHS.getOperand(0); 12892 12893 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN && 12894 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 12895 Intrinsic::ppc_is_decremented_ctr_nonzero && 12896 isa<ConstantSDNode>(RHS)) { 12897 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && 12898 "Counter decrement comparison is not EQ or NE"); 12899 12900 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 12901 bool isBDNZ = (CC == ISD::SETEQ && Val) || 12902 (CC == ISD::SETNE && !Val); 12903 12904 // We now need to make the intrinsic dead (it cannot be instruction 12905 // selected). 12906 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0)); 12907 assert(LHS.getNode()->hasOneUse() && 12908 "Counter decrement has more than one use"); 12909 12910 return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other, 12911 N->getOperand(0), N->getOperand(4)); 12912 } 12913 12914 int CompareOpc; 12915 bool isDot; 12916 12917 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 12918 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 12919 getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) { 12920 assert(isDot && "Can't compare against a vector result!"); 12921 12922 // If this is a comparison against something other than 0/1, then we know 12923 // that the condition is never/always true. 12924 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 12925 if (Val != 0 && Val != 1) { 12926 if (CC == ISD::SETEQ) // Cond never true, remove branch. 12927 return N->getOperand(0); 12928 // Always !=, turn it into an unconditional branch. 
12929 return DAG.getNode(ISD::BR, dl, MVT::Other, 12930 N->getOperand(0), N->getOperand(4)); 12931 } 12932 12933 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 12934 12935 // Create the PPCISD altivec 'dot' comparison node. 12936 SDValue Ops[] = { 12937 LHS.getOperand(2), // LHS of compare 12938 LHS.getOperand(3), // RHS of compare 12939 DAG.getConstant(CompareOpc, dl, MVT::i32) 12940 }; 12941 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue }; 12942 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 12943 12944 // Unpack the result based on how the target uses it. 12945 PPC::Predicate CompOpc; 12946 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 12947 default: // Can't happen, don't crash on invalid number though. 12948 case 0: // Branch on the value of the EQ bit of CR6. 12949 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 12950 break; 12951 case 1: // Branch on the inverted value of the EQ bit of CR6. 12952 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 12953 break; 12954 case 2: // Branch on the value of the LT bit of CR6. 12955 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 12956 break; 12957 case 3: // Branch on the inverted value of the LT bit of CR6. 12958 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT; 12959 break; 12960 } 12961 12962 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 12963 DAG.getConstant(CompOpc, dl, MVT::i32), 12964 DAG.getRegister(PPC::CR6, MVT::i32), 12965 N->getOperand(4), CompNode.getValue(1)); 12966 } 12967 break; 12968 } 12969 case ISD::BUILD_VECTOR: 12970 return DAGCombineBuildVector(N, DCI); 12971 } 12972 12973 return SDValue(); 12974 } 12975 12976 SDValue 12977 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 12978 SelectionDAG &DAG, 12979 std::vector<SDNode *> *Created) const { 12980 // fold (sdiv X, pow2) 12981 EVT VT = N->getValueType(0); 12982 if (VT == MVT::i64 && !Subtarget.isPPC64()) 12983 return SDValue(); 12984 if ((VT != MVT::i32 && VT != MVT::i64) || 12985 !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2())) 12986 return SDValue(); 12987 12988 SDLoc DL(N); 12989 SDValue N0 = N->getOperand(0); 12990 12991 bool IsNegPow2 = (-Divisor).isPowerOf2(); 12992 unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros(); 12993 SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT); 12994 12995 SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt); 12996 if (Created) 12997 Created->push_back(Op.getNode()); 12998 12999 if (IsNegPow2) { 13000 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op); 13001 if (Created) 13002 Created->push_back(Op.getNode()); 13003 } 13004 13005 return Op; 13006 } 13007 13008 //===----------------------------------------------------------------------===// 13009 // Inline Assembly Support 13010 //===----------------------------------------------------------------------===// 13011 13012 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 13013 KnownBits &Known, 13014 const APInt &DemandedElts, 13015 const SelectionDAG &DAG, 13016 unsigned Depth) const { 13017 Known.resetAll(); 13018 switch (Op.getOpcode()) { 13019 default: break; 13020 case PPCISD::LBRX: { 13021 // lhbrx is known to have the top bits cleared out. 
13022 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16) 13023 Known.Zero = 0xFFFF0000; 13024 break; 13025 } 13026 case ISD::INTRINSIC_WO_CHAIN: { 13027 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) { 13028 default: break; 13029 case Intrinsic::ppc_altivec_vcmpbfp_p: 13030 case Intrinsic::ppc_altivec_vcmpeqfp_p: 13031 case Intrinsic::ppc_altivec_vcmpequb_p: 13032 case Intrinsic::ppc_altivec_vcmpequh_p: 13033 case Intrinsic::ppc_altivec_vcmpequw_p: 13034 case Intrinsic::ppc_altivec_vcmpequd_p: 13035 case Intrinsic::ppc_altivec_vcmpgefp_p: 13036 case Intrinsic::ppc_altivec_vcmpgtfp_p: 13037 case Intrinsic::ppc_altivec_vcmpgtsb_p: 13038 case Intrinsic::ppc_altivec_vcmpgtsh_p: 13039 case Intrinsic::ppc_altivec_vcmpgtsw_p: 13040 case Intrinsic::ppc_altivec_vcmpgtsd_p: 13041 case Intrinsic::ppc_altivec_vcmpgtub_p: 13042 case Intrinsic::ppc_altivec_vcmpgtuh_p: 13043 case Intrinsic::ppc_altivec_vcmpgtuw_p: 13044 case Intrinsic::ppc_altivec_vcmpgtud_p: 13045 Known.Zero = ~1U; // All bits but the low one are known to be zero. 13046 break; 13047 } 13048 } 13049 } 13050 } 13051 13052 unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { 13053 switch (Subtarget.getDarwinDirective()) { 13054 default: break; 13055 case PPC::DIR_970: 13056 case PPC::DIR_PWR4: 13057 case PPC::DIR_PWR5: 13058 case PPC::DIR_PWR5X: 13059 case PPC::DIR_PWR6: 13060 case PPC::DIR_PWR6X: 13061 case PPC::DIR_PWR7: 13062 case PPC::DIR_PWR8: 13063 case PPC::DIR_PWR9: { 13064 if (!ML) 13065 break; 13066 13067 const PPCInstrInfo *TII = Subtarget.getInstrInfo(); 13068 13069 // For small loops (between 5 and 8 instructions), align to a 32-byte 13070 // boundary so that the entire loop fits in one instruction-cache line. 13071 uint64_t LoopSize = 0; 13072 for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I) 13073 for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) { 13074 LoopSize += TII->getInstSizeInBytes(*J); 13075 if (LoopSize > 32) 13076 break; 13077 } 13078 13079 if (LoopSize > 16 && LoopSize <= 32) 13080 return 5; 13081 13082 break; 13083 } 13084 } 13085 13086 return TargetLowering::getPrefLoopAlignment(ML); 13087 } 13088 13089 /// getConstraintType - Given a constraint, return the type of 13090 /// constraint it is for this target. 13091 PPCTargetLowering::ConstraintType 13092 PPCTargetLowering::getConstraintType(StringRef Constraint) const { 13093 if (Constraint.size() == 1) { 13094 switch (Constraint[0]) { 13095 default: break; 13096 case 'b': 13097 case 'r': 13098 case 'f': 13099 case 'd': 13100 case 'v': 13101 case 'y': 13102 return C_RegisterClass; 13103 case 'Z': 13104 // FIXME: While Z does indicate a memory constraint, it specifically 13105 // indicates an r+r address (used in conjunction with the 'y' modifier 13106 // in the replacement string). Currently, we're forcing the base 13107 // register to be r0 in the asm printer (which is interpreted as zero) 13108 // and forming the complete address in the second register. This is 13109 // suboptimal. 13110 return C_Memory; 13111 } 13112 } else if (Constraint == "wc") { // individual CR bits. 13113 return C_RegisterClass; 13114 } else if (Constraint == "wa" || Constraint == "wd" || 13115 Constraint == "wf" || Constraint == "ws") { 13116 return C_RegisterClass; // VSX registers. 13117 } 13118 return TargetLowering::getConstraintType(Constraint); 13119 } 13120 13121 /// Examine constraint type and operand type and determine a weight value. 
13122 /// This object must already have been set up with the operand type 13123 /// and the current alternative constraint selected. 13124 TargetLowering::ConstraintWeight 13125 PPCTargetLowering::getSingleConstraintMatchWeight( 13126 AsmOperandInfo &info, const char *constraint) const { 13127 ConstraintWeight weight = CW_Invalid; 13128 Value *CallOperandVal = info.CallOperandVal; 13129 // If we don't have a value, we can't do a match, 13130 // but allow it at the lowest weight. 13131 if (!CallOperandVal) 13132 return CW_Default; 13133 Type *type = CallOperandVal->getType(); 13134 13135 // Look at the constraint type. 13136 if (StringRef(constraint) == "wc" && type->isIntegerTy(1)) 13137 return CW_Register; // an individual CR bit. 13138 else if ((StringRef(constraint) == "wa" || 13139 StringRef(constraint) == "wd" || 13140 StringRef(constraint) == "wf") && 13141 type->isVectorTy()) 13142 return CW_Register; 13143 else if (StringRef(constraint) == "ws" && type->isDoubleTy()) 13144 return CW_Register; 13145 13146 switch (*constraint) { 13147 default: 13148 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 13149 break; 13150 case 'b': 13151 if (type->isIntegerTy()) 13152 weight = CW_Register; 13153 break; 13154 case 'f': 13155 if (type->isFloatTy()) 13156 weight = CW_Register; 13157 break; 13158 case 'd': 13159 if (type->isDoubleTy()) 13160 weight = CW_Register; 13161 break; 13162 case 'v': 13163 if (type->isVectorTy()) 13164 weight = CW_Register; 13165 break; 13166 case 'y': 13167 weight = CW_Register; 13168 break; 13169 case 'Z': 13170 weight = CW_Memory; 13171 break; 13172 } 13173 return weight; 13174 } 13175 13176 std::pair<unsigned, const TargetRegisterClass *> 13177 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 13178 StringRef Constraint, 13179 MVT VT) const { 13180 if (Constraint.size() == 1) { 13181 // GCC RS6000 Constraint Letters 13182 switch (Constraint[0]) { 13183 case 'b': // R1-R31 13184 if (VT == MVT::i64 && Subtarget.isPPC64()) 13185 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass); 13186 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass); 13187 case 'r': // R0-R31 13188 if (VT == MVT::i64 && Subtarget.isPPC64()) 13189 return std::make_pair(0U, &PPC::G8RCRegClass); 13190 return std::make_pair(0U, &PPC::GPRCRegClass); 13191 // 'd' and 'f' constraints are both defined to be "the floating point 13192 // registers", where one is for 32-bit and the other for 64-bit. We don't 13193 // really care overly much here so just give them all the same reg classes. 13194 case 'd': 13195 case 'f': 13196 if (VT == MVT::f32 || VT == MVT::i32) 13197 return std::make_pair(0U, &PPC::F4RCRegClass); 13198 if (VT == MVT::f64 || VT == MVT::i64) 13199 return std::make_pair(0U, &PPC::F8RCRegClass); 13200 if (VT == MVT::v4f64 && Subtarget.hasQPX()) 13201 return std::make_pair(0U, &PPC::QFRCRegClass); 13202 if (VT == MVT::v4f32 && Subtarget.hasQPX()) 13203 return std::make_pair(0U, &PPC::QSRCRegClass); 13204 break; 13205 case 'v': 13206 if (VT == MVT::v4f64 && Subtarget.hasQPX()) 13207 return std::make_pair(0U, &PPC::QFRCRegClass); 13208 if (VT == MVT::v4f32 && Subtarget.hasQPX()) 13209 return std::make_pair(0U, &PPC::QSRCRegClass); 13210 if (Subtarget.hasAltivec()) 13211 return std::make_pair(0U, &PPC::VRRCRegClass); 13212 break; 13213 case 'y': // crrc 13214 return std::make_pair(0U, &PPC::CRRCRegClass); 13215 } 13216 } else if (Constraint == "wc" && Subtarget.useCRBits()) { 13217 // An individual CR bit. 
13218 return std::make_pair(0U, &PPC::CRBITRCRegClass); 13219 } else if ((Constraint == "wa" || Constraint == "wd" || 13220 Constraint == "wf") && Subtarget.hasVSX()) { 13221 return std::make_pair(0U, &PPC::VSRCRegClass); 13222 } else if (Constraint == "ws" && Subtarget.hasVSX()) { 13223 if (VT == MVT::f32 && Subtarget.hasP8Vector()) 13224 return std::make_pair(0U, &PPC::VSSRCRegClass); 13225 else 13226 return std::make_pair(0U, &PPC::VSFRCRegClass); 13227 } 13228 13229 std::pair<unsigned, const TargetRegisterClass *> R = 13230 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 13231 13232 // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers 13233 // (which we call X[0-9]+). If a 64-bit value has been requested, and a 13234 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent 13235 // register. 13236 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use 13237 // the AsmName field from *RegisterInfo.td, then this would not be necessary. 13238 if (R.first && VT == MVT::i64 && Subtarget.isPPC64() && 13239 PPC::GPRCRegClass.contains(R.first)) 13240 return std::make_pair(TRI->getMatchingSuperReg(R.first, 13241 PPC::sub_32, &PPC::G8RCRegClass), 13242 &PPC::G8RCRegClass); 13243 13244 // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same. 13245 if (!R.second && StringRef("{cc}").equals_lower(Constraint)) { 13246 R.first = PPC::CR0; 13247 R.second = &PPC::CRRCRegClass; 13248 } 13249 13250 return R; 13251 } 13252 13253 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 13254 /// vector. If it is invalid, don't add anything to Ops. 13255 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 13256 std::string &Constraint, 13257 std::vector<SDValue>&Ops, 13258 SelectionDAG &DAG) const { 13259 SDValue Result; 13260 13261 // Only support length 1 constraints. 13262 if (Constraint.length() > 1) return; 13263 13264 char Letter = Constraint[0]; 13265 switch (Letter) { 13266 default: break; 13267 case 'I': 13268 case 'J': 13269 case 'K': 13270 case 'L': 13271 case 'M': 13272 case 'N': 13273 case 'O': 13274 case 'P': { 13275 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op); 13276 if (!CST) return; // Must be an immediate to match. 13277 SDLoc dl(Op); 13278 int64_t Value = CST->getSExtValue(); 13279 EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative 13280 // numbers are printed as such. 13281 switch (Letter) { 13282 default: llvm_unreachable("Unknown constraint letter!"); 13283 case 'I': // "I" is a signed 16-bit constant. 13284 if (isInt<16>(Value)) 13285 Result = DAG.getTargetConstant(Value, dl, TCVT); 13286 break; 13287 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 13288 if (isShiftedUInt<16, 16>(Value)) 13289 Result = DAG.getTargetConstant(Value, dl, TCVT); 13290 break; 13291 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. 13292 if (isShiftedInt<16, 16>(Value)) 13293 Result = DAG.getTargetConstant(Value, dl, TCVT); 13294 break; 13295 case 'K': // "K" is a constant with only the low-order 16 bits nonzero. 13296 if (isUInt<16>(Value)) 13297 Result = DAG.getTargetConstant(Value, dl, TCVT); 13298 break; 13299 case 'M': // "M" is a constant that is greater than 31. 13300 if (Value > 31) 13301 Result = DAG.getTargetConstant(Value, dl, TCVT); 13302 break; 13303 case 'N': // "N" is a positive constant that is an exact power of two. 
      if (Value > 0 && isPowerOf2_64(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'O': // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
      if (isInt<16>(-Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  // PPC does not allow r+i addressing modes for vectors!
  if (Ty->isVectorTy() && AM.BaseOffs != 0)
    return false;

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r:
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = Subtarget.isPPC64();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
        DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(),
                        dl, isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Just load the return address off the stack.
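  // Depth == 0 (i.e. __builtin_return_address(0)): the LR save slot is
  // referenced through a dedicated frame index rather than an offset from
  // the frame address.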
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
                     MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;

  // Naked functions never have a frame pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned FrameReg;
  if (MF.getFunction().hasFnAttribute(Attribute::Naked))
    FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
  else
    FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;

  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
                                         PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo());
  return FrameAddr;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
unsigned PPCTargetLowering::getRegisterByName(const char *RegName, EVT VT,
                                              SelectionDAG &DAG) const {
  bool isPPC64 = Subtarget.isPPC64();
  bool isDarwinABI = Subtarget.isDarwinABI();

  if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) ||
      (!isPPC64 && VT != MVT::i32))
    report_fatal_error("Invalid register global variable type");

  bool is64Bit = isPPC64 && VT == MVT::i64;
  unsigned Reg = StringSwitch<unsigned>(RegName)
                   .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
                   .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2)
                   .Case("r13", (!isPPC64 && isDarwinABI) ? 0 :
                                (is64Bit ? PPC::X13 : PPC::R13))
                   .Default(0);

  if (Reg)
    return Reg;
  report_fatal_error("Invalid register name global variable");
}

bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
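  // As a result, a GlobalAddress plus a constant offset (e.g. @g + 8) is
  // kept as separate nodes instead of being folded into a single target
  // global address.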
  return false;
}

bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::ppc_qpx_qvlfd:
  case Intrinsic::ppc_qpx_qvlfs:
  case Intrinsic::ppc_qpx_qvlfcd:
  case Intrinsic::ppc_qpx_qvlfcs:
  case Intrinsic::ppc_qpx_qvlfiwa:
  case Intrinsic::ppc_qpx_qvlfiwz:
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = 1;
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::ppc_qpx_qvlfda:
  case Intrinsic::ppc_qpx_qvlfsa:
  case Intrinsic::ppc_qpx_qvlfcda:
  case Intrinsic::ppc_qpx_qvlfcsa:
  case Intrinsic::ppc_qpx_qvlfiwaa:
  case Intrinsic::ppc_qpx_qvlfiwza: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvlfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = 1;
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfd:
  case Intrinsic::ppc_qpx_qvstfs:
  case Intrinsic::ppc_qpx_qvstfcd:
  case Intrinsic::ppc_qpx_qvstfcs:
  case Intrinsic::ppc_qpx_qvstfiw:
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = 1;
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfda:
  case Intrinsic::ppc_qpx_qvstfsa:
  case Intrinsic::ppc_qpx_qvstfcda:
  case Intrinsic::ppc_qpx_qvstfcsa:
  case Intrinsic::ppc_qpx_qvstfiwa: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = 1;
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  default:
    break;
  }

  return false;
}

/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero, it is safe to assume that the destination
/// alignment can satisfy any constraint. Similarly, if SrcAlign is zero,
/// there is no need to check it against an alignment requirement, probably
/// because the source does not need to be loaded. If 'IsMemset' is true,
/// that means it's expanding a memset. If 'ZeroMemset' is true, that means
/// it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy source
/// is constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
                                           unsigned DstAlign,
                                           unsigned SrcAlign,
                                           bool IsMemset, bool ZeroMemset,
                                           bool MemcpyStrSrc,
                                           MachineFunction &MF) const {
  if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
    const Function &F = MF.getFunction();
    // When expanding a memset, require at least two QPX instructions to cover
    // the cost of loading the value to be stored from the constant pool.
    if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
        (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
        !F.hasFnAttribute(Attribute::NoImplicitFloat)) {
      return MVT::v4f64;
    }

    // We should use Altivec/VSX loads and stores when available. For unaligned
    // addresses, unaligned VSX loads are only fast starting with the P8.
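    // For example, a 16-byte memcpy between 16-byte-aligned buffers (or any
    // 16-byte copy when P8 vector instructions are available) is lowered
    // below as a single v4i32 load/store pair.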
    if (Subtarget.hasAltivec() && Size >= 16 &&
        (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
         ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
      return MVT::v4i32;
  }

  if (Subtarget.isPPC64()) {
    return MVT::i64;
  }

  return MVT::i32;
}

/// Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  return !(BitSize == 0 || BitSize > 64);
}

bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Generally speaking, zexts are not free, but they are free when they can be
  // folded with other operations.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  // FIXME: Add other cases...
  //  - 32-bit shifts with a zext to i64
  //  - zext after ctlz, bswap, etc.
  //  - zext after and by a constant mask

  return TargetLowering::isZExtFree(Val, VT2);
}

bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
  assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
         "invalid fpext types");
  return true;
}

bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                       unsigned,
                                                       unsigned,
                                                       bool *Fast) const {
  if (DisablePPCUnaligned)
    return false;

  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual expansion,
  // and generally only traps for software emulation when crossing page
  // boundaries.
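  // For example, an unaligned i32 or f64 access is reported as legal (and
  // fast), while misaligned vector accesses are only allowed for the VSX
  // types checked below.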

  if (!VT.isSimple())
    return false;

  if (VT.getSimpleVT().isVector()) {
    if (Subtarget.hasVSX()) {
      if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
          VT != MVT::v4f32 && VT != MVT::v4i32)
        return false;
    } else {
      return false;
    }
  }

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return (EnableQuadPrecision && Subtarget.hasP9Vector());
  default:
    break;
  }

  return false;
}

const MCPhysReg *
PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
  // LR is a callee-save register, but we must treat it as clobbered by any
  // call site. Hence we include LR in the scratch registers, which are in
  // turn added as implicit-defs for stackmaps and patchpoints. The same
  // reasoning applies to CTR, which is used by any indirect call.
  static const MCPhysReg ScratchRegs[] = {
    PPC::X12, PPC::LR8, PPC::CTR8, 0
  };

  return ScratchRegs;
}

unsigned PPCTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
}

unsigned PPCTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
}

bool
PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
    EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves

  if (Subtarget.hasVSX() || Subtarget.hasQPX())
    return true;

  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref || Subtarget.enableMachineScheduler())
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}

// Create a fast isel object.
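// Fast-isel is used (typically at -O0) to select simple operations directly,
// without building a full SelectionDAG for the basic block.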
FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) const {
  return PPC::createFastISel(FuncInfo, LibInfo);
}

void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  if (Subtarget.isDarwinABI()) return;
  if (!Subtarget.isPPC64()) return;

  // Update IsSplitCSR in PPCFunctionInfo.
  PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>();
  PFI->setIsSplitCSR(true);
}

void PPCTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (PPC::G8RCRegClass.contains(*I))
      RC = &PPC::G8RCRegClass;
    else if (PPC::F8RCRegClass.contains(*I))
      RC = &PPC::F8RCRegClass;
    else if (PPC::CRRCRegClass.contains(*I))
      RC = &PPC::CRRCRegClass;
    else if (PPC::VRRCRegClass.contains(*I))
      RC = &PPC::VRRCRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    unsigned NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions, it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction().hasFnAttribute(
               Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool PPCTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}

bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  if (!VT.isSimple() || !Subtarget.hasVSX())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  default:
    // For FP types that are currently not supported by the PPC backend,
    // return false. Examples: f16, f80.
    return false;
  case MVT::f32:
  case MVT::f64:
  case MVT::ppcf128:
    return Imm.isPosZero();
  }
}

// For vector shift operation op, fold
// (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
                                  SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarSizeInBits();
  unsigned Opcode = N->getOpcode();
  unsigned TargetOpcode;

  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected shift operation");
  case ISD::SHL:
    TargetOpcode = PPCISD::SHL;
    break;
  case ISD::SRL:
    TargetOpcode = PPCISD::SRL;
    break;
  case ISD::SRA:
    TargetOpcode = PPCISD::SRA;
    break;
  }

  if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
      N1->getOpcode() == ISD::AND)
    if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
      if (Mask->getZExtValue() == OpSizeInBits - 1)
        return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));

  return SDValue();
}

SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Only duplicate to increase tail-calls for the 64-bit SysV ABIs.
  if (!Subtarget.isSVR4ABI() || !Subtarget.isPPC64())
    return false;

  // If not a tail call then no need to proceed.
  if (!CI->isTailCall())
    return false;

  // If tail calls are disabled for the caller then we are done.
  const Function *Caller = CI->getParent()->getParent();
  auto Attr = Caller->getFnAttribute("disable-tail-calls");
  if (Attr.getValueAsString() == "true")
    return false;

  // If sibling calls have been disabled and tail-calls aren't guaranteed,
  // there is no reason to duplicate.
  auto &TM = getTargetMachine();
  if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
    return false;

  // Can't tail call a function called indirectly, or if it has variadic args.
  const Function *Callee = CI->getCalledFunction();
  if (!Callee || Callee->isVarArg())
    return false;

  // Make sure the callee and caller calling conventions are eligible for TCO.
  if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
                                           CI->getCallingConv()))
    return false;

  // If the function is local then we have a good chance at tail-calling it.
  return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
}

bool PPCTargetLowering::
isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
  const Value *Mask = AndI.getOperand(1);
  // If the mask is suitable for andi. or andis. we should sink the and.
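  // For example, masks 0x1234 (andi.) and 0xFFFF0000 (andis.) are suitable;
  // 0x112340000 is not, since it fits neither immediate form.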
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
    // Can't handle constants wider than 64 bits.
    if (CI->getBitWidth() > 64)
      return false;
    int64_t ConstVal = CI->getZExtValue();
    return isUInt<16>(ConstVal) ||
           (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
  }

  // For non-constant masks, we can always use the record-form and.
  return true;
}