//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"
static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
    cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
    cl::desc("disable setting the node scheduling preference to ILP on PPC"),
    cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
    cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
    cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");

static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!useSoftFloat()) {
    addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
    addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
  }

  // Match BITREVERSE to customized fast code sequence in the td file.
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // This is used in the ppcf128->int sequence. Note it has different semantics
  // from FP_ROUND: that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9.
  // On P9 we may use a hardware instruction to compute the remainder.
  // The instructions are not legalized directly because in the cases where the
  // result of both the remainder and the division is required it is more
  // efficient to compute the remainder from the result of the division rather
  // than use the remainder instruction.
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
    setOperationAction(ISD::SREM, MVT::i64, Custom);
    setOperationAction(ISD::UREM, MVT::i64, Custom);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }

  if (Subtarget.hasP9Vector()) {
    setOperationAction(ISD::ABS, MVT::v4i32, Legal);
    setOperationAction(ISD::ABS, MVT::v8i16, Legal);
    setOperationAction(ISD::ABS, MVT::v16i8, Legal);
  }

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
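  // (With these expanded, a scalar remainder is instead computed from the
  // division result, roughly as a - (a / b) * b, so the divide can be reused.)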
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Legal);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Expand FSQRT unless we have the hardware square-root instruction, or can
  // use a reciprocal square-root estimate sequence under unsafe FP math.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, but we can use the vector BSWAP instruction
  // xxbrd to speed up scalar BSWAP64.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::BSWAP, MVT::i64, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Legal);
    setOperationAction(ISD::CTTZ, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);
    setOperationAction(ISD::CTTZ, MVT::i32, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have SELECT.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires a SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling, but is a light-weight setjmp/longjmp replacement
  // used to support continuations, user-level threading, etc. As a result, no
  // other SjLj exception interfaces are implemented; please don't build your
  // own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
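  // (For example, on 64-bit ELF a GlobalAddress is generally materialized
  // through a TOC entry, while 32-bit code typically uses PPCISD::Hi/Lo
  // addis/addi pairs; see the Lower*Address helpers further below.)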
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  if (Subtarget.isSVR4ABI() && !isPPC64)
    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
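  // (For example, SETULT has no single PPC compare condition; expanding it
  // roughly yields an ordered-less-than test combined with an unordered
  // (SETUO) test.)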
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // Vector instructions introduced in P8
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType(ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
      setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128-bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However, because of the cost of the direct
        // moves involved, it is not worth doing here.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Vector operation legalization checks the result type of
      // SIGN_EXTEND_INREG, overall legalization checks the inner type.
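      // (Hence the entries below for v2i16/v2i8 as well, even though those are
      // not legal types by themselves.)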
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128-bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);
    }

    if (Subtarget.hasP9Altivec()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f64, Custom);
    setOperationAction(ISD::STORE, MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_ROUND_INREG, MVT::v4f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG, MVT::v4f64, Legal);
    setOperationAction(ISD::FABS, MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f32, Custom);
    setOperationAction(ISD::STORE, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND, MVT::v4i1, Legal);
    setOperationAction(ISD::OR, MVT::v4i1, Legal);
    setOperationAction(ISD::XOR, MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
    setOperationAction(ISD::STORE, MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);
    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
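  // For example, the ppcf128 sin libcall becomes "sinl$LDBL128", as set below.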
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(2);
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(4);

  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
    setPrefFunctionAlignment(4);
    setPrefLoopAlignment(4);
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
    MaxLoadsPerMemcmp = 128;
  } else {
    MaxLoadsPerMemcmp = 8;
    MaxLoadsPerMemcmpOptSize = 4;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
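/// The result is capped at MaxMaxAlign (16 when only Altivec is available,
/// 32 with QPX); see getByValTypeAlignment below.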
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary. Everything
  // else is passed on an 8-byte boundary on PPC64 and a 4-byte boundary on
  // PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
  return Align;
}

bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER: break;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
  case PPCISD::FRE: return "PPCISD::FRE";
  case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::XXSPLT: return "PPCISD::XXSPLT";
  case PPCISD::VECINSERT: return "PPCISD::VECINSERT";
  case PPCISD::XXREVERSE: return "PPCISD::XXREVERSE";
  case PPCISD::XXPERMDI: return "PPCISD::XXPERMDI";
  case PPCISD::VECSHL: return "PPCISD::VECSHL";
  case PPCISD::CMPB: return "PPCISD::CMPB";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
  case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
  case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL: return "PPCISD::SRL";
  case PPCISD::SRA: return "PPCISD::SRA";
  case PPCISD::SHL: return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL: return "PPCISD::CALL";
  case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR: return "PPCISD::MTCTR";
  case PPCISD::BCTRL: return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
  case PPCISD::MFVSR: return "PPCISD::MFVSR";
  case PPCISD::MTVSRA: return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ";
  case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP";
  case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP";
  case PPCISD::ANDIo_1_EQ_BIT: return "PPCISD::ANDIo_1_EQ_BIT";
  case PPCISD::ANDIo_1_GT_BIT: return "PPCISD::ANDIo_1_GT_BIT";
  case PPCISD::VCMP: return "PPCISD::VCMP";
  case PPCISD::VCMPo: return "PPCISD::VCMPo";
  case PPCISD::LBRX: return "PPCISD::LBRX";
  case PPCISD::STBRX: return "PPCISD::STBRX";
  case PPCISD::LFIWAX: return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX: return "PPCISD::LFIWZX";
  case PPCISD::LXSIZX: return "PPCISD::LXSIZX";
  case PPCISD::STXSIX: return "PPCISD::STXSIX";
  case PPCISD::VEXTS: return "PPCISD::VEXTS";
  case PPCISD::SExtVElems: return "PPCISD::SExtVElems";
  case PPCISD::LXVD2X: return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X: return "PPCISD::STXVD2X";
  case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ: return "PPCISD::BDNZ";
  case PPCISD::BDZ: return "PPCISD::BDZ";
  case PPCISD::MFFS: return "PPCISD::MFFS";
  case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET: return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
  case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT";
  case PPCISD::SC: return "PPCISD::SC";
  case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB";
  case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE";
  case PPCISD::RFEBB: return "PPCISD::RFEBB";
  case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD";
  case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN";
  case PPCISD::QVFPERM: return "PPCISD::QVFPERM";
  case PPCISD::QVGPCI: return "PPCISD::QVGPCI";
  case PPCISD::QVALIGNI: return "PPCISD::QVALIGNI";
  case PPCISD::QVESPLATI: return "PPCISD::QVESPLATI";
  case PPCISD::QBFLT: return "PPCISD::QBFLT";
  case PPCISD::QVLFSb: return "PPCISD::QVLFSb";
  }
  return nullptr;
}

EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
                                          EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

  if (Subtarget.hasQPX())
    return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());

  return VT.changeVectorElementTypeToInteger();
}

bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
        return false;
  }
  return true;
}

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
/// current subtarget.
///
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  const PPCSubtarget& Subtarget =
      static_cast<const PPCSubtarget&>(DAG.getSubtarget());
  if (!Subtarget.hasP8Vector())
    return false;

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
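/// For example, a big-endian vmrglb of two different inputs corresponds to
/// isVMerge(N, 1, 8, 24), i.e. the byte mask <8,24, 9,25, ..., 15,31>.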
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}

/**
 * \brief Common function used to match vmrgew and vmrgow shuffles
 *
 * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target
 * machine.
 *   - Little Endian:
 *     - Use offset of 0 to check for odd elements
 *     - Use offset of 4 to check for even elements
 *   - Big Endian:
 *     - Use offset of 0 to check for even elements
 *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 * Targeting your applications - what little endian and big endian IBM XL C/C++
 * compiler differences mean to you
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements are
 * numbered in array-access order, starting with the first vector. These vectors
 * are always of type v16i8, thus each vector will contain 16 elements, each
 * 8 bits wide. More info on the shuffle vector can be found in the
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
 * Language Reference.
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 *   - If the instruction uses the same vector for both inputs, the range of the
 *     indices will be 0 to 15. In this case, the RHSStart value passed should
 *     be 0.
 *   - If the instruction has two different vectors then the range of the
 *     indices will be 0 to 31. In this case, the RHSStart value passed should
 *     be 16 (indices 0-15 specify elements in the first vector while indices 16
 *     to 31 specify elements in the second vector).
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] IndexOffset Specifies whether to look for even or odd elements
 * \param[in] RHSStartValue Specifies the starting index for the righthand input
 * vector to the shuffle_vector instruction
 * \return true iff this shuffle vector represents an even or odd word merge
 */
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
      if (!isConstantOrUndef(N->getMaskElt(i*4+j),
                             i*RHSStartValue+j+IndexOffset) ||
          !isConstantOrUndef(N->getMaskElt(i*4+j+8),
                             i*RHSStartValue+j+IndexOffset+8))
        return false;
  return true;
}

/**
 * \brief Determine if the specified shuffle mask is suitable for the vmrgew or
 * vmrgow instructions.
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
 * \param[in] ShuffleKind Identify the type of merge:
 *   - 0 = big-endian merge with two different inputs;
 *   - 1 = either-endian merge with two identical inputs;
 *   - 2 = little-endian merge with two different inputs (inputs are swapped for
 *     little-endian merges).
 * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask matches the requested even or odd word
 * merge
 */
bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                              unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    unsigned indexOffset = CheckEven ?
4 : 0; 1533 if (ShuffleKind == 1) // Unary 1534 return isVMerge(N, indexOffset, 0); 1535 else if (ShuffleKind == 2) // swapped 1536 return isVMerge(N, indexOffset, 16); 1537 else 1538 return false; 1539 } 1540 else { 1541 unsigned indexOffset = CheckEven ? 0 : 4; 1542 if (ShuffleKind == 1) // Unary 1543 return isVMerge(N, indexOffset, 0); 1544 else if (ShuffleKind == 0) // Normal 1545 return isVMerge(N, indexOffset, 16); 1546 else 1547 return false; 1548 } 1549 return false; 1550 } 1551 1552 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift 1553 /// amount, otherwise return -1. 1554 /// The ShuffleKind distinguishes between big-endian operations with two 1555 /// different inputs (0), either-endian operations with two identical inputs 1556 /// (1), and little-endian operations with two different inputs (2). For the 1557 /// latter, the input operands are swapped (see PPCInstrAltivec.td). 1558 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind, 1559 SelectionDAG &DAG) { 1560 if (N->getValueType(0) != MVT::v16i8) 1561 return -1; 1562 1563 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 1564 1565 // Find the first non-undef value in the shuffle mask. 1566 unsigned i; 1567 for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i) 1568 /*search*/; 1569 1570 if (i == 16) return -1; // all undef. 1571 1572 // Otherwise, check to see if the rest of the elements are consecutively 1573 // numbered from this value. 1574 unsigned ShiftAmt = SVOp->getMaskElt(i); 1575 if (ShiftAmt < i) return -1; 1576 1577 ShiftAmt -= i; 1578 bool isLE = DAG.getDataLayout().isLittleEndian(); 1579 1580 if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) { 1581 // Check the rest of the elements to see if they are consecutive. 1582 for (++i; i != 16; ++i) 1583 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 1584 return -1; 1585 } else if (ShuffleKind == 1) { 1586 // Check the rest of the elements to see if they are consecutive. 1587 for (++i; i != 16; ++i) 1588 if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15)) 1589 return -1; 1590 } else 1591 return -1; 1592 1593 if (isLE) 1594 ShiftAmt = 16 - ShiftAmt; 1595 1596 return ShiftAmt; 1597 } 1598 1599 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand 1600 /// specifies a splat of a single element that is suitable for input to 1601 /// VSPLTB/VSPLTH/VSPLTW. 1602 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) { 1603 assert(N->getValueType(0) == MVT::v16i8 && 1604 (EltSize == 1 || EltSize == 2 || EltSize == 4)); 1605 1606 // The consecutive indices need to specify an element, not part of two 1607 // different elements. So abandon ship early if this isn't the case. 1608 if (N->getMaskElt(0) % EltSize != 0) 1609 return false; 1610 1611 // This is a splat operation if each element of the permute is the same, and 1612 // if the value doesn't reference the second vector. 1613 unsigned ElementBase = N->getMaskElt(0); 1614 1615 // FIXME: Handle UNDEF elements too! 1616 if (ElementBase >= 16) 1617 return false; 1618 1619 // Check that the indices are consecutive, in the case of a multi-byte element 1620 // splatted with a v16i8 mask. 
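  // Illustrative example (not from the original comment): with EltSize == 4, a
  // mask splatting word element 1 of the first input looks like
  //   <4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7>
  // so ElementBase == 4 and every later 4-byte group must repeat bytes 4..7.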
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}

/// Check that the mask is shuffling N byte elements. Within each N byte
/// element of the mask, the indices could be either in increasing or
/// decreasing order as long as they are consecutive.
/// \param[in] N the shuffle vector SD Node to analyze
/// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
/// Word/DoubleWord/QuadWord).
/// \param[in] StepLen the step between consecutive indices within an element:
/// 1 if the indices are increasing, -1 if they are decreasing.
/// \return true iff the mask is shuffling N byte elements.
static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
                                   int StepLen) {
  assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
         "Unexpected element width.");
  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");

  unsigned NumOfElem = 16 / Width;
  unsigned MaskVal[16]; //  Width is never greater than 16
  for (unsigned i = 0; i < NumOfElem; ++i) {
    MaskVal[0] = N->getMaskElt(i * Width);
    if ((StepLen == 1) && (MaskVal[0] % Width)) {
      return false;
    } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
      return false;
    }

    for (unsigned int j = 1; j < Width; ++j) {
      MaskVal[j] = N->getMaskElt(i * Width + j);
      if (MaskVal[j] != MaskVal[j-1] + StepLen) {
        return false;
      }
    }
  }

  return true;
}

bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                          unsigned &InsertAtByte, bool &Swap, bool IsLE) {
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

  // Now we look at mask elements 0,4,8,12
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;
  unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
  unsigned BigEndianShifts[] = { 3, 0, 1, 2 };

  // Below, let H and L be arbitrary elements of the shuffle mask
  // where H is in the range [4,7] and L is in the range [0,3].
  // H, 1, 2, 3 or L, 5, 6, 7
  if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
      (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
    InsertAtByte = IsLE ? 12 : 0;
    Swap = M0 < 4;
    return true;
  }
  // 0, H, 2, 3 or 4, L, 6, 7
  if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
      (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
    InsertAtByte = IsLE ? 8 : 4;
    Swap = M1 < 4;
    return true;
  }
  // 0, 1, H, 3 or 4, 5, L, 7
  if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
      (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
    InsertAtByte = IsLE ?
4 : 8; 1706 Swap = M2 < 4; 1707 return true; 1708 } 1709 // 0, 1, 2, H or 4, 5, 6, L 1710 if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) || 1711 (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) { 1712 ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3]; 1713 InsertAtByte = IsLE ? 0 : 12; 1714 Swap = M3 < 4; 1715 return true; 1716 } 1717 1718 // If both vector operands for the shuffle are the same vector, the mask will 1719 // contain only elements from the first one and the second one will be undef. 1720 if (N->getOperand(1).isUndef()) { 1721 ShiftElts = 0; 1722 Swap = true; 1723 unsigned XXINSERTWSrcElem = IsLE ? 2 : 1; 1724 if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) { 1725 InsertAtByte = IsLE ? 12 : 0; 1726 return true; 1727 } 1728 if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) { 1729 InsertAtByte = IsLE ? 8 : 4; 1730 return true; 1731 } 1732 if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) { 1733 InsertAtByte = IsLE ? 4 : 8; 1734 return true; 1735 } 1736 if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) { 1737 InsertAtByte = IsLE ? 0 : 12; 1738 return true; 1739 } 1740 } 1741 1742 return false; 1743 } 1744 1745 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, 1746 bool &Swap, bool IsLE) { 1747 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 1748 // Ensure each byte index of the word is consecutive. 1749 if (!isNByteElemShuffleMask(N, 4, 1)) 1750 return false; 1751 1752 // Now we look at mask elements 0,4,8,12, which are the beginning of words. 1753 unsigned M0 = N->getMaskElt(0) / 4; 1754 unsigned M1 = N->getMaskElt(4) / 4; 1755 unsigned M2 = N->getMaskElt(8) / 4; 1756 unsigned M3 = N->getMaskElt(12) / 4; 1757 1758 // If both vector operands for the shuffle are the same vector, the mask will 1759 // contain only elements from the first one and the second one will be undef. 1760 if (N->getOperand(1).isUndef()) { 1761 assert(M0 < 4 && "Indexing into an undef vector?"); 1762 if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4) 1763 return false; 1764 1765 ShiftElts = IsLE ? (4 - M0) % 4 : M0; 1766 Swap = false; 1767 return true; 1768 } 1769 1770 // Ensure each word index of the ShuffleVector Mask is consecutive. 1771 if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8) 1772 return false; 1773 1774 if (IsLE) { 1775 if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) { 1776 // Input vectors don't need to be swapped if the leading element 1777 // of the result is one of the 3 left elements of the second vector 1778 // (or if there is no shift to be done at all). 1779 Swap = false; 1780 ShiftElts = (8 - M0) % 8; 1781 } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) { 1782 // Input vectors need to be swapped if the leading element 1783 // of the result is one of the 3 left elements of the first vector 1784 // (or if we're shifting by 4 - thereby simply swapping the vectors). 1785 Swap = true; 1786 ShiftElts = (4 - M0) % 4; 1787 } 1788 1789 return true; 1790 } else { // BE 1791 if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) { 1792 // Input vectors don't need to be swapped if the leading element 1793 // of the result is one of the 4 elements of the first vector. 1794 Swap = false; 1795 ShiftElts = M0; 1796 } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) { 1797 // Input vectors need to be swapped if the leading element 1798 // of the result is one of the 4 elements of the right vector. 
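      // Illustrative example (not from the original comment): a BE mask whose
      // word indices are <5,6,7,0> reaches this branch with M0 == 5, so the
      // inputs are swapped and ShiftElts becomes 1.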
1799 Swap = true; 1800 ShiftElts = M0 - 4; 1801 } 1802 1803 return true; 1804 } 1805 } 1806 1807 bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) { 1808 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 1809 1810 if (!isNByteElemShuffleMask(N, Width, -1)) 1811 return false; 1812 1813 for (int i = 0; i < 16; i += Width) 1814 if (N->getMaskElt(i) != i + Width - 1) 1815 return false; 1816 1817 return true; 1818 } 1819 1820 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) { 1821 return isXXBRShuffleMaskHelper(N, 2); 1822 } 1823 1824 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) { 1825 return isXXBRShuffleMaskHelper(N, 4); 1826 } 1827 1828 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) { 1829 return isXXBRShuffleMaskHelper(N, 8); 1830 } 1831 1832 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) { 1833 return isXXBRShuffleMaskHelper(N, 16); 1834 } 1835 1836 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap 1837 /// if the inputs to the instruction should be swapped and set \p DM to the 1838 /// value for the immediate. 1839 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI 1840 /// AND element 0 of the result comes from the first input (LE) or second input 1841 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered. 1842 /// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle 1843 /// mask. 1844 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM, 1845 bool &Swap, bool IsLE) { 1846 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 1847 1848 // Ensure each byte index of the double word is consecutive. 1849 if (!isNByteElemShuffleMask(N, 8, 1)) 1850 return false; 1851 1852 unsigned M0 = N->getMaskElt(0) / 8; 1853 unsigned M1 = N->getMaskElt(8) / 8; 1854 assert(((M0 | M1) < 4) && "A mask element out of bounds?"); 1855 1856 // If both vector operands for the shuffle are the same vector, the mask will 1857 // contain only elements from the first one and the second one will be undef. 1858 if (N->getOperand(1).isUndef()) { 1859 if ((M0 | M1) < 2) { 1860 DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1); 1861 Swap = false; 1862 return true; 1863 } else 1864 return false; 1865 } 1866 1867 if (IsLE) { 1868 if (M0 > 1 && M1 < 2) { 1869 Swap = false; 1870 } else if (M0 < 2 && M1 > 1) { 1871 M0 = (M0 + 2) % 4; 1872 M1 = (M1 + 2) % 4; 1873 Swap = true; 1874 } else 1875 return false; 1876 1877 // Note: if control flow comes here that means Swap is already set above 1878 DM = (((~M1) & 1) << 1) + ((~M0) & 1); 1879 return true; 1880 } else { // BE 1881 if (M0 < 2 && M1 > 1) { 1882 Swap = false; 1883 } else if (M0 > 1 && M1 < 2) { 1884 M0 = (M0 + 2) % 4; 1885 M1 = (M1 + 2) % 4; 1886 Swap = true; 1887 } else 1888 return false; 1889 1890 // Note: if control flow comes here that means Swap is already set above 1891 DM = (M0 << 1) + (M1 & 1); 1892 return true; 1893 } 1894 } 1895 1896 1897 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the 1898 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask. 
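/// For example (illustrative, not from the original comment): with
/// EltSize == 4 and a mask splatting bytes <4,5,6,7, 4,5,6,7, ...>,
/// getMaskElt(0) / EltSize is 1, so this returns 1 on big-endian targets and
/// (16 / 4 - 1) - 1 == 2 on little-endian targets, where the element
/// numbering seen by vspltw is reversed.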
1899 unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize, 1900 SelectionDAG &DAG) { 1901 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 1902 assert(isSplatShuffleMask(SVOp, EltSize)); 1903 if (DAG.getDataLayout().isLittleEndian()) 1904 return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize); 1905 else 1906 return SVOp->getMaskElt(0) / EltSize; 1907 } 1908 1909 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed 1910 /// by using a vspltis[bhw] instruction of the specified element size, return 1911 /// the constant being splatted. The ByteSize field indicates the number of 1912 /// bytes of each element [124] -> [bhw]. 1913 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { 1914 SDValue OpVal(nullptr, 0); 1915 1916 // If ByteSize of the splat is bigger than the element size of the 1917 // build_vector, then we have a case where we are checking for a splat where 1918 // multiple elements of the buildvector are folded together into a single 1919 // logical element of the splat (e.g. "vsplish 1" to splat {0,1}*8). 1920 unsigned EltSize = 16/N->getNumOperands(); 1921 if (EltSize < ByteSize) { 1922 unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval. 1923 SDValue UniquedVals[4]; 1924 assert(Multiple > 1 && Multiple <= 4 && "How can this happen?"); 1925 1926 // See if all of the elements in the buildvector agree across. 1927 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 1928 if (N->getOperand(i).isUndef()) continue; 1929 // If the element isn't a constant, bail fully out. 1930 if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue(); 1931 1932 if (!UniquedVals[i&(Multiple-1)].getNode()) 1933 UniquedVals[i&(Multiple-1)] = N->getOperand(i); 1934 else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i)) 1935 return SDValue(); // no match. 1936 } 1937 1938 // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains 1939 // either constant or undef values that are identical for each chunk. See 1940 // if these chunks can form into a larger vspltis*. 1941 1942 // Check to see if all of the leading entries are either 0 or -1. If 1943 // neither, then this won't fit into the immediate field. 1944 bool LeadingZero = true; 1945 bool LeadingOnes = true; 1946 for (unsigned i = 0; i != Multiple-1; ++i) { 1947 if (!UniquedVals[i].getNode()) continue; // Must have been undefs. 1948 1949 LeadingZero &= isNullConstant(UniquedVals[i]); 1950 LeadingOnes &= isAllOnesConstant(UniquedVals[i]); 1951 } 1952 // Finally, check the least significant entry. 1953 if (LeadingZero) { 1954 if (!UniquedVals[Multiple-1].getNode()) 1955 return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef 1956 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue(); 1957 if (Val < 16) // 0,0,0,4 -> vspltisw(4) 1958 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32); 1959 } 1960 if (LeadingOnes) { 1961 if (!UniquedVals[Multiple-1].getNode()) 1962 return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef 1963 int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue(); 1964 if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2) 1965 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32); 1966 } 1967 1968 return SDValue(); 1969 } 1970 1971 // Check to see if this buildvec has a single non-undef value in its elements. 
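  // Illustrative example (not from the original comment): a v4i32 build_vector
  // of four copies of the constant -2, queried with ByteSize == 4, reaches this
  // point with EltSize == ByteSize == 4; the common value -2 passes the checks
  // below and is returned as the immediate for a vspltisw.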
1972 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 1973 if (N->getOperand(i).isUndef()) continue; 1974 if (!OpVal.getNode()) 1975 OpVal = N->getOperand(i); 1976 else if (OpVal != N->getOperand(i)) 1977 return SDValue(); 1978 } 1979 1980 if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def. 1981 1982 unsigned ValSizeInBytes = EltSize; 1983 uint64_t Value = 0; 1984 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { 1985 Value = CN->getZExtValue(); 1986 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { 1987 assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!"); 1988 Value = FloatToBits(CN->getValueAPF().convertToFloat()); 1989 } 1990 1991 // If the splat value is larger than the element value, then we can never do 1992 // this splat. The only case that we could fit the replicated bits into our 1993 // immediate field for would be zero, and we prefer to use vxor for it. 1994 if (ValSizeInBytes < ByteSize) return SDValue(); 1995 1996 // If the element value is larger than the splat value, check if it consists 1997 // of a repeated bit pattern of size ByteSize. 1998 if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8)) 1999 return SDValue(); 2000 2001 // Properly sign extend the value. 2002 int MaskVal = SignExtend32(Value, ByteSize * 8); 2003 2004 // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros. 2005 if (MaskVal == 0) return SDValue(); 2006 2007 // Finally, if this value fits in a 5 bit sext field, return it 2008 if (SignExtend32<5>(MaskVal) == MaskVal) 2009 return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32); 2010 return SDValue(); 2011 } 2012 2013 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift 2014 /// amount, otherwise return -1. 2015 int PPC::isQVALIGNIShuffleMask(SDNode *N) { 2016 EVT VT = N->getValueType(0); 2017 if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1) 2018 return -1; 2019 2020 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 2021 2022 // Find the first non-undef value in the shuffle mask. 2023 unsigned i; 2024 for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i) 2025 /*search*/; 2026 2027 if (i == 4) return -1; // all undef. 2028 2029 // Otherwise, check to see if the rest of the elements are consecutively 2030 // numbered from this value. 2031 unsigned ShiftAmt = SVOp->getMaskElt(i); 2032 if (ShiftAmt < i) return -1; 2033 ShiftAmt -= i; 2034 2035 // Check the rest of the elements to see if they are consecutive. 2036 for (++i; i != 4; ++i) 2037 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 2038 return -1; 2039 2040 return ShiftAmt; 2041 } 2042 2043 //===----------------------------------------------------------------------===// 2044 // Addressing Mode Selection 2045 //===----------------------------------------------------------------------===// 2046 2047 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit 2048 /// or 64-bit immediate, and if the value can be accurately represented as a 2049 /// sign extension from a 16-bit value. If so, this returns true and the 2050 /// immediate. 
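/// For example (illustrative, not from the original comment): an i64 constant
/// of -32768 matches with Imm == -32768, while an i64 constant of 32768 does
/// not, because truncating 32768 to int16_t changes its value.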
bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}

/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index,
                                            SelectionDAG &DAG) const {
  int16_t imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i can fold it if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
    KnownBits LHSKnown, RHSKnown;
    DAG.computeKnownBits(N.getOperand(0), LHSKnown);

    if (LHSKnown.Zero.getBoolValue()) {
      DAG.computeKnownBits(N.getOperand(1), RHSKnown);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

// If we happen to be doing an i64 load or store into a stack slot that has
// less than a 4-byte alignment, then the frame-index elimination may need to
// use an indexed load or store instruction (because the offset may not be a
// multiple of 4). The extra register needed to hold the offset comes from the
// register scavenger, and it is possible that the scavenger will need to use
// an emergency spill slot. As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
  // FIXME: This does not handle the LWA case.
  if (VT != MVT::i64)
    return;

  // NOTE: We'll exclude negative FIs here, which come from argument
  // lowering, because there are no known test cases triggering this problem
  // using packed structures (or similar). We can remove this exclusion if
  // we find such a test case. The reason why this is so test-case driven is
  // because this entire 'fixup' is only to prevent crashes (from the
  // register scavenger) on not-really-valid inputs. For example, if we have:
  //   %a = alloca i1
  //   %b = bitcast i1* %a to i64*
  //   store i64 0, i64* %b
  // then the store should really be marked as 'align 1', but is not. If it
  // were marked as 'align 1' then the indexed form would have been
  // instruction-selected initially, and the problem this 'fixup' is preventing
  // won't happen regardless.
2132 if (FrameIdx < 0) 2133 return; 2134 2135 MachineFunction &MF = DAG.getMachineFunction(); 2136 MachineFrameInfo &MFI = MF.getFrameInfo(); 2137 2138 unsigned Align = MFI.getObjectAlignment(FrameIdx); 2139 if (Align >= 4) 2140 return; 2141 2142 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2143 FuncInfo->setHasNonRISpills(); 2144 } 2145 2146 /// Returns true if the address N can be represented by a base register plus 2147 /// a signed 16-bit displacement [r+imm], and if it is not better 2148 /// represented as reg+reg. If \p Alignment is non-zero, only accept 2149 /// displacements that are multiples of that value. 2150 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp, 2151 SDValue &Base, 2152 SelectionDAG &DAG, 2153 unsigned Alignment) const { 2154 // FIXME dl should come from parent load or store, not from address 2155 SDLoc dl(N); 2156 // If this can be more profitably realized as r+r, fail. 2157 if (SelectAddressRegReg(N, Disp, Base, DAG)) 2158 return false; 2159 2160 if (N.getOpcode() == ISD::ADD) { 2161 int16_t imm = 0; 2162 if (isIntS16Immediate(N.getOperand(1), imm) && 2163 (!Alignment || (imm % Alignment) == 0)) { 2164 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 2165 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 2166 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2167 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2168 } else { 2169 Base = N.getOperand(0); 2170 } 2171 return true; // [r+i] 2172 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 2173 // Match LOAD (ADD (X, Lo(G))). 2174 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() 2175 && "Cannot handle constant offsets yet!"); 2176 Disp = N.getOperand(1).getOperand(0); // The global address. 2177 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 2178 Disp.getOpcode() == ISD::TargetGlobalTLSAddress || 2179 Disp.getOpcode() == ISD::TargetConstantPool || 2180 Disp.getOpcode() == ISD::TargetJumpTable); 2181 Base = N.getOperand(0); 2182 return true; // [&g+r] 2183 } 2184 } else if (N.getOpcode() == ISD::OR) { 2185 int16_t imm = 0; 2186 if (isIntS16Immediate(N.getOperand(1), imm) && 2187 (!Alignment || (imm % Alignment) == 0)) { 2188 // If this is an or of disjoint bitfields, we can codegen this as an add 2189 // (for better address arithmetic) if the LHS and RHS of the OR are 2190 // provably disjoint. 2191 KnownBits LHSKnown; 2192 DAG.computeKnownBits(N.getOperand(0), LHSKnown); 2193 2194 if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 2195 // If all of the bits are known zero on the LHS or RHS, the add won't 2196 // carry. 2197 if (FrameIndexSDNode *FI = 2198 dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 2199 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2200 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2201 } else { 2202 Base = N.getOperand(0); 2203 } 2204 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 2205 return true; 2206 } 2207 } 2208 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 2209 // Loading from a constant address. 2210 2211 // If this address fits entirely in a 16-bit sext immediate field, codegen 2212 // this as "d, 0" 2213 int16_t Imm; 2214 if (isIntS16Immediate(CN, Imm) && (!Alignment || (Imm % Alignment) == 0)) { 2215 Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0)); 2216 Base = DAG.getRegister(Subtarget.isPPC64() ? 
PPC::ZERO8 : PPC::ZERO, 2217 CN->getValueType(0)); 2218 return true; 2219 } 2220 2221 // Handle 32-bit sext immediates with LIS + addr mode. 2222 if ((CN->getValueType(0) == MVT::i32 || 2223 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) && 2224 (!Alignment || (CN->getZExtValue() % Alignment) == 0)) { 2225 int Addr = (int)CN->getZExtValue(); 2226 2227 // Otherwise, break this down into an LIS + disp. 2228 Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32); 2229 2230 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl, 2231 MVT::i32); 2232 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8; 2233 Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0); 2234 return true; 2235 } 2236 } 2237 2238 Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout())); 2239 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) { 2240 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2241 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2242 } else 2243 Base = N; 2244 return true; // [r+0] 2245 } 2246 2247 /// SelectAddressRegRegOnly - Given the specified addressed, force it to be 2248 /// represented as an indexed [r+r] operation. 2249 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base, 2250 SDValue &Index, 2251 SelectionDAG &DAG) const { 2252 // Check to see if we can easily represent this as an [r+r] address. This 2253 // will fail if it thinks that the address is more profitably represented as 2254 // reg+imm, e.g. where imm = 0. 2255 if (SelectAddressRegReg(N, Base, Index, DAG)) 2256 return true; 2257 2258 // If the address is the result of an add, we will utilize the fact that the 2259 // address calculation includes an implicit add. However, we can reduce 2260 // register pressure if we do not materialize a constant just for use as the 2261 // index register. We only get rid of the add if it is not an add of a 2262 // value and a 16-bit signed constant and both have a single use. 2263 int16_t imm = 0; 2264 if (N.getOpcode() == ISD::ADD && 2265 (!isIntS16Immediate(N.getOperand(1), imm) || 2266 !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) { 2267 Base = N.getOperand(0); 2268 Index = N.getOperand(1); 2269 return true; 2270 } 2271 2272 // Otherwise, do it the hard way, using R0 as the base register. 2273 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, 2274 N.getValueType()); 2275 Index = N; 2276 return true; 2277 } 2278 2279 /// getPreIndexedAddressParts - returns true by value, base pointer and 2280 /// offset pointer and addressing mode by reference if the node's address 2281 /// can be legally represented as pre-indexed load / store address. 2282 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 2283 SDValue &Offset, 2284 ISD::MemIndexedMode &AM, 2285 SelectionDAG &DAG) const { 2286 if (DisablePPCPreinc) return false; 2287 2288 bool isLoad = true; 2289 SDValue Ptr; 2290 EVT VT; 2291 unsigned Alignment; 2292 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 2293 Ptr = LD->getBasePtr(); 2294 VT = LD->getMemoryVT(); 2295 Alignment = LD->getAlignment(); 2296 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 2297 Ptr = ST->getBasePtr(); 2298 VT = ST->getMemoryVT(); 2299 Alignment = ST->getAlignment(); 2300 isLoad = false; 2301 } else 2302 return false; 2303 2304 // PowerPC doesn't have preinc load/store instructions for vectors (except 2305 // for QPX, which does have preinc r+r forms). 
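  // Illustrative example (not from the original comment): for scalar types, a
  // load whose address is (add r3, 16) can be converted to a pre-increment
  // form here and later selected as an update-form instruction such as lwzu or
  // ldu, which bumps r3 by 16 as a side effect.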
2306 if (VT.isVector()) { 2307 if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) { 2308 return false; 2309 } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) { 2310 AM = ISD::PRE_INC; 2311 return true; 2312 } 2313 } 2314 2315 if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) { 2316 // Common code will reject creating a pre-inc form if the base pointer 2317 // is a frame index, or if N is a store and the base pointer is either 2318 // the same as or a predecessor of the value being stored. Check for 2319 // those situations here, and try with swapped Base/Offset instead. 2320 bool Swap = false; 2321 2322 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base)) 2323 Swap = true; 2324 else if (!isLoad) { 2325 SDValue Val = cast<StoreSDNode>(N)->getValue(); 2326 if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode())) 2327 Swap = true; 2328 } 2329 2330 if (Swap) 2331 std::swap(Base, Offset); 2332 2333 AM = ISD::PRE_INC; 2334 return true; 2335 } 2336 2337 // LDU/STU can only handle immediates that are a multiple of 4. 2338 if (VT != MVT::i64) { 2339 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 0)) 2340 return false; 2341 } else { 2342 // LDU/STU need an address with at least 4-byte alignment. 2343 if (Alignment < 4) 2344 return false; 2345 2346 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 4)) 2347 return false; 2348 } 2349 2350 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 2351 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 2352 // sext i32 to i64 when addr mode is r+i. 2353 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 && 2354 LD->getExtensionType() == ISD::SEXTLOAD && 2355 isa<ConstantSDNode>(Offset)) 2356 return false; 2357 } 2358 2359 AM = ISD::PRE_INC; 2360 return true; 2361 } 2362 2363 //===----------------------------------------------------------------------===// 2364 // LowerOperation implementation 2365 //===----------------------------------------------------------------------===// 2366 2367 /// Return true if we should reference labels using a PICBase, set the HiOpFlags 2368 /// and LoOpFlags to the target MO flags. 2369 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget, 2370 unsigned &HiOpFlags, unsigned &LoOpFlags, 2371 const GlobalValue *GV = nullptr) { 2372 HiOpFlags = PPCII::MO_HA; 2373 LoOpFlags = PPCII::MO_LO; 2374 2375 // Don't use the pic base if not in PIC relocation model. 2376 if (IsPIC) { 2377 HiOpFlags |= PPCII::MO_PIC_FLAG; 2378 LoOpFlags |= PPCII::MO_PIC_FLAG; 2379 } 2380 2381 // If this is a reference to a global value that requires a non-lazy-ptr, make 2382 // sure that instruction lowering adds it. 2383 if (GV && Subtarget.hasLazyResolverStub(GV)) { 2384 HiOpFlags |= PPCII::MO_NLP_FLAG; 2385 LoOpFlags |= PPCII::MO_NLP_FLAG; 2386 2387 if (GV->hasHiddenVisibility()) { 2388 HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 2389 LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 2390 } 2391 } 2392 } 2393 2394 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, 2395 SelectionDAG &DAG) { 2396 SDLoc DL(HiPart); 2397 EVT PtrVT = HiPart.getValueType(); 2398 SDValue Zero = DAG.getConstant(0, DL, PtrVT); 2399 2400 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); 2401 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); 2402 2403 // With PIC, the first instruction is actually "GR+hi(&G)". 
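  // Illustrative note (not from the original comment): without PIC the Hi/Lo
  // pair typically materializes as something like
  //   lis  rD, sym@ha
  //   addi rD, rD, sym@l
  // whereas with PIC the high part is added to the PIC base register instead.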
2404 if (isPIC) 2405 Hi = DAG.getNode(ISD::ADD, DL, PtrVT, 2406 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); 2407 2408 // Generate non-pic code that has direct accesses to the constant pool. 2409 // The address of the global is just (hi(&g)+lo(&g)). 2410 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); 2411 } 2412 2413 static void setUsesTOCBasePtr(MachineFunction &MF) { 2414 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2415 FuncInfo->setUsesTOCBasePtr(); 2416 } 2417 2418 static void setUsesTOCBasePtr(SelectionDAG &DAG) { 2419 setUsesTOCBasePtr(DAG.getMachineFunction()); 2420 } 2421 2422 static SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, bool Is64Bit, 2423 SDValue GA) { 2424 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 2425 SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) : 2426 DAG.getNode(PPCISD::GlobalBaseReg, dl, VT); 2427 2428 SDValue Ops[] = { GA, Reg }; 2429 return DAG.getMemIntrinsicNode( 2430 PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT, 2431 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0, false, true, 2432 false, 0); 2433 } 2434 2435 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 2436 SelectionDAG &DAG) const { 2437 EVT PtrVT = Op.getValueType(); 2438 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 2439 const Constant *C = CP->getConstVal(); 2440 2441 // 64-bit SVR4 ABI code is always position-independent. 2442 // The actual address of the GlobalValue is stored in the TOC. 2443 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2444 setUsesTOCBasePtr(DAG); 2445 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0); 2446 return getTOCEntry(DAG, SDLoc(CP), true, GA); 2447 } 2448 2449 unsigned MOHiFlag, MOLoFlag; 2450 bool IsPIC = isPositionIndependent(); 2451 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2452 2453 if (IsPIC && Subtarget.isSVR4ABI()) { 2454 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 2455 PPCII::MO_PIC_FLAG); 2456 return getTOCEntry(DAG, SDLoc(CP), false, GA); 2457 } 2458 2459 SDValue CPIHi = 2460 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag); 2461 SDValue CPILo = 2462 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag); 2463 return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG); 2464 } 2465 2466 // For 64-bit PowerPC, prefer the more compact relative encodings. 2467 // This trades 32 bits per jump table entry for one or two instructions 2468 // on the jump site. 
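// Illustrative note (not from the original comment): EK_LabelDifference32
// emits 4-byte "label minus base" entries instead of full 64-bit pointers,
// which is where the 32 bits per entry are saved; the extra instructions at
// the jump site add the loaded entry back onto the base address.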
2469 unsigned PPCTargetLowering::getJumpTableEncoding() const { 2470 if (isJumpTableRelative()) 2471 return MachineJumpTableInfo::EK_LabelDifference32; 2472 2473 return TargetLowering::getJumpTableEncoding(); 2474 } 2475 2476 bool PPCTargetLowering::isJumpTableRelative() const { 2477 if (Subtarget.isPPC64()) 2478 return true; 2479 return TargetLowering::isJumpTableRelative(); 2480 } 2481 2482 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table, 2483 SelectionDAG &DAG) const { 2484 if (!Subtarget.isPPC64()) 2485 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 2486 2487 switch (getTargetMachine().getCodeModel()) { 2488 case CodeModel::Small: 2489 case CodeModel::Medium: 2490 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 2491 default: 2492 return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(), 2493 getPointerTy(DAG.getDataLayout())); 2494 } 2495 } 2496 2497 const MCExpr * 2498 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF, 2499 unsigned JTI, 2500 MCContext &Ctx) const { 2501 if (!Subtarget.isPPC64()) 2502 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 2503 2504 switch (getTargetMachine().getCodeModel()) { 2505 case CodeModel::Small: 2506 case CodeModel::Medium: 2507 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 2508 default: 2509 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx); 2510 } 2511 } 2512 2513 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 2514 EVT PtrVT = Op.getValueType(); 2515 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 2516 2517 // 64-bit SVR4 ABI code is always position-independent. 2518 // The actual address of the GlobalValue is stored in the TOC. 2519 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2520 setUsesTOCBasePtr(DAG); 2521 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 2522 return getTOCEntry(DAG, SDLoc(JT), true, GA); 2523 } 2524 2525 unsigned MOHiFlag, MOLoFlag; 2526 bool IsPIC = isPositionIndependent(); 2527 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2528 2529 if (IsPIC && Subtarget.isSVR4ABI()) { 2530 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 2531 PPCII::MO_PIC_FLAG); 2532 return getTOCEntry(DAG, SDLoc(GA), false, GA); 2533 } 2534 2535 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 2536 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 2537 return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG); 2538 } 2539 2540 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 2541 SelectionDAG &DAG) const { 2542 EVT PtrVT = Op.getValueType(); 2543 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); 2544 const BlockAddress *BA = BASDN->getBlockAddress(); 2545 2546 // 64-bit SVR4 ABI code is always position-independent. 2547 // The actual BlockAddress is stored in the TOC. 
2548 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2549 setUsesTOCBasePtr(DAG); 2550 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); 2551 return getTOCEntry(DAG, SDLoc(BASDN), true, GA); 2552 } 2553 2554 unsigned MOHiFlag, MOLoFlag; 2555 bool IsPIC = isPositionIndependent(); 2556 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2557 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 2558 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 2559 return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG); 2560 } 2561 2562 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 2563 SelectionDAG &DAG) const { 2564 // FIXME: TLS addresses currently use medium model code sequences, 2565 // which is the most useful form. Eventually support for small and 2566 // large models could be added if users need it, at the cost of 2567 // additional complexity. 2568 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2569 if (DAG.getTarget().Options.EmulatedTLS) 2570 return LowerToTLSEmulatedModel(GA, DAG); 2571 2572 SDLoc dl(GA); 2573 const GlobalValue *GV = GA->getGlobal(); 2574 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2575 bool is64bit = Subtarget.isPPC64(); 2576 const Module *M = DAG.getMachineFunction().getFunction()->getParent(); 2577 PICLevel::Level picLevel = M->getPICLevel(); 2578 2579 TLSModel::Model Model = getTargetMachine().getTLSModel(GV); 2580 2581 if (Model == TLSModel::LocalExec) { 2582 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2583 PPCII::MO_TPREL_HA); 2584 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2585 PPCII::MO_TPREL_LO); 2586 SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64) 2587 : DAG.getRegister(PPC::R2, MVT::i32); 2588 2589 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 2590 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 2591 } 2592 2593 if (Model == TLSModel::InitialExec) { 2594 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2595 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2596 PPCII::MO_TLS); 2597 SDValue GOTPtr; 2598 if (is64bit) { 2599 setUsesTOCBasePtr(DAG); 2600 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2601 GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, 2602 PtrVT, GOTReg, TGA); 2603 } else 2604 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); 2605 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, 2606 PtrVT, TGA, GOTPtr); 2607 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 2608 } 2609 2610 if (Model == TLSModel::GeneralDynamic) { 2611 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2612 SDValue GOTPtr; 2613 if (is64bit) { 2614 setUsesTOCBasePtr(DAG); 2615 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2616 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 2617 GOTReg, TGA); 2618 } else { 2619 if (picLevel == PICLevel::SmallPIC) 2620 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2621 else 2622 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2623 } 2624 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT, 2625 GOTPtr, TGA, TGA); 2626 } 2627 2628 if (Model == TLSModel::LocalDynamic) { 2629 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2630 SDValue GOTPtr; 2631 if (is64bit) { 2632 setUsesTOCBasePtr(DAG); 2633 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2634 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 2635 GOTReg, TGA); 2636 } else { 2637 if (picLevel == 
PICLevel::SmallPIC) 2638 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2639 else 2640 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2641 } 2642 SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl, 2643 PtrVT, GOTPtr, TGA, TGA); 2644 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, 2645 PtrVT, TLSAddr, TGA); 2646 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA); 2647 } 2648 2649 llvm_unreachable("Unknown TLS model!"); 2650 } 2651 2652 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 2653 SelectionDAG &DAG) const { 2654 EVT PtrVT = Op.getValueType(); 2655 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 2656 SDLoc DL(GSDN); 2657 const GlobalValue *GV = GSDN->getGlobal(); 2658 2659 // 64-bit SVR4 ABI code is always position-independent. 2660 // The actual address of the GlobalValue is stored in the TOC. 2661 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2662 setUsesTOCBasePtr(DAG); 2663 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); 2664 return getTOCEntry(DAG, DL, true, GA); 2665 } 2666 2667 unsigned MOHiFlag, MOLoFlag; 2668 bool IsPIC = isPositionIndependent(); 2669 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV); 2670 2671 if (IsPIC && Subtarget.isSVR4ABI()) { 2672 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 2673 GSDN->getOffset(), 2674 PPCII::MO_PIC_FLAG); 2675 return getTOCEntry(DAG, DL, false, GA); 2676 } 2677 2678 SDValue GAHi = 2679 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); 2680 SDValue GALo = 2681 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); 2682 2683 SDValue Ptr = LowerLabelRef(GAHi, GALo, IsPIC, DAG); 2684 2685 // If the global reference is actually to a non-lazy-pointer, we have to do an 2686 // extra load to get the address of the global. 2687 if (MOHiFlag & PPCII::MO_NLP_FLAG) 2688 Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo()); 2689 return Ptr; 2690 } 2691 2692 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 2693 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2694 SDLoc dl(Op); 2695 2696 if (Op.getValueType() == MVT::v2i64) { 2697 // When the operands themselves are v2i64 values, we need to do something 2698 // special because VSX has no underlying comparison operations for these. 2699 if (Op.getOperand(0).getValueType() == MVT::v2i64) { 2700 // Equality can be handled by casting to the legal type for Altivec 2701 // comparisons, everything else needs to be expanded. 2702 if (CC == ISD::SETEQ || CC == ISD::SETNE) { 2703 return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 2704 DAG.getSetCC(dl, MVT::v4i32, 2705 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)), 2706 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)), 2707 CC)); 2708 } 2709 2710 return SDValue(); 2711 } 2712 2713 // We handle most of these in the usual way. 2714 return Op; 2715 } 2716 2717 // If we're comparing for equality to zero, expose the fact that this is 2718 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can 2719 // fold the new nodes. 2720 if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG)) 2721 return V; 2722 2723 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2724 // Leave comparisons against 0 and -1 alone for now, since they're usually 2725 // optimized. FIXME: revisit this when we can custom lower all setcc 2726 // optimizations. 
2727 if (C->isAllOnesValue() || C->isNullValue()) 2728 return SDValue(); 2729 } 2730 2731 // If we have an integer seteq/setne, turn it into a compare against zero 2732 // by xor'ing the rhs with the lhs, which is faster than setting a 2733 // condition register, reading it back out, and masking the correct bit. The 2734 // normal approach here uses sub to do this instead of xor. Using xor exposes 2735 // the result to other bit-twiddling opportunities. 2736 EVT LHSVT = Op.getOperand(0).getValueType(); 2737 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 2738 EVT VT = Op.getValueType(); 2739 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0), 2740 Op.getOperand(1)); 2741 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC); 2742 } 2743 return SDValue(); 2744 } 2745 2746 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { 2747 SDNode *Node = Op.getNode(); 2748 EVT VT = Node->getValueType(0); 2749 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2750 SDValue InChain = Node->getOperand(0); 2751 SDValue VAListPtr = Node->getOperand(1); 2752 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 2753 SDLoc dl(Node); 2754 2755 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only"); 2756 2757 // gpr_index 2758 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 2759 VAListPtr, MachinePointerInfo(SV), MVT::i8); 2760 InChain = GprIndex.getValue(1); 2761 2762 if (VT == MVT::i64) { 2763 // Check if GprIndex is even 2764 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex, 2765 DAG.getConstant(1, dl, MVT::i32)); 2766 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd, 2767 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE); 2768 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex, 2769 DAG.getConstant(1, dl, MVT::i32)); 2770 // Align GprIndex to be even if it isn't 2771 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne, 2772 GprIndex); 2773 } 2774 2775 // fpr index is 1 byte after gpr 2776 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2777 DAG.getConstant(1, dl, MVT::i32)); 2778 2779 // fpr 2780 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 2781 FprPtr, MachinePointerInfo(SV), MVT::i8); 2782 InChain = FprIndex.getValue(1); 2783 2784 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2785 DAG.getConstant(8, dl, MVT::i32)); 2786 2787 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2788 DAG.getConstant(4, dl, MVT::i32)); 2789 2790 // areas 2791 SDValue OverflowArea = 2792 DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo()); 2793 InChain = OverflowArea.getValue(1); 2794 2795 SDValue RegSaveArea = 2796 DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo()); 2797 InChain = RegSaveArea.getValue(1); 2798 2799 // select overflow_area if index > 8 2800 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex, 2801 DAG.getConstant(8, dl, MVT::i32), ISD::SETLT); 2802 2803 // adjustment constant gpr_index * 4/8 2804 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32, 2805 VT.isInteger() ? GprIndex : FprIndex, 2806 DAG.getConstant(VT.isInteger() ? 
4 : 8, dl, 2807 MVT::i32)); 2808 2809 // OurReg = RegSaveArea + RegConstant 2810 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea, 2811 RegConstant); 2812 2813 // Floating types are 32 bytes into RegSaveArea 2814 if (VT.isFloatingPoint()) 2815 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg, 2816 DAG.getConstant(32, dl, MVT::i32)); 2817 2818 // increase {f,g}pr_index by 1 (or 2 if VT is i64) 2819 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32, 2820 VT.isInteger() ? GprIndex : FprIndex, 2821 DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl, 2822 MVT::i32)); 2823 2824 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1, 2825 VT.isInteger() ? VAListPtr : FprPtr, 2826 MachinePointerInfo(SV), MVT::i8); 2827 2828 // determine if we should load from reg_save_area or overflow_area 2829 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea); 2830 2831 // increase overflow_area by 4/8 if gpr/fpr > 8 2832 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea, 2833 DAG.getConstant(VT.isInteger() ? 4 : 8, 2834 dl, MVT::i32)); 2835 2836 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea, 2837 OverflowAreaPlusN); 2838 2839 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr, 2840 MachinePointerInfo(), MVT::i32); 2841 2842 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo()); 2843 } 2844 2845 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { 2846 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only"); 2847 2848 // We have to copy the entire va_list struct: 2849 // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte 2850 return DAG.getMemcpy(Op.getOperand(0), Op, 2851 Op.getOperand(1), Op.getOperand(2), 2852 DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true, 2853 false, MachinePointerInfo(), MachinePointerInfo()); 2854 } 2855 2856 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 2857 SelectionDAG &DAG) const { 2858 return Op.getOperand(0); 2859 } 2860 2861 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 2862 SelectionDAG &DAG) const { 2863 SDValue Chain = Op.getOperand(0); 2864 SDValue Trmp = Op.getOperand(1); // trampoline 2865 SDValue FPtr = Op.getOperand(2); // nested function 2866 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 2867 SDLoc dl(Op); 2868 2869 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2870 bool isPPC64 = (PtrVT == MVT::i64); 2871 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext()); 2872 2873 TargetLowering::ArgListTy Args; 2874 TargetLowering::ArgListEntry Entry; 2875 2876 Entry.Ty = IntPtrTy; 2877 Entry.Node = Trmp; Args.push_back(Entry); 2878 2879 // TrampSize == (isPPC64 ? 48 : 40); 2880 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl, 2881 isPPC64 ? 
MVT::i64 : MVT::i32); 2882 Args.push_back(Entry); 2883 2884 Entry.Node = FPtr; Args.push_back(Entry); 2885 Entry.Node = Nest; Args.push_back(Entry); 2886 2887 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 2888 TargetLowering::CallLoweringInfo CLI(DAG); 2889 CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( 2890 CallingConv::C, Type::getVoidTy(*DAG.getContext()), 2891 DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args)); 2892 2893 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 2894 return CallResult.second; 2895 } 2896 2897 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 2898 MachineFunction &MF = DAG.getMachineFunction(); 2899 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2900 EVT PtrVT = getPointerTy(MF.getDataLayout()); 2901 2902 SDLoc dl(Op); 2903 2904 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) { 2905 // vastart just stores the address of the VarArgsFrameIndex slot into the 2906 // memory location argument. 2907 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2908 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2909 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2910 MachinePointerInfo(SV)); 2911 } 2912 2913 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 2914 // We suppose the given va_list is already allocated. 2915 // 2916 // typedef struct { 2917 // char gpr; /* index into the array of 8 GPRs 2918 // * stored in the register save area 2919 // * gpr=0 corresponds to r3, 2920 // * gpr=1 to r4, etc. 2921 // */ 2922 // char fpr; /* index into the array of 8 FPRs 2923 // * stored in the register save area 2924 // * fpr=0 corresponds to f1, 2925 // * fpr=1 to f2, etc. 
2926 // */ 2927 // char *overflow_arg_area; 2928 // /* location on stack that holds 2929 // * the next overflow argument 2930 // */ 2931 // char *reg_save_area; 2932 // /* where r3:r10 and f1:f8 (if saved) 2933 // * are stored 2934 // */ 2935 // } va_list[1]; 2936 2937 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32); 2938 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32); 2939 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 2940 PtrVT); 2941 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 2942 PtrVT); 2943 2944 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 2945 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT); 2946 2947 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 2948 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT); 2949 2950 uint64_t FPROffset = 1; 2951 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT); 2952 2953 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2954 2955 // Store first byte : number of int regs 2956 SDValue firstStore = 2957 DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1), 2958 MachinePointerInfo(SV), MVT::i8); 2959 uint64_t nextOffset = FPROffset; 2960 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 2961 ConstFPROffset); 2962 2963 // Store second byte : number of float regs 2964 SDValue secondStore = 2965 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 2966 MachinePointerInfo(SV, nextOffset), MVT::i8); 2967 nextOffset += StackOffset; 2968 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 2969 2970 // Store second word : arguments given on stack 2971 SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 2972 MachinePointerInfo(SV, nextOffset)); 2973 nextOffset += FrameOffset; 2974 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 2975 2976 // Store third word : arguments given in registers 2977 return DAG.getStore(thirdStore, dl, FR, nextPtr, 2978 MachinePointerInfo(SV, nextOffset)); 2979 } 2980 2981 #include "PPCGenCallingConv.inc" 2982 2983 // Function whose sole purpose is to kill compiler warnings 2984 // stemming from unused functions included from PPCGenCallingConv.inc. 2985 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const { 2986 return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS; 2987 } 2988 2989 bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 2990 CCValAssign::LocInfo &LocInfo, 2991 ISD::ArgFlagsTy &ArgFlags, 2992 CCState &State) { 2993 return true; 2994 } 2995 2996 bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, 2997 MVT &LocVT, 2998 CCValAssign::LocInfo &LocInfo, 2999 ISD::ArgFlagsTy &ArgFlags, 3000 CCState &State) { 3001 static const MCPhysReg ArgRegs[] = { 3002 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3003 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3004 }; 3005 const unsigned NumArgRegs = array_lengthof(ArgRegs); 3006 3007 unsigned RegNum = State.getFirstUnallocated(ArgRegs); 3008 3009 // Skip one register if the first unallocated register has an even register 3010 // number and there are still argument registers available which have not been 3011 // allocated yet. RegNum is actually an index into ArgRegs, which means we 3012 // need to skip a register if RegNum is odd. 
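  // For example, if only r3 has been allocated so far (RegNum == 1, so r4 is
  // the next free register), r4 is allocated and skipped here so that the i64
  // argument lands in the aligned pair r5/r6.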
3013 if (RegNum != NumArgRegs && RegNum % 2 == 1) { 3014 State.AllocateReg(ArgRegs[RegNum]); 3015 } 3016 3017 // Always return false here, as this function only makes sure that the first 3018 // unallocated register has an odd register number and does not actually 3019 // allocate a register for the current argument. 3020 return false; 3021 } 3022 3023 bool 3024 llvm::CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128(unsigned &ValNo, MVT &ValVT, 3025 MVT &LocVT, 3026 CCValAssign::LocInfo &LocInfo, 3027 ISD::ArgFlagsTy &ArgFlags, 3028 CCState &State) { 3029 static const MCPhysReg ArgRegs[] = { 3030 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3031 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3032 }; 3033 const unsigned NumArgRegs = array_lengthof(ArgRegs); 3034 3035 unsigned RegNum = State.getFirstUnallocated(ArgRegs); 3036 int RegsLeft = NumArgRegs - RegNum; 3037 3038 // Skip if there is not enough registers left for long double type (4 gpr regs 3039 // in soft float mode) and put long double argument on the stack. 3040 if (RegNum != NumArgRegs && RegsLeft < 4) { 3041 for (int i = 0; i < RegsLeft; i++) { 3042 State.AllocateReg(ArgRegs[RegNum + i]); 3043 } 3044 } 3045 3046 return false; 3047 } 3048 3049 bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT, 3050 MVT &LocVT, 3051 CCValAssign::LocInfo &LocInfo, 3052 ISD::ArgFlagsTy &ArgFlags, 3053 CCState &State) { 3054 static const MCPhysReg ArgRegs[] = { 3055 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 3056 PPC::F8 3057 }; 3058 3059 const unsigned NumArgRegs = array_lengthof(ArgRegs); 3060 3061 unsigned RegNum = State.getFirstUnallocated(ArgRegs); 3062 3063 // If there is only one Floating-point register left we need to put both f64 3064 // values of a split ppc_fp128 value on the stack. 3065 if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) { 3066 State.AllocateReg(ArgRegs[RegNum]); 3067 } 3068 3069 // Always return false here, as this function only makes sure that the two f64 3070 // values a ppc_fp128 value is split into are both passed in registers or both 3071 // passed on the stack and does not actually allocate a register for the 3072 // current argument. 3073 return false; 3074 } 3075 3076 /// FPR - The set of FP registers that should be allocated for arguments, 3077 /// on Darwin. 3078 static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, 3079 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10, 3080 PPC::F11, PPC::F12, PPC::F13}; 3081 3082 /// QFPR - The set of QPX registers that should be allocated for arguments. 3083 static const MCPhysReg QFPR[] = { 3084 PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7, 3085 PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13}; 3086 3087 /// CalculateStackSlotSize - Calculates the size reserved for this argument on 3088 /// the stack. 3089 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 3090 unsigned PtrByteSize) { 3091 unsigned ArgSize = ArgVT.getStoreSize(); 3092 if (Flags.isByVal()) 3093 ArgSize = Flags.getByValSize(); 3094 3095 // Round up to multiples of the pointer size, except for array members, 3096 // which are always packed. 3097 if (!Flags.isInConsecutiveRegs()) 3098 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3099 3100 return ArgSize; 3101 } 3102 3103 /// CalculateStackSlotAlignment - Calculates the alignment of this argument 3104 /// on the stack. 
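/// For example, Altivec vector arguments get 16-byte alignment, QPX v4f64 and
/// v4i1 arguments get 32-byte alignment, and a byval argument whose requested
/// alignment exceeds the pointer size keeps that requested alignment.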
3105 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, 3106 ISD::ArgFlagsTy Flags, 3107 unsigned PtrByteSize) { 3108 unsigned Align = PtrByteSize; 3109 3110 // Altivec parameters are padded to a 16 byte boundary. 3111 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 3112 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 3113 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 3114 ArgVT == MVT::v1i128) 3115 Align = 16; 3116 // QPX vector types stored in double-precision are padded to a 32 byte 3117 // boundary. 3118 else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1) 3119 Align = 32; 3120 3121 // ByVal parameters are aligned as requested. 3122 if (Flags.isByVal()) { 3123 unsigned BVAlign = Flags.getByValAlign(); 3124 if (BVAlign > PtrByteSize) { 3125 if (BVAlign % PtrByteSize != 0) 3126 llvm_unreachable( 3127 "ByVal alignment is not a multiple of the pointer size"); 3128 3129 Align = BVAlign; 3130 } 3131 } 3132 3133 // Array members are always packed to their original alignment. 3134 if (Flags.isInConsecutiveRegs()) { 3135 // If the array member was split into multiple registers, the first 3136 // needs to be aligned to the size of the full type. (Except for 3137 // ppcf128, which is only aligned as its f64 components.) 3138 if (Flags.isSplit() && OrigVT != MVT::ppcf128) 3139 Align = OrigVT.getStoreSize(); 3140 else 3141 Align = ArgVT.getStoreSize(); 3142 } 3143 3144 return Align; 3145 } 3146 3147 /// CalculateStackSlotUsed - Return whether this argument will use its 3148 /// stack slot (instead of being passed in registers). ArgOffset, 3149 /// AvailableFPRs, and AvailableVRs must hold the current argument 3150 /// position, and will be updated to account for this argument. 3151 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, 3152 ISD::ArgFlagsTy Flags, 3153 unsigned PtrByteSize, 3154 unsigned LinkageSize, 3155 unsigned ParamAreaSize, 3156 unsigned &ArgOffset, 3157 unsigned &AvailableFPRs, 3158 unsigned &AvailableVRs, bool HasQPX) { 3159 bool UseMemory = false; 3160 3161 // Respect alignment of argument on the stack. 3162 unsigned Align = 3163 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 3164 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 3165 // If there's no space left in the argument save area, we must 3166 // use memory (this check also catches zero-sized arguments). 3167 if (ArgOffset >= LinkageSize + ParamAreaSize) 3168 UseMemory = true; 3169 3170 // Allocate argument on the stack. 3171 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 3172 if (Flags.isInConsecutiveRegsLast()) 3173 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3174 // If we overran the argument save area, we must use memory 3175 // (this check catches arguments passed partially in memory) 3176 if (ArgOffset > LinkageSize + ParamAreaSize) 3177 UseMemory = true; 3178 3179 // However, if the argument is actually passed in an FPR or a VR, 3180 // we don't use memory after all. 3181 if (!Flags.isByVal()) { 3182 if (ArgVT == MVT::f32 || ArgVT == MVT::f64 || 3183 // QPX registers overlap with the scalar FP registers. 
3184 (HasQPX && (ArgVT == MVT::v4f32 || 3185 ArgVT == MVT::v4f64 || 3186 ArgVT == MVT::v4i1))) 3187 if (AvailableFPRs > 0) { 3188 --AvailableFPRs; 3189 return false; 3190 } 3191 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 3192 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 3193 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 3194 ArgVT == MVT::v1i128) 3195 if (AvailableVRs > 0) { 3196 --AvailableVRs; 3197 return false; 3198 } 3199 } 3200 3201 return UseMemory; 3202 } 3203 3204 /// EnsureStackAlignment - Round stack frame size up from NumBytes to 3205 /// ensure minimum alignment required for target. 3206 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, 3207 unsigned NumBytes) { 3208 unsigned TargetAlign = Lowering->getStackAlignment(); 3209 unsigned AlignMask = TargetAlign - 1; 3210 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 3211 return NumBytes; 3212 } 3213 3214 SDValue PPCTargetLowering::LowerFormalArguments( 3215 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3216 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3217 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3218 if (Subtarget.isSVR4ABI()) { 3219 if (Subtarget.isPPC64()) 3220 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, 3221 dl, DAG, InVals); 3222 else 3223 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, 3224 dl, DAG, InVals); 3225 } else { 3226 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, 3227 dl, DAG, InVals); 3228 } 3229 } 3230 3231 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4( 3232 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3233 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3234 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3235 3236 // 32-bit SVR4 ABI Stack Frame Layout: 3237 // +-----------------------------------+ 3238 // +--> | Back chain | 3239 // | +-----------------------------------+ 3240 // | | Floating-point register save area | 3241 // | +-----------------------------------+ 3242 // | | General register save area | 3243 // | +-----------------------------------+ 3244 // | | CR save word | 3245 // | +-----------------------------------+ 3246 // | | VRSAVE save word | 3247 // | +-----------------------------------+ 3248 // | | Alignment padding | 3249 // | +-----------------------------------+ 3250 // | | Vector register save area | 3251 // | +-----------------------------------+ 3252 // | | Local variable space | 3253 // | +-----------------------------------+ 3254 // | | Parameter list area | 3255 // | +-----------------------------------+ 3256 // | | LR save word | 3257 // | +-----------------------------------+ 3258 // SP--> +--- | Back chain | 3259 // +-----------------------------------+ 3260 // 3261 // Specifications: 3262 // System V Application Binary Interface PowerPC Processor Supplement 3263 // AltiVec Technology Programming Interface Manual 3264 3265 MachineFunction &MF = DAG.getMachineFunction(); 3266 MachineFrameInfo &MFI = MF.getFrameInfo(); 3267 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3268 3269 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3270 // Potential tail calls could cause overwriting of argument stack slots. 3271 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3272 (CallConv == CallingConv::Fast)); 3273 unsigned PtrByteSize = 4; 3274 3275 // Assign locations to all of the incoming arguments. 
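  // The assignment itself is driven by CC_PPC32_SVR4 from the generated
  // calling-convention tables; the custom handlers defined above are invoked
  // from there to keep i64 values in aligned register pairs and to keep the
  // two f64 halves of a ppc_fp128 together.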
3276 SmallVector<CCValAssign, 16> ArgLocs; 3277 PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 3278 *DAG.getContext()); 3279 3280 // Reserve space for the linkage area on the stack. 3281 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3282 CCInfo.AllocateStack(LinkageSize, PtrByteSize); 3283 if (useSoftFloat()) 3284 CCInfo.PreAnalyzeFormalArguments(Ins); 3285 3286 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 3287 CCInfo.clearWasPPCF128(); 3288 3289 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 3290 CCValAssign &VA = ArgLocs[i]; 3291 3292 // Arguments stored in registers. 3293 if (VA.isRegLoc()) { 3294 const TargetRegisterClass *RC; 3295 EVT ValVT = VA.getValVT(); 3296 3297 switch (ValVT.getSimpleVT().SimpleTy) { 3298 default: 3299 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 3300 case MVT::i1: 3301 case MVT::i32: 3302 RC = &PPC::GPRCRegClass; 3303 break; 3304 case MVT::f32: 3305 if (Subtarget.hasP8Vector()) 3306 RC = &PPC::VSSRCRegClass; 3307 else 3308 RC = &PPC::F4RCRegClass; 3309 break; 3310 case MVT::f64: 3311 if (Subtarget.hasVSX()) 3312 RC = &PPC::VSFRCRegClass; 3313 else 3314 RC = &PPC::F8RCRegClass; 3315 break; 3316 case MVT::v16i8: 3317 case MVT::v8i16: 3318 case MVT::v4i32: 3319 RC = &PPC::VRRCRegClass; 3320 break; 3321 case MVT::v4f32: 3322 RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass; 3323 break; 3324 case MVT::v2f64: 3325 case MVT::v2i64: 3326 RC = &PPC::VRRCRegClass; 3327 break; 3328 case MVT::v4f64: 3329 RC = &PPC::QFRCRegClass; 3330 break; 3331 case MVT::v4i1: 3332 RC = &PPC::QBRCRegClass; 3333 break; 3334 } 3335 3336 // Transform the arguments stored in physical registers into virtual ones. 3337 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 3338 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, 3339 ValVT == MVT::i1 ? MVT::i32 : ValVT); 3340 3341 if (ValVT == MVT::i1) 3342 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); 3343 3344 InVals.push_back(ArgValue); 3345 } else { 3346 // Argument stored in memory. 3347 assert(VA.isMemLoc()); 3348 3349 unsigned ArgSize = VA.getLocVT().getStoreSize(); 3350 int FI = MFI.CreateFixedObject(ArgSize, VA.getLocMemOffset(), 3351 isImmutable); 3352 3353 // Create load nodes to retrieve arguments from the stack. 3354 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3355 InVals.push_back( 3356 DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo())); 3357 } 3358 } 3359 3360 // Assign locations to all of the incoming aggregate by value arguments. 3361 // Aggregates passed by value are stored in the local variable space of the 3362 // caller's stack frame, right above the parameter list area. 3363 SmallVector<CCValAssign, 16> ByValArgLocs; 3364 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 3365 ByValArgLocs, *DAG.getContext()); 3366 3367 // Reserve stack space for the allocations in CCInfo. 3368 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 3369 3370 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 3371 3372 // Area that is at least reserved in the caller of this function. 3373 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 3374 MinReservedArea = std::max(MinReservedArea, LinkageSize); 3375 3376 // Set the size that is at least reserved in caller of this function. Tail 3377 // call optimized function's reserved stack space needs to be aligned so that 3378 // taking the difference between two stack areas will result in an aligned 3379 // stack. 
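  // For example, with a 16-byte target stack alignment a 40-byte reserved
  // area is rounded up to 48 bytes by EnsureStackAlignment below.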
3380 MinReservedArea = 3381 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 3382 FuncInfo->setMinReservedArea(MinReservedArea); 3383 3384 SmallVector<SDValue, 8> MemOps; 3385 3386 // If the function takes variable number of arguments, make a frame index for 3387 // the start of the first vararg value... for expansion of llvm.va_start. 3388 if (isVarArg) { 3389 static const MCPhysReg GPArgRegs[] = { 3390 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3391 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3392 }; 3393 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); 3394 3395 static const MCPhysReg FPArgRegs[] = { 3396 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 3397 PPC::F8 3398 }; 3399 unsigned NumFPArgRegs = array_lengthof(FPArgRegs); 3400 3401 if (useSoftFloat()) 3402 NumFPArgRegs = 0; 3403 3404 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs)); 3405 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs)); 3406 3407 // Make room for NumGPArgRegs and NumFPArgRegs. 3408 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + 3409 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8; 3410 3411 FuncInfo->setVarArgsStackOffset( 3412 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8, 3413 CCInfo.getNextStackOffset(), true)); 3414 3415 FuncInfo->setVarArgsFrameIndex(MFI.CreateStackObject(Depth, 8, false)); 3416 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3417 3418 // The fixed integer arguments of a variadic function are stored to the 3419 // VarArgsFrameIndex on the stack so that they may be loaded by 3420 // dereferencing the result of va_next. 3421 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) { 3422 // Get an existing live-in vreg, or add a new one. 3423 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]); 3424 if (!VReg) 3425 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); 3426 3427 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3428 SDValue Store = 3429 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3430 MemOps.push_back(Store); 3431 // Increment the address by four for the next argument to store 3432 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); 3433 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3434 } 3435 3436 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 3437 // is set. 3438 // The double arguments are stored to the VarArgsFrameIndex 3439 // on the stack. 3440 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) { 3441 // Get an existing live-in vreg, or add a new one. 3442 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); 3443 if (!VReg) 3444 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 3445 3446 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 3447 SDValue Store = 3448 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3449 MemOps.push_back(Store); 3450 // Increment the address by eight for the next argument to store 3451 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl, 3452 PtrVT); 3453 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3454 } 3455 } 3456 3457 if (!MemOps.empty()) 3458 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3459 3460 return Chain; 3461 } 3462 3463 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3464 // value to MVT::i64 and then truncate to the correct register size. 
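// For example, a sign-extended i32 argument arrives in the low 32 bits of an
// i64 GPR; AssertSext records that known sign extension before the TRUNCATE so
// later DAG combines can rely on the upper bits.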
3465 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, 3466 EVT ObjectVT, SelectionDAG &DAG, 3467 SDValue ArgVal, 3468 const SDLoc &dl) const { 3469 if (Flags.isSExt()) 3470 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 3471 DAG.getValueType(ObjectVT)); 3472 else if (Flags.isZExt()) 3473 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 3474 DAG.getValueType(ObjectVT)); 3475 3476 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal); 3477 } 3478 3479 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4( 3480 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3481 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3482 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3483 // TODO: add description of PPC stack frame format, or at least some docs. 3484 // 3485 bool isELFv2ABI = Subtarget.isELFv2ABI(); 3486 bool isLittleEndian = Subtarget.isLittleEndian(); 3487 MachineFunction &MF = DAG.getMachineFunction(); 3488 MachineFrameInfo &MFI = MF.getFrameInfo(); 3489 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3490 3491 assert(!(CallConv == CallingConv::Fast && isVarArg) && 3492 "fastcc not supported on varargs functions"); 3493 3494 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3495 // Potential tail calls could cause overwriting of argument stack slots. 3496 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3497 (CallConv == CallingConv::Fast)); 3498 unsigned PtrByteSize = 8; 3499 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3500 3501 static const MCPhysReg GPR[] = { 3502 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3503 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3504 }; 3505 static const MCPhysReg VR[] = { 3506 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3507 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3508 }; 3509 3510 const unsigned Num_GPR_Regs = array_lengthof(GPR); 3511 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13; 3512 const unsigned Num_VR_Regs = array_lengthof(VR); 3513 const unsigned Num_QFPR_Regs = Num_FPR_Regs; 3514 3515 // Do a first pass over the arguments to determine whether the ABI 3516 // guarantees that our caller has allocated the parameter save area 3517 // on its stack frame. In the ELFv1 ABI, this is always the case; 3518 // in the ELFv2 ABI, it is true if this is a vararg function or if 3519 // any parameter is located in a stack slot. 3520 3521 bool HasParameterArea = !isELFv2ABI || isVarArg; 3522 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize; 3523 unsigned NumBytes = LinkageSize; 3524 unsigned AvailableFPRs = Num_FPR_Regs; 3525 unsigned AvailableVRs = Num_VR_Regs; 3526 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 3527 if (Ins[i].Flags.isNest()) 3528 continue; 3529 3530 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags, 3531 PtrByteSize, LinkageSize, ParamAreaSize, 3532 NumBytes, AvailableFPRs, AvailableVRs, 3533 Subtarget.hasQPX())) 3534 HasParameterArea = true; 3535 } 3536 3537 // Add DAG nodes to load the arguments or copy them out of registers. On 3538 // entry to a function on PPC, the arguments start after the linkage area, 3539 // although the first ones are often in registers. 
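  // (Under the 64-bit ELFv1 ABI the linkage area is 48 bytes; under ELFv2 it
  // is 32 bytes, so that is where the parameter save area begins relative to
  // the incoming stack pointer.)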
3540 3541 unsigned ArgOffset = LinkageSize; 3542 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3543 unsigned &QFPR_idx = FPR_idx; 3544 SmallVector<SDValue, 8> MemOps; 3545 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 3546 unsigned CurArgIdx = 0; 3547 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3548 SDValue ArgVal; 3549 bool needsLoad = false; 3550 EVT ObjectVT = Ins[ArgNo].VT; 3551 EVT OrigVT = Ins[ArgNo].ArgVT; 3552 unsigned ObjSize = ObjectVT.getStoreSize(); 3553 unsigned ArgSize = ObjSize; 3554 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3555 if (Ins[ArgNo].isOrigArg()) { 3556 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 3557 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 3558 } 3559 // We re-align the argument offset for each argument, except when using the 3560 // fast calling convention, when we need to make sure we do that only when 3561 // we'll actually use a stack slot. 3562 unsigned CurArgOffset, Align; 3563 auto ComputeArgOffset = [&]() { 3564 /* Respect alignment of argument on the stack. */ 3565 Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize); 3566 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 3567 CurArgOffset = ArgOffset; 3568 }; 3569 3570 if (CallConv != CallingConv::Fast) { 3571 ComputeArgOffset(); 3572 3573 /* Compute GPR index associated with argument offset. */ 3574 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 3575 GPR_idx = std::min(GPR_idx, Num_GPR_Regs); 3576 } 3577 3578 // FIXME the codegen can be much improved in some cases. 3579 // We do not have to keep everything in memory. 3580 if (Flags.isByVal()) { 3581 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 3582 3583 if (CallConv == CallingConv::Fast) 3584 ComputeArgOffset(); 3585 3586 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3587 ObjSize = Flags.getByValSize(); 3588 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3589 // Empty aggregate parameters do not take up registers. Examples: 3590 // struct { } a; 3591 // union { } b; 3592 // int c[0]; 3593 // etc. However, we have to provide a place-holder in InVals, so 3594 // pretend we have an 8-byte item at the current address for that 3595 // purpose. 3596 if (!ObjSize) { 3597 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true); 3598 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3599 InVals.push_back(FIN); 3600 continue; 3601 } 3602 3603 // Create a stack object covering all stack doublewords occupied 3604 // by the argument. If the argument is (fully or partially) on 3605 // the stack, or if the argument is fully in registers but the 3606 // caller has allocated the parameter save anyway, we can refer 3607 // directly to the caller's stack frame. Otherwise, create a 3608 // local copy in our own frame. 3609 int FI; 3610 if (HasParameterArea || 3611 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize) 3612 FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true); 3613 else 3614 FI = MFI.CreateStackObject(ArgSize, Align, false); 3615 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3616 3617 // Handle aggregates smaller than 8 bytes. 3618 if (ObjSize < PtrByteSize) { 3619 // The value of the object is its address, which differs from the 3620 // address of the enclosing doubleword on big-endian systems. 
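        // For example, a 3-byte aggregate is right-justified in its doubleword
        // on big-endian, so its address is FIN + (8 - 3) = FIN + 5; on
        // little-endian it starts at FIN directly.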
3621 SDValue Arg = FIN; 3622 if (!isLittleEndian) { 3623 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT); 3624 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff); 3625 } 3626 InVals.push_back(Arg); 3627 3628 if (GPR_idx != Num_GPR_Regs) { 3629 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3630 FuncInfo->addLiveInAttr(VReg, Flags); 3631 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3632 SDValue Store; 3633 3634 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 3635 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 3636 (ObjSize == 2 ? MVT::i16 : MVT::i32)); 3637 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg, 3638 MachinePointerInfo(&*FuncArg), ObjType); 3639 } else { 3640 // For sizes that don't fit a truncating store (3, 5, 6, 7), 3641 // store the whole register as-is to the parameter save area 3642 // slot. 3643 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3644 MachinePointerInfo(&*FuncArg)); 3645 } 3646 3647 MemOps.push_back(Store); 3648 } 3649 // Whether we copied from a register or not, advance the offset 3650 // into the parameter save area by a full doubleword. 3651 ArgOffset += PtrByteSize; 3652 continue; 3653 } 3654 3655 // The value of the object is its address, which is the address of 3656 // its first stack doubleword. 3657 InVals.push_back(FIN); 3658 3659 // Store whatever pieces of the object are in registers to memory. 3660 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3661 if (GPR_idx == Num_GPR_Regs) 3662 break; 3663 3664 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3665 FuncInfo->addLiveInAttr(VReg, Flags); 3666 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3667 SDValue Addr = FIN; 3668 if (j) { 3669 SDValue Off = DAG.getConstant(j, dl, PtrVT); 3670 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off); 3671 } 3672 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr, 3673 MachinePointerInfo(&*FuncArg, j)); 3674 MemOps.push_back(Store); 3675 ++GPR_idx; 3676 } 3677 ArgOffset += ArgSize; 3678 continue; 3679 } 3680 3681 switch (ObjectVT.getSimpleVT().SimpleTy) { 3682 default: llvm_unreachable("Unhandled argument type!"); 3683 case MVT::i1: 3684 case MVT::i32: 3685 case MVT::i64: 3686 if (Flags.isNest()) { 3687 // The 'nest' parameter, if any, is passed in R11. 3688 unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass); 3689 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3690 3691 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3692 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3693 3694 break; 3695 } 3696 3697 // These can be scalar arguments or elements of an integer array type 3698 // passed directly. Clang may use those instead of "byval" aggregate 3699 // types to avoid forcing arguments to memory unnecessarily. 3700 if (GPR_idx != Num_GPR_Regs) { 3701 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3702 FuncInfo->addLiveInAttr(VReg, Flags); 3703 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3704 3705 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3706 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3707 // value to MVT::i64 and then truncate to the correct register size. 
3708 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3709 } else { 3710 if (CallConv == CallingConv::Fast) 3711 ComputeArgOffset(); 3712 3713 needsLoad = true; 3714 ArgSize = PtrByteSize; 3715 } 3716 if (CallConv != CallingConv::Fast || needsLoad) 3717 ArgOffset += 8; 3718 break; 3719 3720 case MVT::f32: 3721 case MVT::f64: 3722 // These can be scalar arguments or elements of a float array type 3723 // passed directly. The latter are used to implement ELFv2 homogenous 3724 // float aggregates. 3725 if (FPR_idx != Num_FPR_Regs) { 3726 unsigned VReg; 3727 3728 if (ObjectVT == MVT::f32) 3729 VReg = MF.addLiveIn(FPR[FPR_idx], 3730 Subtarget.hasP8Vector() 3731 ? &PPC::VSSRCRegClass 3732 : &PPC::F4RCRegClass); 3733 else 3734 VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX() 3735 ? &PPC::VSFRCRegClass 3736 : &PPC::F8RCRegClass); 3737 3738 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3739 ++FPR_idx; 3740 } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) { 3741 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 3742 // once we support fp <-> gpr moves. 3743 3744 // This can only ever happen in the presence of f32 array types, 3745 // since otherwise we never run out of FPRs before running out 3746 // of GPRs. 3747 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3748 FuncInfo->addLiveInAttr(VReg, Flags); 3749 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3750 3751 if (ObjectVT == MVT::f32) { 3752 if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0)) 3753 ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal, 3754 DAG.getConstant(32, dl, MVT::i32)); 3755 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); 3756 } 3757 3758 ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal); 3759 } else { 3760 if (CallConv == CallingConv::Fast) 3761 ComputeArgOffset(); 3762 3763 needsLoad = true; 3764 } 3765 3766 // When passing an array of floats, the array occupies consecutive 3767 // space in the argument area; only round up to the next doubleword 3768 // at the end of the array. Otherwise, each float takes 8 bytes. 3769 if (CallConv != CallingConv::Fast || needsLoad) { 3770 ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize; 3771 ArgOffset += ArgSize; 3772 if (Flags.isInConsecutiveRegsLast()) 3773 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3774 } 3775 break; 3776 case MVT::v4f32: 3777 case MVT::v4i32: 3778 case MVT::v8i16: 3779 case MVT::v16i8: 3780 case MVT::v2f64: 3781 case MVT::v2i64: 3782 case MVT::v1i128: 3783 if (!Subtarget.hasQPX()) { 3784 // These can be scalar arguments or elements of a vector array type 3785 // passed directly. The latter are used to implement ELFv2 homogenous 3786 // vector aggregates. 3787 if (VR_idx != Num_VR_Regs) { 3788 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 3789 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3790 ++VR_idx; 3791 } else { 3792 if (CallConv == CallingConv::Fast) 3793 ComputeArgOffset(); 3794 3795 needsLoad = true; 3796 } 3797 if (CallConv != CallingConv::Fast || needsLoad) 3798 ArgOffset += 16; 3799 break; 3800 } // not QPX 3801 3802 assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 && 3803 "Invalid QPX parameter type"); 3804 /* fall through */ 3805 3806 case MVT::v4f64: 3807 case MVT::v4i1: 3808 // QPX vectors are treated like their scalar floating-point subregisters 3809 // (except that they're larger). 3810 unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 
16 : 32; 3811 if (QFPR_idx != Num_QFPR_Regs) { 3812 const TargetRegisterClass *RC; 3813 switch (ObjectVT.getSimpleVT().SimpleTy) { 3814 case MVT::v4f64: RC = &PPC::QFRCRegClass; break; 3815 case MVT::v4f32: RC = &PPC::QSRCRegClass; break; 3816 default: RC = &PPC::QBRCRegClass; break; 3817 } 3818 3819 unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC); 3820 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3821 ++QFPR_idx; 3822 } else { 3823 if (CallConv == CallingConv::Fast) 3824 ComputeArgOffset(); 3825 needsLoad = true; 3826 } 3827 if (CallConv != CallingConv::Fast || needsLoad) 3828 ArgOffset += Sz; 3829 break; 3830 } 3831 3832 // We need to load the argument to a virtual register if we determined 3833 // above that we ran out of physical registers of the appropriate type. 3834 if (needsLoad) { 3835 if (ObjSize < ArgSize && !isLittleEndian) 3836 CurArgOffset += ArgSize - ObjSize; 3837 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable); 3838 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3839 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo()); 3840 } 3841 3842 InVals.push_back(ArgVal); 3843 } 3844 3845 // Area that is at least reserved in the caller of this function. 3846 unsigned MinReservedArea; 3847 if (HasParameterArea) 3848 MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize); 3849 else 3850 MinReservedArea = LinkageSize; 3851 3852 // Set the size that is at least reserved in caller of this function. Tail 3853 // call optimized functions' reserved stack space needs to be aligned so that 3854 // taking the difference between two stack areas will result in an aligned 3855 // stack. 3856 MinReservedArea = 3857 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 3858 FuncInfo->setMinReservedArea(MinReservedArea); 3859 3860 // If the function takes variable number of arguments, make a frame index for 3861 // the start of the first vararg value... for expansion of llvm.va_start. 3862 if (isVarArg) { 3863 int Depth = ArgOffset; 3864 3865 FuncInfo->setVarArgsFrameIndex( 3866 MFI.CreateFixedObject(PtrByteSize, Depth, true)); 3867 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3868 3869 // If this function is vararg, store any remaining integer argument regs 3870 // to their spots on the stack so that they may be loaded by dereferencing 3871 // the result of va_next. 3872 for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 3873 GPR_idx < Num_GPR_Regs; ++GPR_idx) { 3874 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3875 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3876 SDValue Store = 3877 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3878 MemOps.push_back(Store); 3879 // Increment the address by four for the next argument to store 3880 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT); 3881 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3882 } 3883 } 3884 3885 if (!MemOps.empty()) 3886 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3887 3888 return Chain; 3889 } 3890 3891 SDValue PPCTargetLowering::LowerFormalArguments_Darwin( 3892 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3893 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3894 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3895 // TODO: add description of PPC stack frame format, or at least some docs. 
3896 // 3897 MachineFunction &MF = DAG.getMachineFunction(); 3898 MachineFrameInfo &MFI = MF.getFrameInfo(); 3899 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3900 3901 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3902 bool isPPC64 = PtrVT == MVT::i64; 3903 // Potential tail calls could cause overwriting of argument stack slots. 3904 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3905 (CallConv == CallingConv::Fast)); 3906 unsigned PtrByteSize = isPPC64 ? 8 : 4; 3907 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3908 unsigned ArgOffset = LinkageSize; 3909 // Area that is at least reserved in caller of this function. 3910 unsigned MinReservedArea = ArgOffset; 3911 3912 static const MCPhysReg GPR_32[] = { // 32-bit registers. 3913 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3914 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3915 }; 3916 static const MCPhysReg GPR_64[] = { // 64-bit registers. 3917 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3918 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3919 }; 3920 static const MCPhysReg VR[] = { 3921 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3922 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3923 }; 3924 3925 const unsigned Num_GPR_Regs = array_lengthof(GPR_32); 3926 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13; 3927 const unsigned Num_VR_Regs = array_lengthof( VR); 3928 3929 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3930 3931 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 3932 3933 // In 32-bit non-varargs functions, the stack space for vectors is after the 3934 // stack space for non-vectors. We do not use this space unless we have 3935 // too many vectors to fit in registers, something that only occurs in 3936 // constructed examples:), but we have to walk the arglist to figure 3937 // that out...for the pathological case, compute VecArgOffset as the 3938 // start of the vector parameter area. Computing VecArgOffset is the 3939 // entire point of the following loop. 3940 unsigned VecArgOffset = ArgOffset; 3941 if (!isVarArg && !isPPC64) { 3942 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; 3943 ++ArgNo) { 3944 EVT ObjectVT = Ins[ArgNo].VT; 3945 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3946 3947 if (Flags.isByVal()) { 3948 // ObjSize is the true size, ArgSize rounded up to multiple of regs. 3949 unsigned ObjSize = Flags.getByValSize(); 3950 unsigned ArgSize = 3951 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3952 VecArgOffset += ArgSize; 3953 continue; 3954 } 3955 3956 switch(ObjectVT.getSimpleVT().SimpleTy) { 3957 default: llvm_unreachable("Unhandled argument type!"); 3958 case MVT::i1: 3959 case MVT::i32: 3960 case MVT::f32: 3961 VecArgOffset += 4; 3962 break; 3963 case MVT::i64: // PPC64 3964 case MVT::f64: 3965 // FIXME: We are guaranteed to be !isPPC64 at this point. 3966 // Does MVT::i64 apply? 3967 VecArgOffset += 8; 3968 break; 3969 case MVT::v4f32: 3970 case MVT::v4i32: 3971 case MVT::v8i16: 3972 case MVT::v16i8: 3973 // Nothing to do, we're only looking at Nonvector args here. 3974 break; 3975 } 3976 } 3977 } 3978 // We've found where the vector parameter area in memory is. Skip the 3979 // first 12 parameters; these don't use that memory. 3980 VecArgOffset = ((VecArgOffset+15)/16)*16; 3981 VecArgOffset += 12*16; 3982 3983 // Add DAG nodes to load the arguments or copy them out of registers. On 3984 // entry to a function on PPC, the arguments start after the linkage area, 3985 // although the first ones are often in registers. 
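  // Note that under the Darwin ABI every integer and FP argument reserves
  // parameter-area space even when it is passed in a register, so ArgOffset
  // advances below for register-resident arguments as well.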
3986 3987 SmallVector<SDValue, 8> MemOps; 3988 unsigned nAltivecParamsAtEnd = 0; 3989 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 3990 unsigned CurArgIdx = 0; 3991 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3992 SDValue ArgVal; 3993 bool needsLoad = false; 3994 EVT ObjectVT = Ins[ArgNo].VT; 3995 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 3996 unsigned ArgSize = ObjSize; 3997 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3998 if (Ins[ArgNo].isOrigArg()) { 3999 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 4000 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 4001 } 4002 unsigned CurArgOffset = ArgOffset; 4003 4004 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 4005 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 4006 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 4007 if (isVarArg || isPPC64) { 4008 MinReservedArea = ((MinReservedArea+15)/16)*16; 4009 MinReservedArea += CalculateStackSlotSize(ObjectVT, 4010 Flags, 4011 PtrByteSize); 4012 } else nAltivecParamsAtEnd++; 4013 } else 4014 // Calculate min reserved area. 4015 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 4016 Flags, 4017 PtrByteSize); 4018 4019 // FIXME the codegen can be much improved in some cases. 4020 // We do not have to keep everything in memory. 4021 if (Flags.isByVal()) { 4022 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 4023 4024 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 4025 ObjSize = Flags.getByValSize(); 4026 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 4027 // Objects of size 1 and 2 are right justified, everything else is 4028 // left justified. This means the memory address is adjusted forwards. 4029 if (ObjSize==1 || ObjSize==2) { 4030 CurArgOffset = CurArgOffset + (4 - ObjSize); 4031 } 4032 // The value of the object is its address. 4033 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true); 4034 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4035 InVals.push_back(FIN); 4036 if (ObjSize==1 || ObjSize==2) { 4037 if (GPR_idx != Num_GPR_Regs) { 4038 unsigned VReg; 4039 if (isPPC64) 4040 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4041 else 4042 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4043 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4044 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16; 4045 SDValue Store = 4046 DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 4047 MachinePointerInfo(&*FuncArg), ObjType); 4048 MemOps.push_back(Store); 4049 ++GPR_idx; 4050 } 4051 4052 ArgOffset += PtrByteSize; 4053 4054 continue; 4055 } 4056 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 4057 // Store whatever pieces of the object are in registers 4058 // to memory. ArgOffset will be the address of the beginning 4059 // of the object. 
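        // Each iteration spills one pointer-sized piece from a GPR; once the
        // GPRs run out, the rest of the aggregate already lives in its
        // caller-allocated slot, so we simply advance ArgOffset past it.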
4060 if (GPR_idx != Num_GPR_Regs) { 4061 unsigned VReg; 4062 if (isPPC64) 4063 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4064 else 4065 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4066 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true); 4067 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4068 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4069 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 4070 MachinePointerInfo(&*FuncArg, j)); 4071 MemOps.push_back(Store); 4072 ++GPR_idx; 4073 ArgOffset += PtrByteSize; 4074 } else { 4075 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 4076 break; 4077 } 4078 } 4079 continue; 4080 } 4081 4082 switch (ObjectVT.getSimpleVT().SimpleTy) { 4083 default: llvm_unreachable("Unhandled argument type!"); 4084 case MVT::i1: 4085 case MVT::i32: 4086 if (!isPPC64) { 4087 if (GPR_idx != Num_GPR_Regs) { 4088 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4089 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 4090 4091 if (ObjectVT == MVT::i1) 4092 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal); 4093 4094 ++GPR_idx; 4095 } else { 4096 needsLoad = true; 4097 ArgSize = PtrByteSize; 4098 } 4099 // All int arguments reserve stack space in the Darwin ABI. 4100 ArgOffset += PtrByteSize; 4101 break; 4102 } 4103 LLVM_FALLTHROUGH; 4104 case MVT::i64: // PPC64 4105 if (GPR_idx != Num_GPR_Regs) { 4106 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4107 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 4108 4109 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 4110 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 4111 // value to MVT::i64 and then truncate to the correct register size. 4112 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 4113 4114 ++GPR_idx; 4115 } else { 4116 needsLoad = true; 4117 ArgSize = PtrByteSize; 4118 } 4119 // All int arguments reserve stack space in the Darwin ABI. 4120 ArgOffset += 8; 4121 break; 4122 4123 case MVT::f32: 4124 case MVT::f64: 4125 // Every 4 bytes of argument space consumes one of the GPRs available for 4126 // argument passing. 4127 if (GPR_idx != Num_GPR_Regs) { 4128 ++GPR_idx; 4129 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 4130 ++GPR_idx; 4131 } 4132 if (FPR_idx != Num_FPR_Regs) { 4133 unsigned VReg; 4134 4135 if (ObjectVT == MVT::f32) 4136 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 4137 else 4138 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 4139 4140 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4141 ++FPR_idx; 4142 } else { 4143 needsLoad = true; 4144 } 4145 4146 // All FP arguments reserve stack space in the Darwin ABI. 4147 ArgOffset += isPPC64 ? 8 : ObjSize; 4148 break; 4149 case MVT::v4f32: 4150 case MVT::v4i32: 4151 case MVT::v8i16: 4152 case MVT::v16i8: 4153 // Note that vector arguments in registers don't reserve stack space, 4154 // except in varargs functions. 4155 if (VR_idx != Num_VR_Regs) { 4156 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 4157 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4158 if (isVarArg) { 4159 while ((ArgOffset % 16) != 0) { 4160 ArgOffset += PtrByteSize; 4161 if (GPR_idx != Num_GPR_Regs) 4162 GPR_idx++; 4163 } 4164 ArgOffset += 16; 4165 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 4166 } 4167 ++VR_idx; 4168 } else { 4169 if (!isVarArg && !isPPC64) { 4170 // Vectors go after all the nonvectors. 
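          // i.e. this vector is placed at VecArgOffset, which the pre-pass
          // above positioned past the non-vector arguments and the 12*16 bytes
          // skipped for the first twelve (register-passed) vector parameters.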
4171 CurArgOffset = VecArgOffset; 4172 VecArgOffset += 16; 4173 } else { 4174 // Vectors are aligned. 4175 ArgOffset = ((ArgOffset+15)/16)*16; 4176 CurArgOffset = ArgOffset; 4177 ArgOffset += 16; 4178 } 4179 needsLoad = true; 4180 } 4181 break; 4182 } 4183 4184 // We need to load the argument to a virtual register if we determined above 4185 // that we ran out of physical registers of the appropriate type. 4186 if (needsLoad) { 4187 int FI = MFI.CreateFixedObject(ObjSize, 4188 CurArgOffset + (ArgSize - ObjSize), 4189 isImmutable); 4190 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4191 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo()); 4192 } 4193 4194 InVals.push_back(ArgVal); 4195 } 4196 4197 // Allow for Altivec parameters at the end, if needed. 4198 if (nAltivecParamsAtEnd) { 4199 MinReservedArea = ((MinReservedArea+15)/16)*16; 4200 MinReservedArea += 16*nAltivecParamsAtEnd; 4201 } 4202 4203 // Area that is at least reserved in the caller of this function. 4204 MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize); 4205 4206 // Set the size that is at least reserved in caller of this function. Tail 4207 // call optimized functions' reserved stack space needs to be aligned so that 4208 // taking the difference between two stack areas will result in an aligned 4209 // stack. 4210 MinReservedArea = 4211 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 4212 FuncInfo->setMinReservedArea(MinReservedArea); 4213 4214 // If the function takes variable number of arguments, make a frame index for 4215 // the start of the first vararg value... for expansion of llvm.va_start. 4216 if (isVarArg) { 4217 int Depth = ArgOffset; 4218 4219 FuncInfo->setVarArgsFrameIndex( 4220 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8, 4221 Depth, true)); 4222 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 4223 4224 // If this function is vararg, store any remaining integer argument regs 4225 // to their spots on the stack so that they may be loaded by dereferencing 4226 // the result of va_next. 4227 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { 4228 unsigned VReg; 4229 4230 if (isPPC64) 4231 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4232 else 4233 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 4234 4235 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4236 SDValue Store = 4237 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 4238 MemOps.push_back(Store); 4239 // Increment the address by four for the next argument to store 4240 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); 4241 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 4242 } 4243 } 4244 4245 if (!MemOps.empty()) 4246 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 4247 4248 return Chain; 4249 } 4250 4251 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be 4252 /// adjusted to accommodate the arguments for the tailcall. 4253 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall, 4254 unsigned ParamSize) { 4255 4256 if (!isTailCall) return 0; 4257 4258 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>(); 4259 unsigned CallerMinReservedArea = FI->getMinReservedArea(); 4260 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize; 4261 // Remember only if the new adjustement is bigger. 
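  // SPDiff is negative when the callee needs a larger parameter area than the
  // caller reserved; the smallest (most negative) value seen is remembered in
  // TailCallSPDelta.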
  if (SPDiff < FI->getTailCallSPDelta())
    FI->setTailCallSPDelta(SPDiff);

  return SPDiff;
}

static bool isFunctionGlobalAddress(SDValue Callee);

static bool
callsShareTOCBase(const Function *Caller, SDValue Callee,
                  const TargetMachine &TM) {
  // If !G, Callee can be an external symbol.
  GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
  if (!G)
    return false;

  // The medium and large code models are expected to provide a sufficiently
  // large TOC to provide all data addressing needs of a module with a
  // single TOC. Since each module is addressed through a single TOC, we only
  // need to check that caller and callee don't cross dso boundaries.
  if (CodeModel::Medium == TM.getCodeModel() ||
      CodeModel::Large == TM.getCodeModel())
    return TM.shouldAssumeDSOLocal(*Caller->getParent(), G->getGlobal());

  // Otherwise we need to ensure callee and caller are in the same section,
  // since the linker may allocate multiple TOCs, and we don't know which
  // sections will belong to the same TOC base.

  const GlobalValue *GV = G->getGlobal();
  if (!GV->isStrongDefinitionForLinker())
    return false;

  // Any explicitly-specified sections and section prefixes must also match.
  // Also, if we're using -ffunction-sections, then each function is always in
  // a different section (the same is true for COMDAT functions).
  if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
      GV->getSection() != Caller->getSection())
    return false;
  if (const auto *F = dyn_cast<Function>(GV)) {
    if (F->getSectionPrefix() != Caller->getSectionPrefix())
      return false;
  }

  // If the callee might be interposed, then we can't assume the ultimate call
  // target will be in the same section. Even in cases where we can assume that
  // interposition won't happen, in any case where the linker might insert a
  // stub to allow for interposition, we must generate code as though
  // interposition might occur. To understand why this matters, consider a
  // situation where: a -> b -> c where the arrows indicate calls. b and c are
  // in the same section, but a is in a different module (i.e. has a different
  // TOC base pointer). If the linker allows for interposition between b and c,
  // then it will generate a stub for the call edge between b and c which will
  // save the TOC pointer into the designated stack slot allocated by b. If we
  // return true here, and therefore allow a tail call between b and c, that
  // stack slot won't exist and the b -> c stub will end up saving b's TOC base
  // pointer into the stack slot allocated by a (where the a -> b stub saved
  // a's TOC base pointer). Even if we're not considering a tail call, but only
  // whether a nop is needed after the call instruction in b, the linker may
  // insert such a stub and complain about a missing nop if we omit it
  // (although many linkers don't complain in this case).
4322 if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV)) 4323 return false; 4324 4325 return true; 4326 } 4327 4328 static bool 4329 needStackSlotPassParameters(const PPCSubtarget &Subtarget, 4330 const SmallVectorImpl<ISD::OutputArg> &Outs) { 4331 assert(Subtarget.isSVR4ABI() && Subtarget.isPPC64()); 4332 4333 const unsigned PtrByteSize = 8; 4334 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 4335 4336 static const MCPhysReg GPR[] = { 4337 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 4338 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 4339 }; 4340 static const MCPhysReg VR[] = { 4341 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 4342 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 4343 }; 4344 4345 const unsigned NumGPRs = array_lengthof(GPR); 4346 const unsigned NumFPRs = 13; 4347 const unsigned NumVRs = array_lengthof(VR); 4348 const unsigned ParamAreaSize = NumGPRs * PtrByteSize; 4349 4350 unsigned NumBytes = LinkageSize; 4351 unsigned AvailableFPRs = NumFPRs; 4352 unsigned AvailableVRs = NumVRs; 4353 4354 for (const ISD::OutputArg& Param : Outs) { 4355 if (Param.Flags.isNest()) continue; 4356 4357 if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags, 4358 PtrByteSize, LinkageSize, ParamAreaSize, 4359 NumBytes, AvailableFPRs, AvailableVRs, 4360 Subtarget.hasQPX())) 4361 return true; 4362 } 4363 return false; 4364 } 4365 4366 static bool 4367 hasSameArgumentList(const Function *CallerFn, ImmutableCallSite CS) { 4368 if (CS.arg_size() != CallerFn->arg_size()) 4369 return false; 4370 4371 ImmutableCallSite::arg_iterator CalleeArgIter = CS.arg_begin(); 4372 ImmutableCallSite::arg_iterator CalleeArgEnd = CS.arg_end(); 4373 Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin(); 4374 4375 for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) { 4376 const Value* CalleeArg = *CalleeArgIter; 4377 const Value* CallerArg = &(*CallerArgIter); 4378 if (CalleeArg == CallerArg) 4379 continue; 4380 4381 // e.g. @caller([4 x i64] %a, [4 x i64] %b) { 4382 // tail call @callee([4 x i64] undef, [4 x i64] %b) 4383 // } 4384 // 1st argument of callee is undef and has the same type as caller. 4385 if (CalleeArg->getType() == CallerArg->getType() && 4386 isa<UndefValue>(CalleeArg)) 4387 continue; 4388 4389 return false; 4390 } 4391 4392 return true; 4393 } 4394 4395 // Returns true if TCO is possible between the callers and callees 4396 // calling conventions. 4397 static bool 4398 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC, 4399 CallingConv::ID CalleeCC) { 4400 // tail calls are possible with fastcc and ccc. 4401 auto isTailCallableCC = [] (CallingConv::ID CC){ 4402 return CC == CallingConv::C || CC == CallingConv::Fast; 4403 }; 4404 if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC)) 4405 return false; 4406 4407 // We can safely tail call both fastcc and ccc callees from a c calling 4408 // convention caller. If the caller is fastcc, we may have less stack space 4409 // then a non-fastcc caller with the same signature so disable tail-calls in 4410 // that case. 
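  // For example, a C caller may tail call either a C or a fastcc callee, and a
  // fastcc caller may tail call a fastcc callee, but a fastcc caller tail
  // calling a C callee is rejected.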
  return CallerCC == CallingConv::C || CallerCC == CalleeCC;
}

bool
PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
                                    SDValue Callee,
                                    CallingConv::ID CalleeCC,
                                    ImmutableCallSite CS,
                                    bool isVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SelectionDAG& DAG) const {
  bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;

  if (DisableSCO && !TailCallOpt) return false;

  // Variadic argument functions are not supported.
  if (isVarArg) return false;

  auto *Caller = DAG.getMachineFunction().getFunction();
  // Check that the calling conventions are compatible for TCO.
  if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(), CalleeCC))
    return false;

  // Callers with any byval parameter are not supported.
  if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
    return false;

  // Callees with any byval parameter are not supported either.
  // Note: This is a quick workaround, because in some cases, e.g. when the
  // caller's stack size > the callee's stack size, we are still able to apply
  // sibling call optimization. See: https://reviews.llvm.org/D23441#513574
  if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
    return false;

  // No TCO/SCO on indirect calls because the caller has to restore its TOC.
  if (!isFunctionGlobalAddress(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee))
    return false;

  // If the caller and callee potentially have different TOC bases then we
  // cannot tail call since we need to restore the TOC pointer after the call.
  // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
  if (!callsShareTOCBase(Caller, Callee, getTargetMachine()))
    return false;

  // TCO allows altering the callee ABI, so we don't have to check further.
  if (CalleeCC == CallingConv::Fast && TailCallOpt)
    return true;

  if (DisableSCO) return false;

  // If the callee uses the same argument list as the caller, then we can apply
  // SCO in this case. If not, we need to check whether the callee needs stack
  // slots for passing arguments.
  if (!hasSameArgumentList(Caller, CS) &&
      needStackSlotPassParameters(Subtarget, Outs)) {
    return false;
  }

  return true;
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool
PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                                     CallingConv::ID CalleeCC,
                                                     bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                                     SelectionDAG& DAG) const {
  if (!getTargetMachine().Options.GuaranteedTailCallOpt)
    return false;

  // Variable argument functions are not supported.
  if (isVarArg)
    return false;

  MachineFunction &MF = DAG.getMachineFunction();
  CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
  if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
    // Functions containing byval parameters are not supported.
    for (unsigned i = 0; i != Ins.size(); i++) {
      ISD::ArgFlagsTy Flags = Ins[i].Flags;
      if (Flags.isByVal()) return false;
    }

    // Non-PIC/GOT tail calls are supported.
4500 if (getTargetMachine().getRelocationModel() != Reloc::PIC_) 4501 return true; 4502 4503 // At the moment we can only do local tail calls (in same module, hidden 4504 // or protected) if we are generating PIC. 4505 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 4506 return G->getGlobal()->hasHiddenVisibility() 4507 || G->getGlobal()->hasProtectedVisibility(); 4508 } 4509 4510 return false; 4511 } 4512 4513 /// isCallCompatibleAddress - Return the immediate to use if the specified 4514 /// 32-bit value is representable in the immediate field of a BxA instruction. 4515 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) { 4516 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 4517 if (!C) return nullptr; 4518 4519 int Addr = C->getZExtValue(); 4520 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero. 4521 SignExtend32<26>(Addr) != Addr) 4522 return nullptr; // Top 6 bits have to be sext of immediate. 4523 4524 return DAG 4525 .getConstant( 4526 (int)C->getZExtValue() >> 2, SDLoc(Op), 4527 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout())) 4528 .getNode(); 4529 } 4530 4531 namespace { 4532 4533 struct TailCallArgumentInfo { 4534 SDValue Arg; 4535 SDValue FrameIdxOp; 4536 int FrameIdx = 0; 4537 4538 TailCallArgumentInfo() = default; 4539 }; 4540 4541 } // end anonymous namespace 4542 4543 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. 4544 static void StoreTailCallArgumentsToStackSlot( 4545 SelectionDAG &DAG, SDValue Chain, 4546 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs, 4547 SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) { 4548 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 4549 SDValue Arg = TailCallArgs[i].Arg; 4550 SDValue FIN = TailCallArgs[i].FrameIdxOp; 4551 int FI = TailCallArgs[i].FrameIdx; 4552 // Store relative to framepointer. 4553 MemOpChains.push_back(DAG.getStore( 4554 Chain, dl, Arg, FIN, 4555 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI))); 4556 } 4557 } 4558 4559 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 4560 /// the appropriate stack slot for the tail call optimized function call. 4561 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain, 4562 SDValue OldRetAddr, SDValue OldFP, 4563 int SPDiff, const SDLoc &dl) { 4564 if (SPDiff) { 4565 // Calculate the new stack slot for the return address. 4566 MachineFunction &MF = DAG.getMachineFunction(); 4567 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); 4568 const PPCFrameLowering *FL = Subtarget.getFrameLowering(); 4569 bool isPPC64 = Subtarget.isPPC64(); 4570 int SlotSize = isPPC64 ? 8 : 4; 4571 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset(); 4572 int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize, 4573 NewRetAddrLoc, true); 4574 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 4575 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 4576 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx, 4577 MachinePointerInfo::getFixedStack(MF, NewRetAddr)); 4578 4579 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack 4580 // slot as the FP is never overwritten. 
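    // (Under the Darwin ABI, by contrast, the saved frame pointer must be
    // re-stored at SPDiff + getFramePointerSaveOffset(), mirroring what was
    // just done for the return address.)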
4581 if (Subtarget.isDarwinABI()) { 4582 int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset(); 4583 int NewFPIdx = MF.getFrameInfo().CreateFixedObject(SlotSize, NewFPLoc, 4584 true); 4585 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 4586 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx, 4587 MachinePointerInfo::getFixedStack( 4588 DAG.getMachineFunction(), NewFPIdx)); 4589 } 4590 } 4591 return Chain; 4592 } 4593 4594 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate 4595 /// the position of the argument. 4596 static void 4597 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 4598 SDValue Arg, int SPDiff, unsigned ArgOffset, 4599 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) { 4600 int Offset = ArgOffset + SPDiff; 4601 uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8; 4602 int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true); 4603 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 4604 SDValue FIN = DAG.getFrameIndex(FI, VT); 4605 TailCallArgumentInfo Info; 4606 Info.Arg = Arg; 4607 Info.FrameIdxOp = FIN; 4608 Info.FrameIdx = FI; 4609 TailCallArguments.push_back(Info); 4610 } 4611 4612 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address 4613 /// stack slot. Returns the chain as result and the loaded frame pointers in 4614 /// LROpOut/FPOpout. Used when tail calling. 4615 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr( 4616 SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut, 4617 SDValue &FPOpOut, const SDLoc &dl) const { 4618 if (SPDiff) { 4619 // Load the LR and FP stack slot for later adjusting. 4620 EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32; 4621 LROpOut = getReturnAddrFrameIndex(DAG); 4622 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo()); 4623 Chain = SDValue(LROpOut.getNode(), 1); 4624 4625 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack 4626 // slot as the FP is never overwritten. 4627 if (Subtarget.isDarwinABI()) { 4628 FPOpOut = getFramePointerFrameIndex(DAG); 4629 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo()); 4630 Chain = SDValue(FPOpOut.getNode(), 1); 4631 } 4632 } 4633 return Chain; 4634 } 4635 4636 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 4637 /// by "Src" to address "Dst" of size "Size". Alignment information is 4638 /// specified by the specific parameter attribute. The copy will be passed as 4639 /// a byval function parameter. 4640 /// Sometimes what we are copying is the end of a larger object, the part that 4641 /// does not fit in registers. 4642 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, 4643 SDValue Chain, ISD::ArgFlagsTy Flags, 4644 SelectionDAG &DAG, const SDLoc &dl) { 4645 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32); 4646 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 4647 false, false, false, MachinePointerInfo(), 4648 MachinePointerInfo()); 4649 } 4650 4651 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 4652 /// tail calls. 
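/// For non-tail calls the argument is simply stored at PtrOff (recomputing the
/// address from the stack pointer and ArgOffset for vector arguments); for
/// tail calls the argument and its destination frame slot are recorded in
/// TailCallArguments and stored later by StoreTailCallArgumentsToStackSlot().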
4653 static void LowerMemOpCallTo( 4654 SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, 4655 SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, 4656 bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains, 4657 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) { 4658 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4659 if (!isTailCall) { 4660 if (isVector) { 4661 SDValue StackPtr; 4662 if (isPPC64) 4663 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 4664 else 4665 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 4666 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4667 DAG.getConstant(ArgOffset, dl, PtrVT)); 4668 } 4669 MemOpChains.push_back( 4670 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 4671 // Calculate and remember argument location. 4672 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 4673 TailCallArguments); 4674 } 4675 4676 static void 4677 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 4678 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp, 4679 SDValue FPOp, 4680 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 4681 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 4682 // might overwrite each other in case of tail call optimization. 4683 SmallVector<SDValue, 8> MemOpChains2; 4684 // Do not flag preceding copytoreg stuff together with the following stuff. 4685 InFlag = SDValue(); 4686 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 4687 MemOpChains2, dl); 4688 if (!MemOpChains2.empty()) 4689 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); 4690 4691 // Store the return address to the appropriate stack slot. 4692 Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl); 4693 4694 // Emit callseq_end just before tailcall node. 4695 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4696 DAG.getIntPtrConstant(0, dl, true), InFlag, dl); 4697 InFlag = Chain.getValue(1); 4698 } 4699 4700 // Is this global address that of a function that can be called by name? (as 4701 // opposed to something that must hold a descriptor for an indirect call). 4702 static bool isFunctionGlobalAddress(SDValue Callee) { 4703 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 4704 if (Callee.getOpcode() == ISD::GlobalTLSAddress || 4705 Callee.getOpcode() == ISD::TargetGlobalTLSAddress) 4706 return false; 4707 4708 return G->getGlobal()->getValueType()->isFunctionTy(); 4709 } 4710 4711 return false; 4712 } 4713 4714 static unsigned 4715 PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain, 4716 SDValue CallSeqStart, const SDLoc &dl, int SPDiff, bool isTailCall, 4717 bool isPatchPoint, bool hasNest, 4718 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, 4719 SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys, 4720 ImmutableCallSite CS, const PPCSubtarget &Subtarget) { 4721 bool isPPC64 = Subtarget.isPPC64(); 4722 bool isSVR4ABI = Subtarget.isSVR4ABI(); 4723 bool isELFv2ABI = Subtarget.isELFv2ABI(); 4724 4725 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4726 NodeTys.push_back(MVT::Other); // Returns a chain 4727 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. 
4728 4729 unsigned CallOpc = PPCISD::CALL; 4730 4731 bool needIndirectCall = true; 4732 if (!isSVR4ABI || !isPPC64) 4733 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { 4734 // If this is an absolute destination address, use the munged value. 4735 Callee = SDValue(Dest, 0); 4736 needIndirectCall = false; 4737 } 4738 4739 // PC-relative references to external symbols should go through $stub, unless 4740 // we're building with the leopard linker or later, which automatically 4741 // synthesizes these stubs. 4742 const TargetMachine &TM = DAG.getTarget(); 4743 const Module *Mod = DAG.getMachineFunction().getFunction()->getParent(); 4744 const GlobalValue *GV = nullptr; 4745 if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) 4746 GV = G->getGlobal(); 4747 bool Local = TM.shouldAssumeDSOLocal(*Mod, GV); 4748 bool UsePlt = !Local && Subtarget.isTargetELF() && !isPPC64; 4749 4750 if (isFunctionGlobalAddress(Callee)) { 4751 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee); 4752 // A call to a TLS address is actually an indirect call to a 4753 // thread-specific pointer. 4754 unsigned OpFlags = 0; 4755 if (UsePlt) 4756 OpFlags = PPCII::MO_PLT; 4757 4758 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, 4759 // every direct call is) turn it into a TargetGlobalAddress / 4760 // TargetExternalSymbol node so that legalize doesn't hack it. 4761 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, 4762 Callee.getValueType(), 0, OpFlags); 4763 needIndirectCall = false; 4764 } 4765 4766 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 4767 unsigned char OpFlags = 0; 4768 4769 if (UsePlt) 4770 OpFlags = PPCII::MO_PLT; 4771 4772 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(), 4773 OpFlags); 4774 needIndirectCall = false; 4775 } 4776 4777 if (isPatchPoint) { 4778 // We'll form an invalid direct call when lowering a patchpoint; the full 4779 // sequence for an indirect call is complicated, and many of the 4780 // instructions introduced might have side effects (and, thus, can't be 4781 // removed later). The call itself will be removed as soon as the 4782 // argument/return lowering is complete, so the fact that it has the wrong 4783 // kind of operands should not really matter. 4784 needIndirectCall = false; 4785 } 4786 4787 if (needIndirectCall) { 4788 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 4789 // to do the call, we can't use PPCISD::CALL. 4790 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 4791 4792 if (isSVR4ABI && isPPC64 && !isELFv2ABI) { 4793 // Function pointers in the 64-bit SVR4 ABI do not point to the function 4794 // entry point, but to the function descriptor (the function entry point 4795 // address is part of the function descriptor though). 4796 // The function descriptor is a three doubleword structure with the 4797 // following fields: function entry point, TOC base address and 4798 // environment pointer. 4799 // Thus for a call through a function pointer, the following actions need 4800 // to be performed: 4801 // 1. Save the TOC of the caller in the TOC save area of its stack 4802 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). 4803 // 2. Load the address of the function entry point from the function 4804 // descriptor. 4805 // 3. Load the TOC of the callee from the function descriptor into r2. 4806 // 4. Load the environment pointer from the function descriptor into 4807 // r11. 4808 // 5. Branch to the function entry point address. 
4809 // 6. On return of the callee, the TOC of the caller needs to be 4810 // restored (this is done in FinishCall()). 4811 // 4812 // The loads are scheduled at the beginning of the call sequence, and the 4813 // register copies are flagged together to ensure that no other 4814 // operations can be scheduled in between. E.g. without flagging the 4815 // copies together, a TOC access in the caller could be scheduled between 4816 // the assignment of the callee TOC and the branch to the callee, which 4817 // results in the TOC access going through the TOC of the callee instead 4818 // of going through the TOC of the caller, which leads to incorrect code. 4819 4820 // Load the address of the function entry point from the function 4821 // descriptor. 4822 SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1); 4823 if (LDChain.getValueType() == MVT::Glue) 4824 LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2); 4825 4826 auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors() 4827 ? (MachineMemOperand::MODereferenceable | 4828 MachineMemOperand::MOInvariant) 4829 : MachineMemOperand::MONone; 4830 4831 MachinePointerInfo MPI(CS ? CS.getCalledValue() : nullptr); 4832 SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI, 4833 /* Alignment = */ 8, MMOFlags); 4834 4835 // Load environment pointer into r11. 4836 SDValue PtrOff = DAG.getIntPtrConstant(16, dl); 4837 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 4838 SDValue LoadEnvPtr = 4839 DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, MPI.getWithOffset(16), 4840 /* Alignment = */ 8, MMOFlags); 4841 4842 SDValue TOCOff = DAG.getIntPtrConstant(8, dl); 4843 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff); 4844 SDValue TOCPtr = 4845 DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, MPI.getWithOffset(8), 4846 /* Alignment = */ 8, MMOFlags); 4847 4848 setUsesTOCBasePtr(DAG); 4849 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr, 4850 InFlag); 4851 Chain = TOCVal.getValue(0); 4852 InFlag = TOCVal.getValue(1); 4853 4854 // If the function call has an explicit 'nest' parameter, it takes the 4855 // place of the environment pointer. 4856 if (!hasNest) { 4857 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 4858 InFlag); 4859 4860 Chain = EnvVal.getValue(0); 4861 InFlag = EnvVal.getValue(1); 4862 } 4863 4864 MTCTROps[0] = Chain; 4865 MTCTROps[1] = LoadFuncPtr; 4866 MTCTROps[2] = InFlag; 4867 } 4868 4869 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, 4870 makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2)); 4871 InFlag = Chain.getValue(1); 4872 4873 NodeTys.clear(); 4874 NodeTys.push_back(MVT::Other); 4875 NodeTys.push_back(MVT::Glue); 4876 Ops.push_back(Chain); 4877 CallOpc = PPCISD::BCTRL; 4878 Callee.setNode(nullptr); 4879 // Add use of X11 (holding environment pointer) 4880 if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest) 4881 Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); 4882 // Add CTR register as callee so a bctr can be emitted later. 4883 if (isTailCall) 4884 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 4885 } 4886 4887 // If this is a direct call, pass the chain and the callee. 4888 if (Callee.getNode()) { 4889 Ops.push_back(Chain); 4890 Ops.push_back(Callee); 4891 } 4892 // If this is a tail call add stack pointer delta. 
4893 if (isTailCall) 4894 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32)); 4895 4896 // Add argument registers to the end of the list so that they are known live 4897 // into the call. 4898 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 4899 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 4900 RegsToPass[i].second.getValueType())); 4901 4902 // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live 4903 // into the call. 4904 if (isSVR4ABI && isPPC64 && !isPatchPoint) { 4905 setUsesTOCBasePtr(DAG); 4906 Ops.push_back(DAG.getRegister(PPC::X2, PtrVT)); 4907 } 4908 4909 return CallOpc; 4910 } 4911 4912 SDValue PPCTargetLowering::LowerCallResult( 4913 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, 4914 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 4915 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 4916 SmallVector<CCValAssign, 16> RVLocs; 4917 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 4918 *DAG.getContext()); 4919 CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC); 4920 4921 // Copy all of the result registers out of their specified physreg. 4922 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 4923 CCValAssign &VA = RVLocs[i]; 4924 assert(VA.isRegLoc() && "Can only return in registers!"); 4925 4926 SDValue Val = DAG.getCopyFromReg(Chain, dl, 4927 VA.getLocReg(), VA.getLocVT(), InFlag); 4928 Chain = Val.getValue(1); 4929 InFlag = Val.getValue(2); 4930 4931 switch (VA.getLocInfo()) { 4932 default: llvm_unreachable("Unknown loc info!"); 4933 case CCValAssign::Full: break; 4934 case CCValAssign::AExt: 4935 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4936 break; 4937 case CCValAssign::ZExt: 4938 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val, 4939 DAG.getValueType(VA.getValVT())); 4940 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4941 break; 4942 case CCValAssign::SExt: 4943 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val, 4944 DAG.getValueType(VA.getValVT())); 4945 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4946 break; 4947 } 4948 4949 InVals.push_back(Val); 4950 } 4951 4952 return Chain; 4953 } 4954 4955 SDValue PPCTargetLowering::FinishCall( 4956 CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg, 4957 bool isPatchPoint, bool hasNest, SelectionDAG &DAG, 4958 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue InFlag, 4959 SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff, 4960 unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins, 4961 SmallVectorImpl<SDValue> &InVals, ImmutableCallSite CS) const { 4962 std::vector<EVT> NodeTys; 4963 SmallVector<SDValue, 8> Ops; 4964 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl, 4965 SPDiff, isTailCall, isPatchPoint, hasNest, 4966 RegsToPass, Ops, NodeTys, CS, Subtarget); 4967 4968 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls 4969 if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64()) 4970 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); 4971 4972 // When performing tail call optimization the callee pops its arguments off 4973 // the stack. Account for this here so these bytes can be pushed back on in 4974 // PPCFrameLowering::eliminateCallFramePseudoInstr. 4975 int BytesCalleePops = 4976 (CallConv == CallingConv::Fast && 4977 getTargetMachine().Options.GuaranteedTailCallOpt) ? 
NumBytes : 0; 4978 4979 // Add a register mask operand representing the call-preserved registers. 4980 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 4981 const uint32_t *Mask = 4982 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv); 4983 assert(Mask && "Missing call preserved mask for calling convention"); 4984 Ops.push_back(DAG.getRegisterMask(Mask)); 4985 4986 if (InFlag.getNode()) 4987 Ops.push_back(InFlag); 4988 4989 // Emit tail call. 4990 if (isTailCall) { 4991 assert(((Callee.getOpcode() == ISD::Register && 4992 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) || 4993 Callee.getOpcode() == ISD::TargetExternalSymbol || 4994 Callee.getOpcode() == ISD::TargetGlobalAddress || 4995 isa<ConstantSDNode>(Callee)) && 4996 "Expecting an global address, external symbol, absolute value or register"); 4997 4998 DAG.getMachineFunction().getFrameInfo().setHasTailCall(); 4999 return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops); 5000 } 5001 5002 // Add a NOP immediately after the branch instruction when using the 64-bit 5003 // SVR4 ABI. At link time, if caller and callee are in a different module and 5004 // thus have a different TOC, the call will be replaced with a call to a stub 5005 // function which saves the current TOC, loads the TOC of the callee and 5006 // branches to the callee. The NOP will be replaced with a load instruction 5007 // which restores the TOC of the caller from the TOC save slot of the current 5008 // stack frame. If caller and callee belong to the same module (and have the 5009 // same TOC), the NOP will remain unchanged. 5010 5011 MachineFunction &MF = DAG.getMachineFunction(); 5012 if (!isTailCall && Subtarget.isSVR4ABI()&& Subtarget.isPPC64() && 5013 !isPatchPoint) { 5014 if (CallOpc == PPCISD::BCTRL) { 5015 // This is a call through a function pointer. 5016 // Restore the caller TOC from the save area into R2. 5017 // See PrepareCall() for more information about calls through function 5018 // pointers in the 64-bit SVR4 ABI. 5019 // We are using a target-specific load with r2 hard coded, because the 5020 // result of a target-independent load would never go directly into r2, 5021 // since r2 is a reserved register (which prevents the register allocator 5022 // from allocating it), resulting in an additional register being 5023 // allocated and an unnecessary move instruction being generated. 5024 CallOpc = PPCISD::BCTRL_LOAD_TOC; 5025 5026 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 5027 SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT); 5028 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset(); 5029 SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl); 5030 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff); 5031 5032 // The address needs to go after the chain input but before the flag (or 5033 // any other variadic arguments). 5034 Ops.insert(std::next(Ops.begin()), AddTOC); 5035 } else if (CallOpc == PPCISD::CALL && 5036 !callsShareTOCBase(MF.getFunction(), Callee, DAG.getTarget())) { 5037 // Otherwise insert NOP for non-local calls. 
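      // (If the linker does route this call through a stub, it typically
      // rewrites the NOP into the TOC-restore load, e.g. "ld 2, 40(1)" under
      // ELFv1 or "ld 2, 24(1)" under ELFv2; the exact offset is ABI-defined.)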
5038 CallOpc = PPCISD::CALL_NOP; 5039 } 5040 } 5041 5042 Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops); 5043 InFlag = Chain.getValue(1); 5044 5045 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 5046 DAG.getIntPtrConstant(BytesCalleePops, dl, true), 5047 InFlag, dl); 5048 if (!Ins.empty()) 5049 InFlag = Chain.getValue(1); 5050 5051 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 5052 Ins, dl, DAG, InVals); 5053 } 5054 5055 SDValue 5056 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 5057 SmallVectorImpl<SDValue> &InVals) const { 5058 SelectionDAG &DAG = CLI.DAG; 5059 SDLoc &dl = CLI.DL; 5060 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 5061 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 5062 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 5063 SDValue Chain = CLI.Chain; 5064 SDValue Callee = CLI.Callee; 5065 bool &isTailCall = CLI.IsTailCall; 5066 CallingConv::ID CallConv = CLI.CallConv; 5067 bool isVarArg = CLI.IsVarArg; 5068 bool isPatchPoint = CLI.IsPatchPoint; 5069 ImmutableCallSite CS = CLI.CS; 5070 5071 if (isTailCall) { 5072 if (Subtarget.useLongCalls() && !(CS && CS.isMustTailCall())) 5073 isTailCall = false; 5074 else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) 5075 isTailCall = 5076 IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS, 5077 isVarArg, Outs, Ins, DAG); 5078 else 5079 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 5080 Ins, DAG); 5081 if (isTailCall) { 5082 ++NumTailCalls; 5083 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 5084 ++NumSiblingCalls; 5085 5086 assert(isa<GlobalAddressSDNode>(Callee) && 5087 "Callee should be an llvm::Function object."); 5088 DEBUG( 5089 const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal(); 5090 const unsigned Width = 80 - strlen("TCO caller: ") 5091 - strlen(", callee linkage: 0, 0"); 5092 dbgs() << "TCO caller: " 5093 << left_justify(DAG.getMachineFunction().getName(), Width) 5094 << ", callee linkage: " 5095 << GV->getVisibility() << ", " << GV->getLinkage() << "\n" 5096 ); 5097 } 5098 } 5099 5100 if (!isTailCall && CS && CS.isMustTailCall()) 5101 report_fatal_error("failed to perform tail call elimination on a call " 5102 "site marked musttail"); 5103 5104 // When long calls (i.e. indirect calls) are always used, calls are always 5105 // made via function pointer. If we have a function name, first translate it 5106 // into a pointer. 
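  // (Once lowered, the callee operand is no longer a GlobalAddress node, so
  // PrepareCall() treats the call as indirect and emits the MTCTR/BCTRL
  // sequence.)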
5107 if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) && 5108 !isTailCall) 5109 Callee = LowerGlobalAddress(Callee, DAG); 5110 5111 if (Subtarget.isSVR4ABI()) { 5112 if (Subtarget.isPPC64()) 5113 return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg, 5114 isTailCall, isPatchPoint, Outs, OutVals, Ins, 5115 dl, DAG, InVals, CS); 5116 else 5117 return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg, 5118 isTailCall, isPatchPoint, Outs, OutVals, Ins, 5119 dl, DAG, InVals, CS); 5120 } 5121 5122 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg, 5123 isTailCall, isPatchPoint, Outs, OutVals, Ins, 5124 dl, DAG, InVals, CS); 5125 } 5126 5127 SDValue PPCTargetLowering::LowerCall_32SVR4( 5128 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, 5129 bool isTailCall, bool isPatchPoint, 5130 const SmallVectorImpl<ISD::OutputArg> &Outs, 5131 const SmallVectorImpl<SDValue> &OutVals, 5132 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 5133 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, 5134 ImmutableCallSite CS) const { 5135 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description 5136 // of the 32-bit SVR4 ABI stack frame layout. 5137 5138 assert((CallConv == CallingConv::C || 5139 CallConv == CallingConv::Fast) && "Unknown calling convention!"); 5140 5141 unsigned PtrByteSize = 4; 5142 5143 MachineFunction &MF = DAG.getMachineFunction(); 5144 5145 // Mark this function as potentially containing a function that contains a 5146 // tail call. As a consequence the frame pointer will be used for dynamicalloc 5147 // and restoring the callers stack pointer in this functions epilog. This is 5148 // done because by tail calling the called function might overwrite the value 5149 // in this function's (MF) stack pointer stack slot 0(SP). 5150 if (getTargetMachine().Options.GuaranteedTailCallOpt && 5151 CallConv == CallingConv::Fast) 5152 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 5153 5154 // Count how many bytes are to be pushed on the stack, including the linkage 5155 // area, parameter list area and the part of the local variable space which 5156 // contains copies of aggregates which are passed by value. 5157 5158 // Assign locations to all of the outgoing arguments. 5159 SmallVector<CCValAssign, 16> ArgLocs; 5160 PPCCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext()); 5161 5162 // Reserve space for the linkage area on the stack. 5163 CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(), 5164 PtrByteSize); 5165 if (useSoftFloat()) 5166 CCInfo.PreAnalyzeCallOperands(Outs); 5167 5168 if (isVarArg) { 5169 // Handle fixed and variable vector arguments differently. 5170 // Fixed vector arguments go into registers as long as registers are 5171 // available. Variable vector arguments always go into memory. 5172 unsigned NumArgs = Outs.size(); 5173 5174 for (unsigned i = 0; i != NumArgs; ++i) { 5175 MVT ArgVT = Outs[i].VT; 5176 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 5177 bool Result; 5178 5179 if (Outs[i].IsFixed) { 5180 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, 5181 CCInfo); 5182 } else { 5183 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, 5184 ArgFlags, CCInfo); 5185 } 5186 5187 if (Result) { 5188 #ifndef NDEBUG 5189 errs() << "Call operand #" << i << " has unhandled type " 5190 << EVT(ArgVT).getEVTString() << "\n"; 5191 #endif 5192 llvm_unreachable(nullptr); 5193 } 5194 } 5195 } else { 5196 // All arguments are treated the same. 
5197 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4); 5198 } 5199 CCInfo.clearWasPPCF128(); 5200 5201 // Assign locations to all of the outgoing aggregate by value arguments. 5202 SmallVector<CCValAssign, 16> ByValArgLocs; 5203 CCState CCByValInfo(CallConv, isVarArg, MF, ByValArgLocs, *DAG.getContext()); 5204 5205 // Reserve stack space for the allocations in CCInfo. 5206 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 5207 5208 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal); 5209 5210 // Size of the linkage area, parameter list area and the part of the local 5211 // space variable where copies of aggregates which are passed by value are 5212 // stored. 5213 unsigned NumBytes = CCByValInfo.getNextStackOffset(); 5214 5215 // Calculate by how many bytes the stack has to be adjusted in case of tail 5216 // call optimization. 5217 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 5218 5219 // Adjust the stack pointer for the new arguments... 5220 // These operations are automatically eliminated by the prolog/epilog pass 5221 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 5222 SDValue CallSeqStart = Chain; 5223 5224 // Load the return address and frame pointer so it can be moved somewhere else 5225 // later. 5226 SDValue LROp, FPOp; 5227 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 5228 5229 // Set up a copy of the stack pointer for use loading and storing any 5230 // arguments that may not fit in the registers available for argument 5231 // passing. 5232 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 5233 5234 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 5235 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 5236 SmallVector<SDValue, 8> MemOpChains; 5237 5238 bool seenFloatArg = false; 5239 // Walk the register/memloc assignments, inserting copies/loads. 5240 for (unsigned i = 0, j = 0, e = ArgLocs.size(); 5241 i != e; 5242 ++i) { 5243 CCValAssign &VA = ArgLocs[i]; 5244 SDValue Arg = OutVals[i]; 5245 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5246 5247 if (Flags.isByVal()) { 5248 // Argument is an aggregate which is passed by value, thus we need to 5249 // create a copy of it in the local variable space of the current stack 5250 // frame (which is the stack frame of the caller) and pass the address of 5251 // this copy to the callee. 5252 assert((j < ByValArgLocs.size()) && "Index out of bounds!"); 5253 CCValAssign &ByValVA = ByValArgLocs[j++]; 5254 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); 5255 5256 // Memory reserved in the local variable space of the callers stack frame. 5257 unsigned LocMemOffset = ByValVA.getLocMemOffset(); 5258 5259 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 5260 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 5261 StackPtr, PtrOff); 5262 5263 // Create a copy of the argument in the local area of the current 5264 // stack frame. 5265 SDValue MemcpyCall = 5266 CreateCopyOfByValArgument(Arg, PtrOff, 5267 CallSeqStart.getNode()->getOperand(0), 5268 Flags, DAG, dl); 5269 5270 // This must go outside the CALLSEQ_START..END. 
5271 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0, 5272 SDLoc(MemcpyCall)); 5273 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 5274 NewCallSeqStart.getNode()); 5275 Chain = CallSeqStart = NewCallSeqStart; 5276 5277 // Pass the address of the aggregate copy on the stack either in a 5278 // physical register or in the parameter list area of the current stack 5279 // frame to the callee. 5280 Arg = PtrOff; 5281 } 5282 5283 if (VA.isRegLoc()) { 5284 if (Arg.getValueType() == MVT::i1) 5285 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg); 5286 5287 seenFloatArg |= VA.getLocVT().isFloatingPoint(); 5288 // Put argument in a physical register. 5289 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 5290 } else { 5291 // Put argument in the parameter list area of the current stack frame. 5292 assert(VA.isMemLoc()); 5293 unsigned LocMemOffset = VA.getLocMemOffset(); 5294 5295 if (!isTailCall) { 5296 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 5297 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 5298 StackPtr, PtrOff); 5299 5300 MemOpChains.push_back( 5301 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 5302 } else { 5303 // Calculate and remember argument location. 5304 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset, 5305 TailCallArguments); 5306 } 5307 } 5308 } 5309 5310 if (!MemOpChains.empty()) 5311 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5312 5313 // Build a sequence of copy-to-reg nodes chained together with token chain 5314 // and flag operands which copy the outgoing args into the appropriate regs. 5315 SDValue InFlag; 5316 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 5317 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 5318 RegsToPass[i].second, InFlag); 5319 InFlag = Chain.getValue(1); 5320 } 5321 5322 // Set CR bit 6 to true if this is a vararg call with floating args passed in 5323 // registers. 5324 if (isVarArg) { 5325 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 5326 SDValue Ops[] = { Chain, InFlag }; 5327 5328 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET, 5329 dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1)); 5330 5331 InFlag = Chain.getValue(1); 5332 } 5333 5334 if (isTailCall) 5335 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 5336 TailCallArguments); 5337 5338 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, 5339 /* unused except on PPC64 ELFv1 */ false, DAG, 5340 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 5341 NumBytes, Ins, InVals, CS); 5342 } 5343 5344 // Copy an argument into memory, being careful to do this outside the 5345 // call sequence for the call to which the argument belongs. 5346 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq( 5347 SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags, 5348 SelectionDAG &DAG, const SDLoc &dl) const { 5349 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, 5350 CallSeqStart.getNode()->getOperand(0), 5351 Flags, DAG, dl); 5352 // The MEMCPY must go outside the CALLSEQ_START..END. 
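  // (The memcpy may itself be lowered to a libcall, which would create its own
  // call sequence, and call sequences cannot be nested; hence the CALLSEQ_START
  // is re-rooted after the copy.)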
5353 int64_t FrameSize = CallSeqStart.getConstantOperandVal(1); 5354 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0, 5355 SDLoc(MemcpyCall)); 5356 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 5357 NewCallSeqStart.getNode()); 5358 return NewCallSeqStart; 5359 } 5360 5361 SDValue PPCTargetLowering::LowerCall_64SVR4( 5362 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, 5363 bool isTailCall, bool isPatchPoint, 5364 const SmallVectorImpl<ISD::OutputArg> &Outs, 5365 const SmallVectorImpl<SDValue> &OutVals, 5366 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 5367 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, 5368 ImmutableCallSite CS) const { 5369 bool isELFv2ABI = Subtarget.isELFv2ABI(); 5370 bool isLittleEndian = Subtarget.isLittleEndian(); 5371 unsigned NumOps = Outs.size(); 5372 bool hasNest = false; 5373 bool IsSibCall = false; 5374 5375 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 5376 unsigned PtrByteSize = 8; 5377 5378 MachineFunction &MF = DAG.getMachineFunction(); 5379 5380 if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt) 5381 IsSibCall = true; 5382 5383 // Mark this function as potentially containing a function that contains a 5384 // tail call. As a consequence the frame pointer will be used for dynamicalloc 5385 // and restoring the callers stack pointer in this functions epilog. This is 5386 // done because by tail calling the called function might overwrite the value 5387 // in this function's (MF) stack pointer stack slot 0(SP). 5388 if (getTargetMachine().Options.GuaranteedTailCallOpt && 5389 CallConv == CallingConv::Fast) 5390 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 5391 5392 assert(!(CallConv == CallingConv::Fast && isVarArg) && 5393 "fastcc not supported on varargs functions"); 5394 5395 // Count how many bytes are to be pushed on the stack, including the linkage 5396 // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes 5397 // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage 5398 // area is 32 bytes reserved space for [SP][CR][LR][TOC]. 5399 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 5400 unsigned NumBytes = LinkageSize; 5401 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 5402 unsigned &QFPR_idx = FPR_idx; 5403 5404 static const MCPhysReg GPR[] = { 5405 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 5406 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 5407 }; 5408 static const MCPhysReg VR[] = { 5409 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 5410 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 5411 }; 5412 5413 const unsigned NumGPRs = array_lengthof(GPR); 5414 const unsigned NumFPRs = useSoftFloat() ? 0 : 13; 5415 const unsigned NumVRs = array_lengthof(VR); 5416 const unsigned NumQFPRs = NumFPRs; 5417 5418 // On ELFv2, we can avoid allocating the parameter area if all the arguments 5419 // can be passed to the callee in registers. 5420 // For the fast calling convention, there is another check below. 
5421 // Note: We should keep consistent with LowerFormalArguments_64SVR4() 5422 bool HasParameterArea = !isELFv2ABI || isVarArg || CallConv == CallingConv::Fast; 5423 if (!HasParameterArea) { 5424 unsigned ParamAreaSize = NumGPRs * PtrByteSize; 5425 unsigned AvailableFPRs = NumFPRs; 5426 unsigned AvailableVRs = NumVRs; 5427 unsigned NumBytesTmp = NumBytes; 5428 for (unsigned i = 0; i != NumOps; ++i) { 5429 if (Outs[i].Flags.isNest()) continue; 5430 if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags, 5431 PtrByteSize, LinkageSize, ParamAreaSize, 5432 NumBytesTmp, AvailableFPRs, AvailableVRs, 5433 Subtarget.hasQPX())) 5434 HasParameterArea = true; 5435 } 5436 } 5437 5438 // When using the fast calling convention, we don't provide backing for 5439 // arguments that will be in registers. 5440 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0; 5441 5442 // Add up all the space actually used. 5443 for (unsigned i = 0; i != NumOps; ++i) { 5444 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5445 EVT ArgVT = Outs[i].VT; 5446 EVT OrigVT = Outs[i].ArgVT; 5447 5448 if (Flags.isNest()) 5449 continue; 5450 5451 if (CallConv == CallingConv::Fast) { 5452 if (Flags.isByVal()) 5453 NumGPRsUsed += (Flags.getByValSize()+7)/8; 5454 else 5455 switch (ArgVT.getSimpleVT().SimpleTy) { 5456 default: llvm_unreachable("Unexpected ValueType for argument!"); 5457 case MVT::i1: 5458 case MVT::i32: 5459 case MVT::i64: 5460 if (++NumGPRsUsed <= NumGPRs) 5461 continue; 5462 break; 5463 case MVT::v4i32: 5464 case MVT::v8i16: 5465 case MVT::v16i8: 5466 case MVT::v2f64: 5467 case MVT::v2i64: 5468 case MVT::v1i128: 5469 if (++NumVRsUsed <= NumVRs) 5470 continue; 5471 break; 5472 case MVT::v4f32: 5473 // When using QPX, this is handled like a FP register, otherwise, it 5474 // is an Altivec register. 5475 if (Subtarget.hasQPX()) { 5476 if (++NumFPRsUsed <= NumFPRs) 5477 continue; 5478 } else { 5479 if (++NumVRsUsed <= NumVRs) 5480 continue; 5481 } 5482 break; 5483 case MVT::f32: 5484 case MVT::f64: 5485 case MVT::v4f64: // QPX 5486 case MVT::v4i1: // QPX 5487 if (++NumFPRsUsed <= NumFPRs) 5488 continue; 5489 break; 5490 } 5491 } 5492 5493 /* Respect alignment of argument on the stack. */ 5494 unsigned Align = 5495 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 5496 NumBytes = ((NumBytes + Align - 1) / Align) * Align; 5497 5498 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 5499 if (Flags.isInConsecutiveRegsLast()) 5500 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 5501 } 5502 5503 unsigned NumBytesActuallyUsed = NumBytes; 5504 5505 // In the old ELFv1 ABI, 5506 // the prolog code of the callee may store up to 8 GPR argument registers to 5507 // the stack, allowing va_start to index over them in memory if its varargs. 5508 // Because we cannot tell if this is needed on the caller side, we have to 5509 // conservatively assume that it is needed. As such, make sure we have at 5510 // least enough stack space for the caller to store the 8 GPRs. 5511 // In the ELFv2 ABI, we allocate the parameter area iff a callee 5512 // really requires memory operands, e.g. a vararg function. 5513 if (HasParameterArea) 5514 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize); 5515 else 5516 NumBytes = LinkageSize; 5517 5518 // Tail call needs the stack to be aligned. 
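  // (EnsureStackAlignment() rounds NumBytes up to the target's stack alignment;
  // for example, with a 16-byte-aligned stack, 120 bytes would become 128.)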
5519 if (getTargetMachine().Options.GuaranteedTailCallOpt && 5520 CallConv == CallingConv::Fast) 5521 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes); 5522 5523 int SPDiff = 0; 5524 5525 // Calculate by how many bytes the stack has to be adjusted in case of tail 5526 // call optimization. 5527 if (!IsSibCall) 5528 SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 5529 5530 // To protect arguments on the stack from being clobbered in a tail call, 5531 // force all the loads to happen before doing any other lowering. 5532 if (isTailCall) 5533 Chain = DAG.getStackArgumentTokenFactor(Chain); 5534 5535 // Adjust the stack pointer for the new arguments... 5536 // These operations are automatically eliminated by the prolog/epilog pass 5537 if (!IsSibCall) 5538 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 5539 SDValue CallSeqStart = Chain; 5540 5541 // Load the return address and frame pointer so it can be move somewhere else 5542 // later. 5543 SDValue LROp, FPOp; 5544 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 5545 5546 // Set up a copy of the stack pointer for use loading and storing any 5547 // arguments that may not fit in the registers available for argument 5548 // passing. 5549 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 5550 5551 // Figure out which arguments are going to go in registers, and which in 5552 // memory. Also, if this is a vararg function, floating point operations 5553 // must be stored to our stack, and loaded into integer regs as well, if 5554 // any integer regs are available for argument passing. 5555 unsigned ArgOffset = LinkageSize; 5556 5557 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 5558 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 5559 5560 SmallVector<SDValue, 8> MemOpChains; 5561 for (unsigned i = 0; i != NumOps; ++i) { 5562 SDValue Arg = OutVals[i]; 5563 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5564 EVT ArgVT = Outs[i].VT; 5565 EVT OrigVT = Outs[i].ArgVT; 5566 5567 // PtrOff will be used to store the current argument to the stack if a 5568 // register cannot be found for it. 5569 SDValue PtrOff; 5570 5571 // We re-align the argument offset for each argument, except when using the 5572 // fast calling convention, when we need to make sure we do that only when 5573 // we'll actually use a stack slot. 5574 auto ComputePtrOff = [&]() { 5575 /* Respect alignment of argument on the stack. */ 5576 unsigned Align = 5577 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 5578 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 5579 5580 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 5581 5582 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 5583 }; 5584 5585 if (CallConv != CallingConv::Fast) { 5586 ComputePtrOff(); 5587 5588 /* Compute GPR index associated with argument offset. */ 5589 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 5590 GPR_idx = std::min(GPR_idx, NumGPRs); 5591 } 5592 5593 // Promote integers to 64-bit values. 5594 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) { 5595 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 5596 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 5597 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 5598 } 5599 5600 // FIXME memcpy is used way more than necessary. Correctness first. 5601 // Note: "by value" is code for passing a structure by value, not 5602 // basic types. 
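    // Aggregates ("byval") are handled first: depending on their size they are
    // passed right-justified in a single GPR, copied into the parameter save
    // area, or split across several GPRs plus the stack (see the cases below).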
5603 if (Flags.isByVal()) { 5604 // Note: Size includes alignment padding, so 5605 // struct x { short a; char b; } 5606 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 5607 // These are the proper values we need for right-justifying the 5608 // aggregate in a parameter register. 5609 unsigned Size = Flags.getByValSize(); 5610 5611 // An empty aggregate parameter takes up no storage and no 5612 // registers. 5613 if (Size == 0) 5614 continue; 5615 5616 if (CallConv == CallingConv::Fast) 5617 ComputePtrOff(); 5618 5619 // All aggregates smaller than 8 bytes must be passed right-justified. 5620 if (Size==1 || Size==2 || Size==4) { 5621 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 5622 if (GPR_idx != NumGPRs) { 5623 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 5624 MachinePointerInfo(), VT); 5625 MemOpChains.push_back(Load.getValue(1)); 5626 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5627 5628 ArgOffset += PtrByteSize; 5629 continue; 5630 } 5631 } 5632 5633 if (GPR_idx == NumGPRs && Size < 8) { 5634 SDValue AddPtr = PtrOff; 5635 if (!isLittleEndian) { 5636 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 5637 PtrOff.getValueType()); 5638 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5639 } 5640 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5641 CallSeqStart, 5642 Flags, DAG, dl); 5643 ArgOffset += PtrByteSize; 5644 continue; 5645 } 5646 // Copy entire object into memory. There are cases where gcc-generated 5647 // code assumes it is there, even if it could be put entirely into 5648 // registers. (This is not what the doc says.) 5649 5650 // FIXME: The above statement is likely due to a misunderstanding of the 5651 // documents. All arguments must be copied into the parameter area BY 5652 // THE CALLEE in the event that the callee takes the address of any 5653 // formal argument. That has not yet been implemented. However, it is 5654 // reasonable to use the stack area as a staging area for the register 5655 // load. 5656 5657 // Skip this for small aggregates, as we will use the same slot for a 5658 // right-justified copy, below. 5659 if (Size >= 8) 5660 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5661 CallSeqStart, 5662 Flags, DAG, dl); 5663 5664 // When a register is available, pass a small aggregate right-justified. 5665 if (Size < 8 && GPR_idx != NumGPRs) { 5666 // The easiest way to get this right-justified in a register 5667 // is to copy the structure into the rightmost portion of a 5668 // local variable slot, then load the whole slot into the 5669 // register. 5670 // FIXME: The memcpy seems to produce pretty awful code for 5671 // small aggregates, particularly for packed ones. 5672 // FIXME: It would be preferable to use the slot in the 5673 // parameter save area instead of a new local variable. 5674 SDValue AddPtr = PtrOff; 5675 if (!isLittleEndian) { 5676 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType()); 5677 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5678 } 5679 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5680 CallSeqStart, 5681 Flags, DAG, dl); 5682 5683 // Load the slot into the register. 5684 SDValue Load = 5685 DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo()); 5686 MemOpChains.push_back(Load.getValue(1)); 5687 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5688 5689 // Done with this argument. 
5690 ArgOffset += PtrByteSize; 5691 continue; 5692 } 5693 5694 // For aggregates larger than PtrByteSize, copy the pieces of the 5695 // object that fit into registers from the parameter save area. 5696 for (unsigned j=0; j<Size; j+=PtrByteSize) { 5697 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 5698 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 5699 if (GPR_idx != NumGPRs) { 5700 SDValue Load = 5701 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo()); 5702 MemOpChains.push_back(Load.getValue(1)); 5703 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5704 ArgOffset += PtrByteSize; 5705 } else { 5706 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 5707 break; 5708 } 5709 } 5710 continue; 5711 } 5712 5713 switch (Arg.getSimpleValueType().SimpleTy) { 5714 default: llvm_unreachable("Unexpected ValueType for argument!"); 5715 case MVT::i1: 5716 case MVT::i32: 5717 case MVT::i64: 5718 if (Flags.isNest()) { 5719 // The 'nest' parameter, if any, is passed in R11. 5720 RegsToPass.push_back(std::make_pair(PPC::X11, Arg)); 5721 hasNest = true; 5722 break; 5723 } 5724 5725 // These can be scalar arguments or elements of an integer array type 5726 // passed directly. Clang may use those instead of "byval" aggregate 5727 // types to avoid forcing arguments to memory unnecessarily. 5728 if (GPR_idx != NumGPRs) { 5729 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 5730 } else { 5731 if (CallConv == CallingConv::Fast) 5732 ComputePtrOff(); 5733 5734 assert(HasParameterArea && 5735 "Parameter area must exist to pass an argument in memory."); 5736 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5737 true, isTailCall, false, MemOpChains, 5738 TailCallArguments, dl); 5739 if (CallConv == CallingConv::Fast) 5740 ArgOffset += PtrByteSize; 5741 } 5742 if (CallConv != CallingConv::Fast) 5743 ArgOffset += PtrByteSize; 5744 break; 5745 case MVT::f32: 5746 case MVT::f64: { 5747 // These can be scalar arguments or elements of a float array type 5748 // passed directly. The latter are used to implement ELFv2 homogenous 5749 // float aggregates. 5750 5751 // Named arguments go into FPRs first, and once they overflow, the 5752 // remaining arguments go into GPRs and then the parameter save area. 5753 // Unnamed arguments for vararg functions always go to GPRs and 5754 // then the parameter save area. For now, put all arguments to vararg 5755 // routines always in both locations (FPR *and* GPR or stack slot). 5756 bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs; 5757 bool NeededLoad = false; 5758 5759 // First load the argument into the next available FPR. 5760 if (FPR_idx != NumFPRs) 5761 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 5762 5763 // Next, load the argument into GPR or stack slot if needed. 5764 if (!NeedGPROrStack) 5765 ; 5766 else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) { 5767 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 5768 // once we support fp <-> gpr moves. 5769 5770 // In the non-vararg case, this can only ever happen in the 5771 // presence of f32 array types, since otherwise we never run 5772 // out of FPRs before running out of GPRs. 5773 SDValue ArgVal; 5774 5775 // Double values are always passed in a single GPR. 5776 if (Arg.getValueType() != MVT::f32) { 5777 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg); 5778 5779 // Non-array float values are extended and passed in a GPR. 
5780 } else if (!Flags.isInConsecutiveRegs()) { 5781 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 5782 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal); 5783 5784 // If we have an array of floats, we collect every odd element 5785 // together with its predecessor into one GPR. 5786 } else if (ArgOffset % PtrByteSize != 0) { 5787 SDValue Lo, Hi; 5788 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]); 5789 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 5790 if (!isLittleEndian) 5791 std::swap(Lo, Hi); 5792 ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 5793 5794 // The final element, if even, goes into the first half of a GPR. 5795 } else if (Flags.isInConsecutiveRegsLast()) { 5796 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 5797 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal); 5798 if (!isLittleEndian) 5799 ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal, 5800 DAG.getConstant(32, dl, MVT::i32)); 5801 5802 // Non-final even elements are skipped; they will be handled 5803 // together the with subsequent argument on the next go-around. 5804 } else 5805 ArgVal = SDValue(); 5806 5807 if (ArgVal.getNode()) 5808 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal)); 5809 } else { 5810 if (CallConv == CallingConv::Fast) 5811 ComputePtrOff(); 5812 5813 // Single-precision floating-point values are mapped to the 5814 // second (rightmost) word of the stack doubleword. 5815 if (Arg.getValueType() == MVT::f32 && 5816 !isLittleEndian && !Flags.isInConsecutiveRegs()) { 5817 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 5818 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 5819 } 5820 5821 assert(HasParameterArea && 5822 "Parameter area must exist to pass an argument in memory."); 5823 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5824 true, isTailCall, false, MemOpChains, 5825 TailCallArguments, dl); 5826 5827 NeededLoad = true; 5828 } 5829 // When passing an array of floats, the array occupies consecutive 5830 // space in the argument area; only round up to the next doubleword 5831 // at the end of the array. Otherwise, each float takes 8 bytes. 5832 if (CallConv != CallingConv::Fast || NeededLoad) { 5833 ArgOffset += (Arg.getValueType() == MVT::f32 && 5834 Flags.isInConsecutiveRegs()) ? 4 : 8; 5835 if (Flags.isInConsecutiveRegsLast()) 5836 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 5837 } 5838 break; 5839 } 5840 case MVT::v4f32: 5841 case MVT::v4i32: 5842 case MVT::v8i16: 5843 case MVT::v16i8: 5844 case MVT::v2f64: 5845 case MVT::v2i64: 5846 case MVT::v1i128: 5847 if (!Subtarget.hasQPX()) { 5848 // These can be scalar arguments or elements of a vector array type 5849 // passed directly. The latter are used to implement ELFv2 homogenous 5850 // vector aggregates. 5851 5852 // For a varargs call, named arguments go into VRs or on the stack as 5853 // usual; unnamed arguments always go to the stack or the corresponding 5854 // GPRs when within range. For now, we always put the value in both 5855 // locations (or even all three). 5856 if (isVarArg) { 5857 assert(HasParameterArea && 5858 "Parameter area must exist if we have a varargs call."); 5859 // We could elide this store in the case where the object fits 5860 // entirely in R registers. Maybe later. 
5861 SDValue Store = 5862 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 5863 MemOpChains.push_back(Store); 5864 if (VR_idx != NumVRs) { 5865 SDValue Load = 5866 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo()); 5867 MemOpChains.push_back(Load.getValue(1)); 5868 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 5869 } 5870 ArgOffset += 16; 5871 for (unsigned i=0; i<16; i+=PtrByteSize) { 5872 if (GPR_idx == NumGPRs) 5873 break; 5874 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5875 DAG.getConstant(i, dl, PtrVT)); 5876 SDValue Load = 5877 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 5878 MemOpChains.push_back(Load.getValue(1)); 5879 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5880 } 5881 break; 5882 } 5883 5884 // Non-varargs Altivec params go into VRs or on the stack. 5885 if (VR_idx != NumVRs) { 5886 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 5887 } else { 5888 if (CallConv == CallingConv::Fast) 5889 ComputePtrOff(); 5890 5891 assert(HasParameterArea && 5892 "Parameter area must exist to pass an argument in memory."); 5893 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5894 true, isTailCall, true, MemOpChains, 5895 TailCallArguments, dl); 5896 if (CallConv == CallingConv::Fast) 5897 ArgOffset += 16; 5898 } 5899 5900 if (CallConv != CallingConv::Fast) 5901 ArgOffset += 16; 5902 break; 5903 } // not QPX 5904 5905 assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 && 5906 "Invalid QPX parameter type"); 5907 5908 /* fall through */ 5909 case MVT::v4f64: 5910 case MVT::v4i1: { 5911 bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32; 5912 if (isVarArg) { 5913 assert(HasParameterArea && 5914 "Parameter area must exist if we have a varargs call."); 5915 // We could elide this store in the case where the object fits 5916 // entirely in R registers. Maybe later. 5917 SDValue Store = 5918 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 5919 MemOpChains.push_back(Store); 5920 if (QFPR_idx != NumQFPRs) { 5921 SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store, 5922 PtrOff, MachinePointerInfo()); 5923 MemOpChains.push_back(Load.getValue(1)); 5924 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load)); 5925 } 5926 ArgOffset += (IsF32 ? 16 : 32); 5927 for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) { 5928 if (GPR_idx == NumGPRs) 5929 break; 5930 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5931 DAG.getConstant(i, dl, PtrVT)); 5932 SDValue Load = 5933 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 5934 MemOpChains.push_back(Load.getValue(1)); 5935 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5936 } 5937 break; 5938 } 5939 5940 // Non-varargs QPX params go into registers or on the stack. 5941 if (QFPR_idx != NumQFPRs) { 5942 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg)); 5943 } else { 5944 if (CallConv == CallingConv::Fast) 5945 ComputePtrOff(); 5946 5947 assert(HasParameterArea && 5948 "Parameter area must exist to pass an argument in memory."); 5949 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5950 true, isTailCall, true, MemOpChains, 5951 TailCallArguments, dl); 5952 if (CallConv == CallingConv::Fast) 5953 ArgOffset += (IsF32 ? 16 : 32); 5954 } 5955 5956 if (CallConv != CallingConv::Fast) 5957 ArgOffset += (IsF32 ? 
16 : 32); 5958 break; 5959 } 5960 } 5961 } 5962 5963 assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) && 5964 "mismatch in size of parameter area"); 5965 (void)NumBytesActuallyUsed; 5966 5967 if (!MemOpChains.empty()) 5968 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5969 5970 // Check if this is an indirect call (MTCTR/BCTRL). 5971 // See PrepareCall() for more information about calls through function 5972 // pointers in the 64-bit SVR4 ABI. 5973 if (!isTailCall && !isPatchPoint && 5974 !isFunctionGlobalAddress(Callee) && 5975 !isa<ExternalSymbolSDNode>(Callee)) { 5976 // Load r2 into a virtual register and store it to the TOC save area. 5977 setUsesTOCBasePtr(DAG); 5978 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64); 5979 // TOC save area offset. 5980 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset(); 5981 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl); 5982 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 5983 Chain = DAG.getStore( 5984 Val.getValue(1), dl, Val, AddPtr, 5985 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset)); 5986 // In the ELFv2 ABI, R12 must contain the address of an indirect callee. 5987 // This does not mean the MTCTR instruction must use R12; it's easier 5988 // to model this as an extra parameter, so do that. 5989 if (isELFv2ABI && !isPatchPoint) 5990 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee)); 5991 } 5992 5993 // Build a sequence of copy-to-reg nodes chained together with token chain 5994 // and flag operands which copy the outgoing args into the appropriate regs. 5995 SDValue InFlag; 5996 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 5997 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 5998 RegsToPass[i].second, InFlag); 5999 InFlag = Chain.getValue(1); 6000 } 6001 6002 if (isTailCall && !IsSibCall) 6003 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 6004 TailCallArguments); 6005 6006 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest, 6007 DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee, 6008 SPDiff, NumBytes, Ins, InVals, CS); 6009 } 6010 6011 SDValue PPCTargetLowering::LowerCall_Darwin( 6012 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, 6013 bool isTailCall, bool isPatchPoint, 6014 const SmallVectorImpl<ISD::OutputArg> &Outs, 6015 const SmallVectorImpl<SDValue> &OutVals, 6016 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 6017 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, 6018 ImmutableCallSite CS) const { 6019 unsigned NumOps = Outs.size(); 6020 6021 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6022 bool isPPC64 = PtrVT == MVT::i64; 6023 unsigned PtrByteSize = isPPC64 ? 8 : 4; 6024 6025 MachineFunction &MF = DAG.getMachineFunction(); 6026 6027 // Mark this function as potentially containing a call that may be tail-call 6028 // optimized. As a consequence, the frame pointer will be used for dynamic 6029 // stack allocation and for restoring the caller's stack pointer in this 6030 // function's epilogue. This is done because, by tail calling, the called 6031 // function might overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
6032 if (getTargetMachine().Options.GuaranteedTailCallOpt && 6033 CallConv == CallingConv::Fast) 6034 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 6035 6036 // Count how many bytes are to be pushed on the stack, including the linkage 6037 // area, and parameter passing area. We start with 24/48 bytes, which is 6038 // prereserved space for [SP][CR][LR][3 x unused]. 6039 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 6040 unsigned NumBytes = LinkageSize; 6041 6042 // Add up all the space actually used. 6043 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually 6044 // they all go in registers, but we must reserve stack space for them for 6045 // possible use by the caller. In varargs or 64-bit calls, parameters are 6046 // assigned stack space in order, with padding so Altivec parameters are 6047 // 16-byte aligned. 6048 unsigned nAltivecParamsAtEnd = 0; 6049 for (unsigned i = 0; i != NumOps; ++i) { 6050 ISD::ArgFlagsTy Flags = Outs[i].Flags; 6051 EVT ArgVT = Outs[i].VT; 6052 // Varargs Altivec parameters are padded to a 16 byte boundary. 6053 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 6054 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 6055 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) { 6056 if (!isVarArg && !isPPC64) { 6057 // Non-varargs Altivec parameters go after all the non-Altivec 6058 // parameters; handle those later so we know how much padding we need. 6059 nAltivecParamsAtEnd++; 6060 continue; 6061 } 6062 // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary. 6063 NumBytes = ((NumBytes+15)/16)*16; 6064 } 6065 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 6066 } 6067 6068 // Allow for Altivec parameters at the end, if needed. 6069 if (nAltivecParamsAtEnd) { 6070 NumBytes = ((NumBytes+15)/16)*16; 6071 NumBytes += 16*nAltivecParamsAtEnd; 6072 } 6073 6074 // The prolog code of the callee may store up to 8 GPR argument registers to 6075 // the stack, allowing va_start to index over them in memory if its varargs. 6076 // Because we cannot tell if this is needed on the caller side, we have to 6077 // conservatively assume that it is needed. As such, make sure we have at 6078 // least enough stack space for the caller to store the 8 GPRs. 6079 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize); 6080 6081 // Tail call needs the stack to be aligned. 6082 if (getTargetMachine().Options.GuaranteedTailCallOpt && 6083 CallConv == CallingConv::Fast) 6084 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes); 6085 6086 // Calculate by how many bytes the stack has to be adjusted in case of tail 6087 // call optimization. 6088 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 6089 6090 // To protect arguments on the stack from being clobbered in a tail call, 6091 // force all the loads to happen before doing any other lowering. 6092 if (isTailCall) 6093 Chain = DAG.getStackArgumentTokenFactor(Chain); 6094 6095 // Adjust the stack pointer for the new arguments... 6096 // These operations are automatically eliminated by the prolog/epilog pass 6097 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 6098 SDValue CallSeqStart = Chain; 6099 6100 // Load the return address and frame pointer so it can be move somewhere else 6101 // later. 
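// Illustrative recap of the size computation above: a 32-bit Darwin call
// passing two i32 arguments starts from the 24-byte linkage area and adds
// 2 * 4 bytes of parameter space (total 32), which is then raised to
// 24 + 8 * 4 = 56 bytes so the callee can always home the 8 GPR argument
// registers in the caller-provided parameter area.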
6102 SDValue LROp, FPOp; 6103 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 6104 6105 // Set up a copy of the stack pointer for use loading and storing any 6106 // arguments that may not fit in the registers available for argument 6107 // passing. 6108 SDValue StackPtr; 6109 if (isPPC64) 6110 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 6111 else 6112 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 6113 6114 // Figure out which arguments are going to go in registers, and which in 6115 // memory. Also, if this is a vararg function, floating point operations 6116 // must be stored to our stack, and loaded into integer regs as well, if 6117 // any integer regs are available for argument passing. 6118 unsigned ArgOffset = LinkageSize; 6119 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 6120 6121 static const MCPhysReg GPR_32[] = { // 32-bit registers. 6122 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 6123 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 6124 }; 6125 static const MCPhysReg GPR_64[] = { // 64-bit registers. 6126 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 6127 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 6128 }; 6129 static const MCPhysReg VR[] = { 6130 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 6131 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 6132 }; 6133 const unsigned NumGPRs = array_lengthof(GPR_32); 6134 const unsigned NumFPRs = 13; 6135 const unsigned NumVRs = array_lengthof(VR); 6136 6137 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 6138 6139 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 6140 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 6141 6142 SmallVector<SDValue, 8> MemOpChains; 6143 for (unsigned i = 0; i != NumOps; ++i) { 6144 SDValue Arg = OutVals[i]; 6145 ISD::ArgFlagsTy Flags = Outs[i].Flags; 6146 6147 // PtrOff will be used to store the current argument to the stack if a 6148 // register cannot be found for it. 6149 SDValue PtrOff; 6150 6151 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 6152 6153 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 6154 6155 // On PPC64, promote integers to 64-bit values. 6156 if (isPPC64 && Arg.getValueType() == MVT::i32) { 6157 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 6158 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 6159 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 6160 } 6161 6162 // FIXME memcpy is used way more than necessary. Correctness first. 6163 // Note: "by value" is code for passing a structure by value, not 6164 // basic types. 6165 if (Flags.isByVal()) { 6166 unsigned Size = Flags.getByValSize(); 6167 // Very small objects are passed right-justified. Everything else is 6168 // passed left-justified. 6169 if (Size==1 || Size==2) { 6170 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 6171 if (GPR_idx != NumGPRs) { 6172 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 6173 MachinePointerInfo(), VT); 6174 MemOpChains.push_back(Load.getValue(1)); 6175 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6176 6177 ArgOffset += PtrByteSize; 6178 } else { 6179 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 6180 PtrOff.getValueType()); 6181 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 6182 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 6183 CallSeqStart, 6184 Flags, DAG, dl); 6185 ArgOffset += PtrByteSize; 6186 } 6187 continue; 6188 } 6189 // Copy entire object into memory. 
There are cases where gcc-generated 6190 // code assumes it is there, even if it could be put entirely into 6191 // registers. (This is not what the doc says.) 6192 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 6193 CallSeqStart, 6194 Flags, DAG, dl); 6195 6196 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 6197 // copy the pieces of the object that fit into registers from the 6198 // parameter save area. 6199 for (unsigned j=0; j<Size; j+=PtrByteSize) { 6200 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 6201 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 6202 if (GPR_idx != NumGPRs) { 6203 SDValue Load = 6204 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo()); 6205 MemOpChains.push_back(Load.getValue(1)); 6206 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6207 ArgOffset += PtrByteSize; 6208 } else { 6209 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 6210 break; 6211 } 6212 } 6213 continue; 6214 } 6215 6216 switch (Arg.getSimpleValueType().SimpleTy) { 6217 default: llvm_unreachable("Unexpected ValueType for argument!"); 6218 case MVT::i1: 6219 case MVT::i32: 6220 case MVT::i64: 6221 if (GPR_idx != NumGPRs) { 6222 if (Arg.getValueType() == MVT::i1) 6223 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg); 6224 6225 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 6226 } else { 6227 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6228 isPPC64, isTailCall, false, MemOpChains, 6229 TailCallArguments, dl); 6230 } 6231 ArgOffset += PtrByteSize; 6232 break; 6233 case MVT::f32: 6234 case MVT::f64: 6235 if (FPR_idx != NumFPRs) { 6236 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 6237 6238 if (isVarArg) { 6239 SDValue Store = 6240 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6241 MemOpChains.push_back(Store); 6242 6243 // Float varargs are always shadowed in available integer registers 6244 if (GPR_idx != NumGPRs) { 6245 SDValue Load = 6246 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo()); 6247 MemOpChains.push_back(Load.getValue(1)); 6248 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6249 } 6250 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 6251 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 6252 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 6253 SDValue Load = 6254 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo()); 6255 MemOpChains.push_back(Load.getValue(1)); 6256 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6257 } 6258 } else { 6259 // If we have any FPRs remaining, we may also have GPRs remaining. 6260 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 6261 // GPRs. 6262 if (GPR_idx != NumGPRs) 6263 ++GPR_idx; 6264 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 6265 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 6266 ++GPR_idx; 6267 } 6268 } else 6269 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6270 isPPC64, isTailCall, false, MemOpChains, 6271 TailCallArguments, dl); 6272 if (isPPC64) 6273 ArgOffset += 8; 6274 else 6275 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 6276 break; 6277 case MVT::v4f32: 6278 case MVT::v4i32: 6279 case MVT::v8i16: 6280 case MVT::v16i8: 6281 if (isVarArg) { 6282 // These go aligned on the stack, or in the corresponding R registers 6283 // when within range. 
The Darwin PPC ABI doc claims they also go in 6284 // V registers; in fact gcc does this only for arguments that are 6285 // prototyped, not for those that match the ... We do it for all 6286 // arguments, seems to work. 6287 while (ArgOffset % 16 !=0) { 6288 ArgOffset += PtrByteSize; 6289 if (GPR_idx != NumGPRs) 6290 GPR_idx++; 6291 } 6292 // We could elide this store in the case where the object fits 6293 // entirely in R registers. Maybe later. 6294 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 6295 DAG.getConstant(ArgOffset, dl, PtrVT)); 6296 SDValue Store = 6297 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6298 MemOpChains.push_back(Store); 6299 if (VR_idx != NumVRs) { 6300 SDValue Load = 6301 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo()); 6302 MemOpChains.push_back(Load.getValue(1)); 6303 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 6304 } 6305 ArgOffset += 16; 6306 for (unsigned i=0; i<16; i+=PtrByteSize) { 6307 if (GPR_idx == NumGPRs) 6308 break; 6309 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 6310 DAG.getConstant(i, dl, PtrVT)); 6311 SDValue Load = 6312 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 6313 MemOpChains.push_back(Load.getValue(1)); 6314 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6315 } 6316 break; 6317 } 6318 6319 // Non-varargs Altivec params generally go in registers, but have 6320 // stack space allocated at the end. 6321 if (VR_idx != NumVRs) { 6322 // Doesn't have GPR space allocated. 6323 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 6324 } else if (nAltivecParamsAtEnd==0) { 6325 // We are emitting Altivec params in order. 6326 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6327 isPPC64, isTailCall, true, MemOpChains, 6328 TailCallArguments, dl); 6329 ArgOffset += 16; 6330 } 6331 break; 6332 } 6333 } 6334 // If all Altivec parameters fit in registers, as they usually do, 6335 // they get stack space following the non-Altivec parameters. We 6336 // don't track this here because nobody below needs it. 6337 // If there are more Altivec parameters than fit in registers emit 6338 // the stores here. 6339 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 6340 unsigned j = 0; 6341 // Offset is aligned; skip 1st 12 params which go in V registers. 6342 ArgOffset = ((ArgOffset+15)/16)*16; 6343 ArgOffset += 12*16; 6344 for (unsigned i = 0; i != NumOps; ++i) { 6345 SDValue Arg = OutVals[i]; 6346 EVT ArgType = Outs[i].VT; 6347 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 6348 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 6349 if (++j > NumVRs) { 6350 SDValue PtrOff; 6351 // We are emitting Altivec params in order. 6352 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6353 isPPC64, isTailCall, true, MemOpChains, 6354 TailCallArguments, dl); 6355 ArgOffset += 16; 6356 } 6357 } 6358 } 6359 } 6360 6361 if (!MemOpChains.empty()) 6362 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 6363 6364 // On Darwin, R12 must contain the address of an indirect callee. This does 6365 // not mean the MTCTR instruction must use R12; it's easier to model this as 6366 // an extra parameter, so do that. 6367 if (!isTailCall && 6368 !isFunctionGlobalAddress(Callee) && 6369 !isa<ExternalSymbolSDNode>(Callee) && 6370 !isBLACompatibleAddress(Callee, DAG)) 6371 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? 
PPC::X12 : 6372 PPC::R12), Callee)); 6373 6374 // Build a sequence of copy-to-reg nodes chained together with token chain 6375 // and flag operands which copy the outgoing args into the appropriate regs. 6376 SDValue InFlag; 6377 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 6378 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 6379 RegsToPass[i].second, InFlag); 6380 InFlag = Chain.getValue(1); 6381 } 6382 6383 if (isTailCall) 6384 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 6385 TailCallArguments); 6386 6387 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, 6388 /* unused except on PPC64 ELFv1 */ false, DAG, 6389 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 6390 NumBytes, Ins, InVals, CS); 6391 } 6392 6393 bool 6394 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 6395 MachineFunction &MF, bool isVarArg, 6396 const SmallVectorImpl<ISD::OutputArg> &Outs, 6397 LLVMContext &Context) const { 6398 SmallVector<CCValAssign, 16> RVLocs; 6399 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 6400 return CCInfo.CheckReturn(Outs, RetCC_PPC); 6401 } 6402 6403 SDValue 6404 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 6405 bool isVarArg, 6406 const SmallVectorImpl<ISD::OutputArg> &Outs, 6407 const SmallVectorImpl<SDValue> &OutVals, 6408 const SDLoc &dl, SelectionDAG &DAG) const { 6409 SmallVector<CCValAssign, 16> RVLocs; 6410 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 6411 *DAG.getContext()); 6412 CCInfo.AnalyzeReturn(Outs, RetCC_PPC); 6413 6414 SDValue Flag; 6415 SmallVector<SDValue, 4> RetOps(1, Chain); 6416 6417 // Copy the result values into the output registers. 6418 for (unsigned i = 0; i != RVLocs.size(); ++i) { 6419 CCValAssign &VA = RVLocs[i]; 6420 assert(VA.isRegLoc() && "Can only return in registers!"); 6421 6422 SDValue Arg = OutVals[i]; 6423 6424 switch (VA.getLocInfo()) { 6425 default: llvm_unreachable("Unknown loc info!"); 6426 case CCValAssign::Full: break; 6427 case CCValAssign::AExt: 6428 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 6429 break; 6430 case CCValAssign::ZExt: 6431 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 6432 break; 6433 case CCValAssign::SExt: 6434 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 6435 break; 6436 } 6437 6438 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 6439 Flag = Chain.getValue(1); 6440 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 6441 } 6442 6443 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 6444 const MCPhysReg *I = 6445 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); 6446 if (I) { 6447 for (; *I; ++I) { 6448 6449 if (PPC::G8RCRegClass.contains(*I)) 6450 RetOps.push_back(DAG.getRegister(*I, MVT::i64)); 6451 else if (PPC::F8RCRegClass.contains(*I)) 6452 RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64))); 6453 else if (PPC::CRRCRegClass.contains(*I)) 6454 RetOps.push_back(DAG.getRegister(*I, MVT::i1)); 6455 else if (PPC::VRRCRegClass.contains(*I)) 6456 RetOps.push_back(DAG.getRegister(*I, MVT::Other)); 6457 else 6458 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 6459 } 6460 } 6461 6462 RetOps[0] = Chain; // Update chain. 6463 6464 // Add the flag if we have it. 
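// The node built below has the shape
//   (PPCISD::RET_FLAG Chain, Reg0, Reg1, ..., [Glue])
// where the register operands added above keep the return-value (and
// callee-saved-via-copy) registers live across the return.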
6465 if (Flag.getNode()) 6466 RetOps.push_back(Flag); 6467 6468 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps); 6469 } 6470 6471 SDValue 6472 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, 6473 SelectionDAG &DAG) const { 6474 SDLoc dl(Op); 6475 6476 // Get the correct type for integers. 6477 EVT IntVT = Op.getValueType(); 6478 6479 // Get the inputs. 6480 SDValue Chain = Op.getOperand(0); 6481 SDValue FPSIdx = getFramePointerFrameIndex(DAG); 6482 // Build a DYNAREAOFFSET node. 6483 SDValue Ops[2] = {Chain, FPSIdx}; 6484 SDVTList VTs = DAG.getVTList(IntVT); 6485 return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops); 6486 } 6487 6488 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, 6489 SelectionDAG &DAG) const { 6490 // When we pop the dynamic allocation we need to restore the SP link. 6491 SDLoc dl(Op); 6492 6493 // Get the correct type for pointers. 6494 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6495 6496 // Construct the stack pointer operand. 6497 bool isPPC64 = Subtarget.isPPC64(); 6498 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1; 6499 SDValue StackPtr = DAG.getRegister(SP, PtrVT); 6500 6501 // Get the operands for the STACKRESTORE. 6502 SDValue Chain = Op.getOperand(0); 6503 SDValue SaveSP = Op.getOperand(1); 6504 6505 // Load the old link SP. 6506 SDValue LoadLinkSP = 6507 DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo()); 6508 6509 // Restore the stack pointer. 6510 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP); 6511 6512 // Store the old link SP. 6513 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo()); 6514 } 6515 6516 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const { 6517 MachineFunction &MF = DAG.getMachineFunction(); 6518 bool isPPC64 = Subtarget.isPPC64(); 6519 EVT PtrVT = getPointerTy(MF.getDataLayout()); 6520 6521 // Get the current return address save index. 6522 // It will be created on first use. 6523 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 6524 int RASI = FI->getReturnAddrSaveIndex(); 6525 6526 // If the return address save index hasn't been defined yet, create it. 6527 if (!RASI) { 6528 // Find out the fixed offset of the return address save area. 6529 int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset(); 6530 // Allocate the frame index for the return address save area. 6531 RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false); 6532 // Save the result. 6533 FI->setReturnAddrSaveIndex(RASI); 6534 } 6535 return DAG.getFrameIndex(RASI, PtrVT); 6536 } 6537 6538 SDValue 6539 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const { 6540 MachineFunction &MF = DAG.getMachineFunction(); 6541 bool isPPC64 = Subtarget.isPPC64(); 6542 EVT PtrVT = getPointerTy(MF.getDataLayout()); 6543 6544 // Get current frame pointer save index. The users of this index will be 6545 // primarily DYNALLOC instructions. 6546 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 6547 int FPSI = FI->getFramePointerSaveIndex(); 6548 6549 // If the frame pointer save index hasn't been defined yet, create it. 6550 if (!FPSI) { 6551 // Find out the fixed offset of the frame pointer save area. 6552 int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset(); 6553 // Allocate the frame index for the frame pointer save area. 6554 FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true); 6555 // Save the result.
6556 FI->setFramePointerSaveIndex(FPSI); 6557 } 6558 return DAG.getFrameIndex(FPSI, PtrVT); 6559 } 6560 6561 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 6562 SelectionDAG &DAG) const { 6563 // Get the inputs. 6564 SDValue Chain = Op.getOperand(0); 6565 SDValue Size = Op.getOperand(1); 6566 SDLoc dl(Op); 6567 6568 // Get the correct type for pointers. 6569 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6570 // Negate the size. 6571 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT, 6572 DAG.getConstant(0, dl, PtrVT), Size); 6573 // Construct a node for the frame pointer save index. 6574 SDValue FPSIdx = getFramePointerFrameIndex(DAG); 6575 // Build a DYNALLOC node. 6576 SDValue Ops[3] = { Chain, NegSize, FPSIdx }; 6577 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other); 6578 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops); 6579 } 6580 6581 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op, 6582 SelectionDAG &DAG) const { 6583 MachineFunction &MF = DAG.getMachineFunction(); 6584 6585 bool isPPC64 = Subtarget.isPPC64(); 6586 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6587 6588 int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false); 6589 return DAG.getFrameIndex(FI, PtrVT); 6590 } 6591 6592 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, 6593 SelectionDAG &DAG) const { 6594 SDLoc DL(Op); 6595 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL, 6596 DAG.getVTList(MVT::i32, MVT::Other), 6597 Op.getOperand(0), Op.getOperand(1)); 6598 } 6599 6600 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, 6601 SelectionDAG &DAG) const { 6602 SDLoc DL(Op); 6603 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other, 6604 Op.getOperand(0), Op.getOperand(1)); 6605 } 6606 6607 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 6608 if (Op.getValueType().isVector()) 6609 return LowerVectorLoad(Op, DAG); 6610 6611 assert(Op.getValueType() == MVT::i1 && 6612 "Custom lowering only for i1 loads"); 6613 6614 // First, load 8 bits into 32 bits, then truncate to 1 bit. 6615 6616 SDLoc dl(Op); 6617 LoadSDNode *LD = cast<LoadSDNode>(Op); 6618 6619 SDValue Chain = LD->getChain(); 6620 SDValue BasePtr = LD->getBasePtr(); 6621 MachineMemOperand *MMO = LD->getMemOperand(); 6622 6623 SDValue NewLD = 6624 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain, 6625 BasePtr, MVT::i8, MMO); 6626 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD); 6627 6628 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) }; 6629 return DAG.getMergeValues(Ops, dl); 6630 } 6631 6632 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 6633 if (Op.getOperand(1).getValueType().isVector()) 6634 return LowerVectorStore(Op, DAG); 6635 6636 assert(Op.getOperand(1).getValueType() == MVT::i1 && 6637 "Custom lowering only for i1 stores"); 6638 6639 // First, zero extend to 32 bits, then use a truncating store to 8 bits. 
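// For example, "store i1 %b, i1* %p" is lowered to a zero-extension of %b
// to a GPR-sized integer followed by a truncating i8 store, so only the low
// byte reaches memory; the matching i1 load lowering above reloads that
// byte with an extending load and truncates the result back to i1.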
6640 6641 SDLoc dl(Op); 6642 StoreSDNode *ST = cast<StoreSDNode>(Op); 6643 6644 SDValue Chain = ST->getChain(); 6645 SDValue BasePtr = ST->getBasePtr(); 6646 SDValue Value = ST->getValue(); 6647 MachineMemOperand *MMO = ST->getMemOperand(); 6648 6649 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()), 6650 Value); 6651 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO); 6652 } 6653 6654 // FIXME: Remove this once the ANDI glue bug is fixed: 6655 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 6656 assert(Op.getValueType() == MVT::i1 && 6657 "Custom lowering only for i1 results"); 6658 6659 SDLoc DL(Op); 6660 return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1, 6661 Op.getOperand(0)); 6662 } 6663 6664 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when 6665 /// possible. 6666 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 6667 // Not FP? Not a fsel. 6668 if (!Op.getOperand(0).getValueType().isFloatingPoint() || 6669 !Op.getOperand(2).getValueType().isFloatingPoint()) 6670 return Op; 6671 6672 // We might be able to do better than this under some circumstances, but in 6673 // general, fsel-based lowering of select is a finite-math-only optimization. 6674 // For more information, see section F.3 of the 2.06 ISA specification. 6675 if (!DAG.getTarget().Options.NoInfsFPMath || 6676 !DAG.getTarget().Options.NoNaNsFPMath) 6677 return Op; 6678 // TODO: Propagate flags from the select rather than global settings. 6679 SDNodeFlags Flags; 6680 Flags.setNoInfs(true); 6681 Flags.setNoNaNs(true); 6682 6683 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 6684 6685 EVT ResVT = Op.getValueType(); 6686 EVT CmpVT = Op.getOperand(0).getValueType(); 6687 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 6688 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3); 6689 SDLoc dl(Op); 6690 6691 // If the RHS of the comparison is a 0.0, we don't need to do the 6692 // subtraction at all. 6693 SDValue Sel1; 6694 if (isFloatingPointZero(RHS)) 6695 switch (CC) { 6696 default: break; // SETUO etc aren't handled by fsel. 
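// As a reminder of the underlying semantics (see the ISA for the
// authoritative definition): fsel FRT,FRA,FRC,FRB selects FRC when
// FRA >= 0.0 (including -0.0) and FRB otherwise (including NaN), i.e. it is
// natively a "greater than or equal to zero" select. The cases below swap
// TV/FV and/or negate the compared value so that each supported condition
// code fits that single pattern; e.g. with a zero RHS, SETGE maps directly
// to (fsel LHS, TV, FV).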
6697 case ISD::SETNE: 6698 std::swap(TV, FV); 6699 LLVM_FALLTHROUGH; 6700 case ISD::SETEQ: 6701 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 6702 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 6703 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 6704 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 6705 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 6706 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6707 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV); 6708 case ISD::SETULT: 6709 case ISD::SETLT: 6710 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 6711 LLVM_FALLTHROUGH; 6712 case ISD::SETOGE: 6713 case ISD::SETGE: 6714 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 6715 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 6716 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 6717 case ISD::SETUGT: 6718 case ISD::SETGT: 6719 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 6720 LLVM_FALLTHROUGH; 6721 case ISD::SETOLE: 6722 case ISD::SETLE: 6723 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 6724 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 6725 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6726 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV); 6727 } 6728 6729 SDValue Cmp; 6730 switch (CC) { 6731 default: break; // SETUO etc aren't handled by fsel. 6732 case ISD::SETNE: 6733 std::swap(TV, FV); 6734 LLVM_FALLTHROUGH; 6735 case ISD::SETEQ: 6736 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 6737 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6738 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6739 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6740 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 6741 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 6742 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6743 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 6744 case ISD::SETULT: 6745 case ISD::SETLT: 6746 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 6747 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6748 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6749 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6750 case ISD::SETOGE: 6751 case ISD::SETGE: 6752 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 6753 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6754 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6755 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6756 case ISD::SETUGT: 6757 case ISD::SETGT: 6758 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags); 6759 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6760 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6761 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6762 case ISD::SETOLE: 6763 case ISD::SETLE: 6764 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags); 6765 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6766 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6767 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6768 } 6769 return Op; 6770 } 6771 6772 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI, 6773 SelectionDAG &DAG, 6774 const SDLoc &dl) const { 6775 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6776 SDValue Src = Op.getOperand(0); 6777 if (Src.getValueType() == 
MVT::f32) 6778 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6779 6780 SDValue Tmp; 6781 switch (Op.getSimpleValueType().SimpleTy) { 6782 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6783 case MVT::i32: 6784 Tmp = DAG.getNode( 6785 Op.getOpcode() == ISD::FP_TO_SINT 6786 ? PPCISD::FCTIWZ 6787 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6788 dl, MVT::f64, Src); 6789 break; 6790 case MVT::i64: 6791 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6792 "i64 FP_TO_UINT is supported only with FPCVT"); 6793 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 6794 PPCISD::FCTIDUZ, 6795 dl, MVT::f64, Src); 6796 break; 6797 } 6798 6799 // Convert the FP value to an int value through memory. 6800 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 6801 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()); 6802 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 6803 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 6804 MachinePointerInfo MPI = 6805 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); 6806 6807 // Emit a store to the stack slot. 6808 SDValue Chain; 6809 if (i32Stack) { 6810 MachineFunction &MF = DAG.getMachineFunction(); 6811 MachineMemOperand *MMO = 6812 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 6813 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 6814 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 6815 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 6816 } else 6817 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI); 6818 6819 // Result is a load from the stack slot. If loading 4 bytes, make sure to 6820 // add in a bias on big endian. 6821 if (Op.getValueType() == MVT::i32 && !i32Stack) { 6822 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 6823 DAG.getConstant(4, dl, FIPtr.getValueType())); 6824 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4); 6825 } 6826 6827 RLI.Chain = Chain; 6828 RLI.Ptr = FIPtr; 6829 RLI.MPI = MPI; 6830 } 6831 6832 /// \brief Custom lowers floating point to integer conversions to use 6833 /// the direct move instructions available in ISA 2.07 to avoid the 6834 /// need for load/store combinations. 6835 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 6836 SelectionDAG &DAG, 6837 const SDLoc &dl) const { 6838 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6839 SDValue Src = Op.getOperand(0); 6840 6841 if (Src.getValueType() == MVT::f32) 6842 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6843 6844 SDValue Tmp; 6845 switch (Op.getSimpleValueType().SimpleTy) { 6846 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6847 case MVT::i32: 6848 Tmp = DAG.getNode( 6849 Op.getOpcode() == ISD::FP_TO_SINT 6850 ? PPCISD::FCTIWZ 6851 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6852 dl, MVT::f64, Src); 6853 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp); 6854 break; 6855 case MVT::i64: 6856 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6857 "i64 FP_TO_UINT is supported only with FPCVT"); 6858 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? 
PPCISD::FCTIDZ : 6859 PPCISD::FCTIDUZ, 6860 dl, MVT::f64, Src); 6861 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp); 6862 break; 6863 } 6864 return Tmp; 6865 } 6866 6867 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 6868 const SDLoc &dl) const { 6869 if (Subtarget.hasDirectMove() && Subtarget.isPPC64()) 6870 return LowerFP_TO_INTDirectMove(Op, DAG, dl); 6871 6872 ReuseLoadInfo RLI; 6873 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6874 6875 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, 6876 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 6877 } 6878 6879 // We're trying to insert a regular store, S, and then a load, L. If the 6880 // incoming value, O, is a load, we might just be able to have our load use the 6881 // address used by O. However, we don't know if anything else will store to 6882 // that address before we can load from it. To prevent this situation, we need 6883 // to insert our load, L, into the chain as a peer of O. To do this, we give L 6884 // the same chain operand as O, we create a token factor from the chain results 6885 // of O and L, and we replace all uses of O's chain result with that token 6886 // factor (see spliceIntoChain below for this last part). 6887 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT, 6888 ReuseLoadInfo &RLI, 6889 SelectionDAG &DAG, 6890 ISD::LoadExtType ET) const { 6891 SDLoc dl(Op); 6892 if (ET == ISD::NON_EXTLOAD && 6893 (Op.getOpcode() == ISD::FP_TO_UINT || 6894 Op.getOpcode() == ISD::FP_TO_SINT) && 6895 isOperationLegalOrCustom(Op.getOpcode(), 6896 Op.getOperand(0).getValueType())) { 6897 6898 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6899 return true; 6900 } 6901 6902 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op); 6903 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() || 6904 LD->isNonTemporal()) 6905 return false; 6906 if (LD->getMemoryVT() != MemVT) 6907 return false; 6908 6909 RLI.Ptr = LD->getBasePtr(); 6910 if (LD->isIndexed() && !LD->getOffset().isUndef()) { 6911 assert(LD->getAddressingMode() == ISD::PRE_INC && 6912 "Non-pre-inc AM on PPC?"); 6913 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr, 6914 LD->getOffset()); 6915 } 6916 6917 RLI.Chain = LD->getChain(); 6918 RLI.MPI = LD->getPointerInfo(); 6919 RLI.IsDereferenceable = LD->isDereferenceable(); 6920 RLI.IsInvariant = LD->isInvariant(); 6921 RLI.Alignment = LD->getAlignment(); 6922 RLI.AAInfo = LD->getAAInfo(); 6923 RLI.Ranges = LD->getRanges(); 6924 6925 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1); 6926 return true; 6927 } 6928 6929 // Given the head of the old chain, ResChain, insert a token factor containing 6930 // it and NewResChain, and make users of ResChain now be users of that token 6931 // factor. 6932 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead. 
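// Roughly, if the chain users originally looked like
//   users -> (chain result of O)
// then after splicing they look like
//   users -> TokenFactor(chain result of O, NewResChain)
// so the new load is ordered as a peer of O rather than after O's users.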
6933 void PPCTargetLowering::spliceIntoChain(SDValue ResChain, 6934 SDValue NewResChain, 6935 SelectionDAG &DAG) const { 6936 if (!ResChain) 6937 return; 6938 6939 SDLoc dl(NewResChain); 6940 6941 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 6942 NewResChain, DAG.getUNDEF(MVT::Other)); 6943 assert(TF.getNode() != NewResChain.getNode() && 6944 "A new TF really is required here"); 6945 6946 DAG.ReplaceAllUsesOfValueWith(ResChain, TF); 6947 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain); 6948 } 6949 6950 /// \brief Analyze profitability of direct move 6951 /// prefer float load to int load plus direct move 6952 /// when there is no integer use of int load 6953 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const { 6954 SDNode *Origin = Op.getOperand(0).getNode(); 6955 if (Origin->getOpcode() != ISD::LOAD) 6956 return true; 6957 6958 // If there is no LXSIBZX/LXSIHZX, like Power8, 6959 // prefer direct move if the memory size is 1 or 2 bytes. 6960 MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand(); 6961 if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2) 6962 return true; 6963 6964 for (SDNode::use_iterator UI = Origin->use_begin(), 6965 UE = Origin->use_end(); 6966 UI != UE; ++UI) { 6967 6968 // Only look at the users of the loaded value. 6969 if (UI.getUse().get().getResNo() != 0) 6970 continue; 6971 6972 if (UI->getOpcode() != ISD::SINT_TO_FP && 6973 UI->getOpcode() != ISD::UINT_TO_FP) 6974 return true; 6975 } 6976 6977 return false; 6978 } 6979 6980 /// \brief Custom lowers integer to floating point conversions to use 6981 /// the direct move instructions available in ISA 2.07 to avoid the 6982 /// need for load/store combinations. 6983 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op, 6984 SelectionDAG &DAG, 6985 const SDLoc &dl) const { 6986 assert((Op.getValueType() == MVT::f32 || 6987 Op.getValueType() == MVT::f64) && 6988 "Invalid floating point type as target of conversion"); 6989 assert(Subtarget.hasFPCVT() && 6990 "Int to FP conversions with direct moves require FPCVT"); 6991 SDValue FP; 6992 SDValue Src = Op.getOperand(0); 6993 bool SinglePrec = Op.getValueType() == MVT::f32; 6994 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32; 6995 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP; 6996 unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) : 6997 (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU); 6998 6999 if (WordInt) { 7000 FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ, 7001 dl, MVT::f64, Src); 7002 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 7003 } 7004 else { 7005 FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src); 7006 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 7007 } 7008 7009 return FP; 7010 } 7011 7012 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, 7013 SelectionDAG &DAG) const { 7014 SDLoc dl(Op); 7015 7016 if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) { 7017 if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64) 7018 return SDValue(); 7019 7020 SDValue Value = Op.getOperand(0); 7021 // The values are now known to be -1 (false) or 1 (true). To convert this 7022 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 
7023 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 7024 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 7025 7026 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 7027 7028 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 7029 7030 if (Op.getValueType() != MVT::v4f64) 7031 Value = DAG.getNode(ISD::FP_ROUND, dl, 7032 Op.getValueType(), Value, 7033 DAG.getIntPtrConstant(1, dl)); 7034 return Value; 7035 } 7036 7037 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 7038 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 7039 return SDValue(); 7040 7041 if (Op.getOperand(0).getValueType() == MVT::i1) 7042 return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0), 7043 DAG.getConstantFP(1.0, dl, Op.getValueType()), 7044 DAG.getConstantFP(0.0, dl, Op.getValueType())); 7045 7046 // If we have direct moves, we can do all the conversion, skip the store/load 7047 // however, without FPCVT we can't do most conversions. 7048 if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) && 7049 Subtarget.isPPC64() && Subtarget.hasFPCVT()) 7050 return LowerINT_TO_FPDirectMove(Op, DAG, dl); 7051 7052 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) && 7053 "UINT_TO_FP is supported only with FPCVT"); 7054 7055 // If we have FCFIDS, then use it when converting to single-precision. 7056 // Otherwise, convert to double-precision and then round. 7057 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 7058 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS 7059 : PPCISD::FCFIDS) 7060 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU 7061 : PPCISD::FCFID); 7062 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 7063 ? MVT::f32 7064 : MVT::f64; 7065 7066 if (Op.getOperand(0).getValueType() == MVT::i64) { 7067 SDValue SINT = Op.getOperand(0); 7068 // When converting to single-precision, we actually need to convert 7069 // to double-precision first and then round to single-precision. 7070 // To avoid double-rounding effects during that operation, we have 7071 // to prepare the input operand. Bits that might be truncated when 7072 // converting to double-precision are replaced by a bit that won't 7073 // be lost at this stage, but is below the single-precision rounding 7074 // position. 7075 // 7076 // However, if -enable-unsafe-fp-math is in effect, accept double 7077 // rounding to avoid the extra overhead. 7078 if (Op.getValueType() == MVT::f32 && 7079 !Subtarget.hasFPCVT() && 7080 !DAG.getTarget().Options.UnsafeFPMath) { 7081 7082 // Twiddle input to make sure the low 11 bits are zero. (If this 7083 // is the case, we are guaranteed the value will fit into the 53 bit 7084 // mantissa of an IEEE double-precision value without rounding.) 7085 // If any of those low 11 bits were not zero originally, make sure 7086 // bit 12 (value 2048) is set instead, so that the final rounding 7087 // to single-precision gets the correct result. 
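// Worked example of the twiddling below: for SINT = (1 << 60) + 1 the low
// 11 bits are nonzero, so the rounded value becomes (1 << 60) + 2048 -- the
// low 11 bits are cleared and the "sticky" bit of value 2048 records that
// something was truncated. At most 53 significant bits then remain, so the
// i64->f64 conversion is exact and the only rounding happens in the final
// f64->f32 step, which the sticky bit steers correctly.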
7088 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 7089 SINT, DAG.getConstant(2047, dl, MVT::i64)); 7090 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 7091 Round, DAG.getConstant(2047, dl, MVT::i64)); 7092 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 7093 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 7094 Round, DAG.getConstant(-2048, dl, MVT::i64)); 7095 7096 // However, we cannot use that value unconditionally: if the magnitude 7097 // of the input value is small, the bit-twiddling we did above might 7098 // end up visibly changing the output. Fortunately, in that case, we 7099 // don't need to twiddle bits since the original input will convert 7100 // exactly to double-precision floating-point already. Therefore, 7101 // construct a conditional to use the original value if the top 11 7102 // bits are all sign-bit copies, and use the rounded value computed 7103 // above otherwise. 7104 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 7105 SINT, DAG.getConstant(53, dl, MVT::i32)); 7106 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 7107 Cond, DAG.getConstant(1, dl, MVT::i64)); 7108 Cond = DAG.getSetCC(dl, MVT::i32, 7109 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT); 7110 7111 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 7112 } 7113 7114 ReuseLoadInfo RLI; 7115 SDValue Bits; 7116 7117 MachineFunction &MF = DAG.getMachineFunction(); 7118 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 7119 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, 7120 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 7121 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 7122 } else if (Subtarget.hasLFIWAX() && 7123 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) { 7124 MachineMemOperand *MMO = 7125 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 7126 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7127 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7128 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl, 7129 DAG.getVTList(MVT::f64, MVT::Other), 7130 Ops, MVT::i32, MMO); 7131 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 7132 } else if (Subtarget.hasFPCVT() && 7133 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) { 7134 MachineMemOperand *MMO = 7135 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 7136 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7137 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7138 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl, 7139 DAG.getVTList(MVT::f64, MVT::Other), 7140 Ops, MVT::i32, MMO); 7141 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 7142 } else if (((Subtarget.hasLFIWAX() && 7143 SINT.getOpcode() == ISD::SIGN_EXTEND) || 7144 (Subtarget.hasFPCVT() && 7145 SINT.getOpcode() == ISD::ZERO_EXTEND)) && 7146 SINT.getOperand(0).getValueType() == MVT::i32) { 7147 MachineFrameInfo &MFI = MF.getFrameInfo(); 7148 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7149 7150 int FrameIdx = MFI.CreateStackObject(4, 4, false); 7151 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7152 7153 SDValue Store = 7154 DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx, 7155 MachinePointerInfo::getFixedStack( 7156 DAG.getMachineFunction(), FrameIdx)); 7157 7158 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 7159 "Expected an i32 store"); 7160 7161 RLI.Ptr = FIdx; 7162 RLI.Chain = Store; 7163 RLI.MPI = 7164 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7165 RLI.Alignment = 4; 7166 7167 MachineMemOperand *MMO = 7168 MF.getMachineMemOperand(RLI.MPI, 
MachineMemOperand::MOLoad, 4, 7169 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7170 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7171 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ? 7172 PPCISD::LFIWZX : PPCISD::LFIWAX, 7173 dl, DAG.getVTList(MVT::f64, MVT::Other), 7174 Ops, MVT::i32, MMO); 7175 } else 7176 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 7177 7178 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 7179 7180 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 7181 FP = DAG.getNode(ISD::FP_ROUND, dl, 7182 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 7183 return FP; 7184 } 7185 7186 assert(Op.getOperand(0).getValueType() == MVT::i32 && 7187 "Unhandled INT_TO_FP type in custom expander!"); 7188 // Since we only generate this in 64-bit mode, we can take advantage of 7189 // 64-bit registers. In particular, sign extend the input value into the 7190 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 7191 // then lfd it and fcfid it. 7192 MachineFunction &MF = DAG.getMachineFunction(); 7193 MachineFrameInfo &MFI = MF.getFrameInfo(); 7194 EVT PtrVT = getPointerTy(MF.getDataLayout()); 7195 7196 SDValue Ld; 7197 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 7198 ReuseLoadInfo RLI; 7199 bool ReusingLoad; 7200 if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI, 7201 DAG))) { 7202 int FrameIdx = MFI.CreateStackObject(4, 4, false); 7203 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7204 7205 SDValue Store = 7206 DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 7207 MachinePointerInfo::getFixedStack( 7208 DAG.getMachineFunction(), FrameIdx)); 7209 7210 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 7211 "Expected an i32 store"); 7212 7213 RLI.Ptr = FIdx; 7214 RLI.Chain = Store; 7215 RLI.MPI = 7216 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7217 RLI.Alignment = 4; 7218 } 7219 7220 MachineMemOperand *MMO = 7221 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 7222 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7223 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7224 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 7225 PPCISD::LFIWZX : PPCISD::LFIWAX, 7226 dl, DAG.getVTList(MVT::f64, MVT::Other), 7227 Ops, MVT::i32, MMO); 7228 if (ReusingLoad) 7229 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG); 7230 } else { 7231 assert(Subtarget.isPPC64() && 7232 "i32->FP without LFIWAX supported only on PPC64"); 7233 7234 int FrameIdx = MFI.CreateStackObject(8, 8, false); 7235 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7236 7237 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, 7238 Op.getOperand(0)); 7239 7240 // STD the extended value into the stack slot. 7241 SDValue Store = DAG.getStore( 7242 DAG.getEntryNode(), dl, Ext64, FIdx, 7243 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx)); 7244 7245 // Load the value as a double. 7246 Ld = DAG.getLoad( 7247 MVT::f64, dl, Store, FIdx, 7248 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx)); 7249 } 7250 7251 // FCFID it and return it. 
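// (fcfid interprets the doubleword now sitting in the FPR as a signed
// 64-bit integer and converts it to f64; with FPCVT available, the
// fcfidu/fcfids/fcfidus variants selected into FCFOp above handle the
// unsigned and single-precision cases directly, otherwise the FP_ROUND
// below performs the f64->f32 step.)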
7252 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld); 7253 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 7254 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, 7255 DAG.getIntPtrConstant(0, dl)); 7256 return FP; 7257 } 7258 7259 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 7260 SelectionDAG &DAG) const { 7261 SDLoc dl(Op); 7262 /* 7263 The rounding mode is in bits 30:31 of FPSR, and has the following 7264 settings: 7265 00 Round to nearest 7266 01 Round to 0 7267 10 Round to +inf 7268 11 Round to -inf 7269 7270 FLT_ROUNDS, on the other hand, expects the following: 7271 -1 Undefined 7272 0 Round to 0 7273 1 Round to nearest 7274 2 Round to +inf 7275 3 Round to -inf 7276 7277 To perform the conversion, we do: 7278 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1)) 7279 */ 7280 7281 MachineFunction &MF = DAG.getMachineFunction(); 7282 EVT VT = Op.getValueType(); 7283 EVT PtrVT = getPointerTy(MF.getDataLayout()); 7284 7285 // Save FP Control Word to register 7286 EVT NodeTys[] = { 7287 MVT::f64, // return register 7288 MVT::Glue // unused in this context 7289 }; 7290 SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None); 7291 7292 // Save FP register to stack slot 7293 int SSFI = MF.getFrameInfo().CreateStackObject(8, 8, false); 7294 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT); 7295 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, StackSlot, 7296 MachinePointerInfo()); 7297 7298 // Load FP Control Word from low 32 bits of stack slot. 7299 SDValue Four = DAG.getConstant(4, dl, PtrVT); 7300 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four); 7301 SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo()); 7302 7303 // Transform as necessary 7304 SDValue CWD1 = 7305 DAG.getNode(ISD::AND, dl, MVT::i32, 7306 CWD, DAG.getConstant(3, dl, MVT::i32)); 7307 SDValue CWD2 = 7308 DAG.getNode(ISD::SRL, dl, MVT::i32, 7309 DAG.getNode(ISD::AND, dl, MVT::i32, 7310 DAG.getNode(ISD::XOR, dl, MVT::i32, 7311 CWD, DAG.getConstant(3, dl, MVT::i32)), 7312 DAG.getConstant(3, dl, MVT::i32)), 7313 DAG.getConstant(1, dl, MVT::i32)); 7314 7315 SDValue RetVal = 7316 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2); 7317 7318 return DAG.getNode((VT.getSizeInBits() < 16 ? 7319 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal); 7320 } 7321 7322 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const { 7323 EVT VT = Op.getValueType(); 7324 unsigned BitWidth = VT.getSizeInBits(); 7325 SDLoc dl(Op); 7326 assert(Op.getNumOperands() == 3 && 7327 VT == Op.getOperand(1).getValueType() && 7328 "Unexpected SHL!"); 7329 7330 // Expand into a bunch of logical ops. Note that these ops 7331 // depend on the PPC behavior for oversized shift amounts. 
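// Conceptually, for a (2 * BitWidth)-bit left shift by Amt this computes
//   OutLo = Lo << Amt
//   OutHi = (Hi << Amt) | (Lo >> (BitWidth - Amt)) | (Lo << (Amt - BitWidth))
// relying on PPC shifts yielding 0 for (masked) shift amounts in
// [BitWidth, 2*BitWidth), so the term that does not apply for a given Amt
// contributes nothing and no compare or select on Amt is needed.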
7332 SDValue Lo = Op.getOperand(0); 7333 SDValue Hi = Op.getOperand(1); 7334 SDValue Amt = Op.getOperand(2); 7335 EVT AmtVT = Amt.getValueType(); 7336 7337 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 7338 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 7339 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 7340 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 7341 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 7342 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 7343 DAG.getConstant(-BitWidth, dl, AmtVT)); 7344 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 7345 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 7346 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 7347 SDValue OutOps[] = { OutLo, OutHi }; 7348 return DAG.getMergeValues(OutOps, dl); 7349 } 7350 7351 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 7352 EVT VT = Op.getValueType(); 7353 SDLoc dl(Op); 7354 unsigned BitWidth = VT.getSizeInBits(); 7355 assert(Op.getNumOperands() == 3 && 7356 VT == Op.getOperand(1).getValueType() && 7357 "Unexpected SRL!"); 7358 7359 // Expand into a bunch of logical ops. Note that these ops 7360 // depend on the PPC behavior for oversized shift amounts. 7361 SDValue Lo = Op.getOperand(0); 7362 SDValue Hi = Op.getOperand(1); 7363 SDValue Amt = Op.getOperand(2); 7364 EVT AmtVT = Amt.getValueType(); 7365 7366 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 7367 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 7368 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 7369 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 7370 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 7371 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 7372 DAG.getConstant(-BitWidth, dl, AmtVT)); 7373 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 7374 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 7375 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 7376 SDValue OutOps[] = { OutLo, OutHi }; 7377 return DAG.getMergeValues(OutOps, dl); 7378 } 7379 7380 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 7381 SDLoc dl(Op); 7382 EVT VT = Op.getValueType(); 7383 unsigned BitWidth = VT.getSizeInBits(); 7384 assert(Op.getNumOperands() == 3 && 7385 VT == Op.getOperand(1).getValueType() && 7386 "Unexpected SRA!"); 7387 7388 // Expand into a bunch of logical ops, followed by a select_cc. 7389 SDValue Lo = Op.getOperand(0); 7390 SDValue Hi = Op.getOperand(1); 7391 SDValue Amt = Op.getOperand(2); 7392 EVT AmtVT = Amt.getValueType(); 7393 7394 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 7395 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 7396 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 7397 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 7398 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 7399 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 7400 DAG.getConstant(-BitWidth, dl, AmtVT)); 7401 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 7402 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 7403 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT), 7404 Tmp4, Tmp6, ISD::SETLE); 7405 SDValue OutOps[] = { OutLo, OutHi }; 7406 return DAG.getMergeValues(OutOps, dl); 7407 } 7408 7409 //===----------------------------------------------------------------------===// 7410 // Vector related lowering. 
7411 // 7412 7413 /// BuildSplatI - Build a canonical splati of Val with an element size of 7414 /// SplatSize. Cast the result to VT. 7415 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 7416 SelectionDAG &DAG, const SDLoc &dl) { 7417 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 7418 7419 static const MVT VTys[] = { // canonical VT to use for each size. 7420 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 7421 }; 7422 7423 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 7424 7425 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 7426 if (Val == -1) 7427 SplatSize = 1; 7428 7429 EVT CanonicalVT = VTys[SplatSize-1]; 7430 7431 // Build a canonical splat for this value. 7432 return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT)); 7433 } 7434 7435 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 7436 /// specified intrinsic ID. 7437 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG, 7438 const SDLoc &dl, EVT DestVT = MVT::Other) { 7439 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 7440 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7441 DAG.getConstant(IID, dl, MVT::i32), Op); 7442 } 7443 7444 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 7445 /// specified intrinsic ID. 7446 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 7447 SelectionDAG &DAG, const SDLoc &dl, 7448 EVT DestVT = MVT::Other) { 7449 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 7450 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7451 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS); 7452 } 7453 7454 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 7455 /// specified intrinsic ID. 7456 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 7457 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl, 7458 EVT DestVT = MVT::Other) { 7459 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 7460 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7461 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2); 7462 } 7463 7464 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 7465 /// amount. The result has the specified value type. 7466 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT, 7467 SelectionDAG &DAG, const SDLoc &dl) { 7468 // Force LHS/RHS to be the right type. 7469 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 7470 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 7471 7472 int Ops[16]; 7473 for (unsigned i = 0; i != 16; ++i) 7474 Ops[i] = i + Amt; 7475 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 7476 return DAG.getNode(ISD::BITCAST, dl, VT, T); 7477 } 7478 7479 /// Do we have an efficient pattern in a .td file for this node? 7480 /// 7481 /// \param V - pointer to the BuildVectorSDNode being matched 7482 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves? 7483 /// 7484 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR 7485 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where 7486 /// the opposite is true (expansion is beneficial) are: 7487 /// - The node builds a vector out of integers that are not 32 or 64-bits 7488 /// - The node builds a vector out of constants 7489 /// - The node is a "load-and-splat" 7490 /// In all other cases, we will choose to keep the BUILD_VECTOR. 
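/// \return true if the BUILD_VECTOR should be kept because an efficient
/// pattern exists for it, false if expanding it is expected to be cheaper.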
7491 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V, 7492 bool HasDirectMove, 7493 bool HasP8Vector) { 7494 EVT VecVT = V->getValueType(0); 7495 bool RightType = VecVT == MVT::v2f64 || 7496 (HasP8Vector && VecVT == MVT::v4f32) || 7497 (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32)); 7498 if (!RightType) 7499 return false; 7500 7501 bool IsSplat = true; 7502 bool IsLoad = false; 7503 SDValue Op0 = V->getOperand(0); 7504 7505 // This function is called in a block that confirms the node is not a constant 7506 // splat. So a constant BUILD_VECTOR here means the vector is built out of 7507 // different constants. 7508 if (V->isConstant()) 7509 return false; 7510 for (int i = 0, e = V->getNumOperands(); i < e; ++i) { 7511 if (V->getOperand(i).isUndef()) 7512 return false; 7513 // We want to expand nodes that represent load-and-splat even if the 7514 // loaded value is a floating point truncation or conversion to int. 7515 if (V->getOperand(i).getOpcode() == ISD::LOAD || 7516 (V->getOperand(i).getOpcode() == ISD::FP_ROUND && 7517 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) || 7518 (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT && 7519 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) || 7520 (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT && 7521 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD)) 7522 IsLoad = true; 7523 // If the operands are different or the input is not a load and has more 7524 // uses than just this BV node, then it isn't a splat. 7525 if (V->getOperand(i) != Op0 || 7526 (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode()))) 7527 IsSplat = false; 7528 } 7529 return !(IsSplat && IsLoad); 7530 } 7531 7532 // If this is a case we can't handle, return null and let the default 7533 // expansion code take care of it. If we CAN select this case, and if it 7534 // selects to a single instruction, return Op. Otherwise, if we can codegen 7535 // this case more efficiently than a constant pool load, lower it to the 7536 // sequence of ops that should be used. 7537 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 7538 SelectionDAG &DAG) const { 7539 SDLoc dl(Op); 7540 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 7541 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 7542 7543 if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) { 7544 // We first build an i32 vector, load it into a QPX register, 7545 // then convert it to a floating-point vector and compare it 7546 // to a zero vector to get the boolean result. 
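    // (If every operand is a constant we can skip the store/reload below and
    // instead load an equivalent <+/-1.0> vector straight from the constant
    // pool.)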
7547 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 7548 int FrameIdx = MFI.CreateStackObject(16, 16, false); 7549 MachinePointerInfo PtrInfo = 7550 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7551 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7552 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7553 7554 assert(BVN->getNumOperands() == 4 && 7555 "BUILD_VECTOR for v4i1 does not have 4 operands"); 7556 7557 bool IsConst = true; 7558 for (unsigned i = 0; i < 4; ++i) { 7559 if (BVN->getOperand(i).isUndef()) continue; 7560 if (!isa<ConstantSDNode>(BVN->getOperand(i))) { 7561 IsConst = false; 7562 break; 7563 } 7564 } 7565 7566 if (IsConst) { 7567 Constant *One = 7568 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0); 7569 Constant *NegOne = 7570 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0); 7571 7572 Constant *CV[4]; 7573 for (unsigned i = 0; i < 4; ++i) { 7574 if (BVN->getOperand(i).isUndef()) 7575 CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext())); 7576 else if (isNullConstant(BVN->getOperand(i))) 7577 CV[i] = NegOne; 7578 else 7579 CV[i] = One; 7580 } 7581 7582 Constant *CP = ConstantVector::get(CV); 7583 SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()), 7584 16 /* alignment */); 7585 7586 SDValue Ops[] = {DAG.getEntryNode(), CPIdx}; 7587 SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other}); 7588 return DAG.getMemIntrinsicNode( 7589 PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32, 7590 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 7591 } 7592 7593 SmallVector<SDValue, 4> Stores; 7594 for (unsigned i = 0; i < 4; ++i) { 7595 if (BVN->getOperand(i).isUndef()) continue; 7596 7597 unsigned Offset = 4*i; 7598 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 7599 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 7600 7601 unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize(); 7602 if (StoreSize > 4) { 7603 Stores.push_back( 7604 DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx, 7605 PtrInfo.getWithOffset(Offset), MVT::i32)); 7606 } else { 7607 SDValue StoreValue = BVN->getOperand(i); 7608 if (StoreSize < 4) 7609 StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue); 7610 7611 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx, 7612 PtrInfo.getWithOffset(Offset))); 7613 } 7614 } 7615 7616 SDValue StoreChain; 7617 if (!Stores.empty()) 7618 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 7619 else 7620 StoreChain = DAG.getEntryNode(); 7621 7622 // Now load from v4i32 into the QPX register; this will extend it to 7623 // v4i64 but not yet convert it to a floating point. Nevertheless, this 7624 // is typed as v4f64 because the QPX register integer states are not 7625 // explicitly represented. 7626 7627 SDValue Ops[] = {StoreChain, 7628 DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32), 7629 FIdx}; 7630 SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other}); 7631 7632 SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, 7633 dl, VTs, Ops, MVT::v4i32, PtrInfo); 7634 LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 7635 DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32), 7636 LoadedVect); 7637 7638 SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64); 7639 7640 return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ); 7641 } 7642 7643 // All other QPX vectors are handled by generic code. 
7644 if (Subtarget.hasQPX()) 7645 return SDValue(); 7646 7647 // Check if this is a splat of a constant value. 7648 APInt APSplatBits, APSplatUndef; 7649 unsigned SplatBitSize; 7650 bool HasAnyUndefs; 7651 if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize, 7652 HasAnyUndefs, 0, !Subtarget.isLittleEndian()) || 7653 SplatBitSize > 32) { 7654 // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be 7655 // lowered to VSX instructions under certain conditions. 7656 // Without VSX, there is no pattern more efficient than expanding the node. 7657 if (Subtarget.hasVSX() && 7658 haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(), 7659 Subtarget.hasP8Vector())) 7660 return Op; 7661 return SDValue(); 7662 } 7663 7664 unsigned SplatBits = APSplatBits.getZExtValue(); 7665 unsigned SplatUndef = APSplatUndef.getZExtValue(); 7666 unsigned SplatSize = SplatBitSize / 8; 7667 7668 // First, handle single instruction cases. 7669 7670 // All zeros? 7671 if (SplatBits == 0) { 7672 // Canonicalize all zero vectors to be v4i32. 7673 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 7674 SDValue Z = DAG.getConstant(0, dl, MVT::v4i32); 7675 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z); 7676 } 7677 return Op; 7678 } 7679 7680 // We have XXSPLTIB for constant splats one byte wide 7681 if (Subtarget.hasP9Vector() && SplatSize == 1) { 7682 // This is a splat of 1-byte elements with some elements potentially undef. 7683 // Rather than trying to match undef in the SDAG patterns, ensure that all 7684 // elements are the same constant. 7685 if (HasAnyUndefs || ISD::isBuildVectorAllOnes(BVN)) { 7686 SmallVector<SDValue, 16> Ops(16, DAG.getConstant(SplatBits, 7687 dl, MVT::i32)); 7688 SDValue NewBV = DAG.getBuildVector(MVT::v16i8, dl, Ops); 7689 if (Op.getValueType() != MVT::v16i8) 7690 return DAG.getBitcast(Op.getValueType(), NewBV); 7691 return NewBV; 7692 } 7693 7694 // BuildVectorSDNode::isConstantSplat() is actually pretty smart. It'll 7695 // detect that constant splats like v8i16: 0xABAB are really just splats 7696 // of a 1-byte constant. In this case, we need to convert the node to a 7697 // splat of v16i8 and a bitcast. 7698 if (Op.getValueType() != MVT::v16i8) 7699 return DAG.getBitcast(Op.getValueType(), 7700 DAG.getConstant(SplatBits, dl, MVT::v16i8)); 7701 7702 return Op; 7703 } 7704 7705 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 7706 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >> 7707 (32-SplatBitSize)); 7708 if (SextVal >= -16 && SextVal <= 15) 7709 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl); 7710 7711 // Two instruction sequences. 7712 7713 // If this value is in the range [-32,30] and is even, use: 7714 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2) 7715 // If this value is in the range [17,31] and is odd, use: 7716 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16) 7717 // If this value is in the range [-31,-17] and is odd, use: 7718 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16) 7719 // Note the last two are three-instruction sequences. 7720 if (SextVal >= -32 && SextVal <= 31) { 7721 // To avoid having these optimizations undone by constant folding, 7722 // we convert to a pseudo that will be expanded later into one of 7723 // the above forms. 7724 SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32); 7725 EVT VT = (SplatSize == 1 ? MVT::v16i8 : 7726 (SplatSize == 2 ? 
MVT::v8i16 : MVT::v4i32)); 7727 SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32); 7728 SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize); 7729 if (VT == Op.getValueType()) 7730 return RetVal; 7731 else 7732 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal); 7733 } 7734 7735 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is 7736 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important 7737 // for fneg/fabs. 7738 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) { 7739 // Make -1 and vspltisw -1: 7740 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl); 7741 7742 // Make the VSLW intrinsic, computing 0x8000_0000. 7743 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV, 7744 OnesV, DAG, dl); 7745 7746 // xor by OnesV to invert it. 7747 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV); 7748 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 7749 } 7750 7751 // Check to see if this is a wide variety of vsplti*, binop self cases. 7752 static const signed char SplatCsts[] = { 7753 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7, 7754 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16 7755 }; 7756 7757 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) { 7758 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for 7759 // cases which are ambiguous (e.g. formation of 0x8000_0000). 'vsplti -1' 7760 int i = SplatCsts[idx]; 7761 7762 // Figure out what shift amount will be used by altivec if shifted by i in 7763 // this splat size. 7764 unsigned TypeShiftAmt = i & (SplatBitSize-1); 7765 7766 // vsplti + shl self. 7767 if (SextVal == (int)((unsigned)i << TypeShiftAmt)) { 7768 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 7769 static const unsigned IIDs[] = { // Intrinsic to use for each size. 7770 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0, 7771 Intrinsic::ppc_altivec_vslw 7772 }; 7773 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 7774 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 7775 } 7776 7777 // vsplti + srl self. 7778 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 7779 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 7780 static const unsigned IIDs[] = { // Intrinsic to use for each size. 7781 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0, 7782 Intrinsic::ppc_altivec_vsrw 7783 }; 7784 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 7785 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 7786 } 7787 7788 // vsplti + sra self. 7789 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 7790 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 7791 static const unsigned IIDs[] = { // Intrinsic to use for each size. 7792 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0, 7793 Intrinsic::ppc_altivec_vsraw 7794 }; 7795 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 7796 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 7797 } 7798 7799 // vsplti + rol self. 7800 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) | 7801 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) { 7802 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 7803 static const unsigned IIDs[] = { // Intrinsic to use for each size. 
7804 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 7805 Intrinsic::ppc_altivec_vrlw 7806 }; 7807 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 7808 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 7809 } 7810 7811 // t = vsplti c, result = vsldoi t, t, 1 7812 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) { 7813 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 7814 unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1; 7815 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 7816 } 7817 // t = vsplti c, result = vsldoi t, t, 2 7818 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) { 7819 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 7820 unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2; 7821 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 7822 } 7823 // t = vsplti c, result = vsldoi t, t, 3 7824 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) { 7825 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 7826 unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3; 7827 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 7828 } 7829 } 7830 7831 return SDValue(); 7832 } 7833 7834 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 7835 /// the specified operations to build the shuffle. 7836 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 7837 SDValue RHS, SelectionDAG &DAG, 7838 const SDLoc &dl) { 7839 unsigned OpNum = (PFEntry >> 26) & 0x0F; 7840 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 7841 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 7842 7843 enum { 7844 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 7845 OP_VMRGHW, 7846 OP_VMRGLW, 7847 OP_VSPLTISW0, 7848 OP_VSPLTISW1, 7849 OP_VSPLTISW2, 7850 OP_VSPLTISW3, 7851 OP_VSLDOI4, 7852 OP_VSLDOI8, 7853 OP_VSLDOI12 7854 }; 7855 7856 if (OpNum == OP_COPY) { 7857 if (LHSID == (1*9+2)*9+3) return LHS; 7858 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 7859 return RHS; 7860 } 7861 7862 SDValue OpLHS, OpRHS; 7863 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 7864 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 7865 7866 int ShufIdxs[16]; 7867 switch (OpNum) { 7868 default: llvm_unreachable("Unknown i32 permute!"); 7869 case OP_VMRGHW: 7870 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 7871 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 7872 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 7873 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 7874 break; 7875 case OP_VMRGLW: 7876 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 7877 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 7878 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 7879 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 7880 break; 7881 case OP_VSPLTISW0: 7882 for (unsigned i = 0; i != 16; ++i) 7883 ShufIdxs[i] = (i&3)+0; 7884 break; 7885 case OP_VSPLTISW1: 7886 for (unsigned i = 0; i != 16; ++i) 7887 ShufIdxs[i] = (i&3)+4; 7888 break; 7889 case OP_VSPLTISW2: 7890 for (unsigned i = 0; i != 16; ++i) 7891 ShufIdxs[i] = (i&3)+8; 7892 break; 7893 case OP_VSPLTISW3: 7894 for (unsigned i = 0; i != 16; ++i) 7895 ShufIdxs[i] = (i&3)+12; 7896 break; 7897 case OP_VSLDOI4: 7898 return 
    BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI8:
    return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI12:
    return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
  }
  EVT VT = OpLHS.getValueType();
  OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
  OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
  return DAG.getNode(ISD::BITCAST, dl, VT, T);
}

/// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
/// by the VINSERTB instruction introduced in ISA 3.0, else just return default
/// SDValue.
SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
                                           SelectionDAG &DAG) const {
  const unsigned BytesInVector = 16;
  bool IsLE = Subtarget.isLittleEndian();
  SDLoc dl(N);
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);
  unsigned ShiftElts = 0, InsertAtByte = 0;
  bool Swap = false;

  // Shifts required to get the byte we want at element 7.
  unsigned LittleEndianShifts[] = {8, 7, 6, 5, 4, 3, 2, 1,
                                   0, 15, 14, 13, 12, 11, 10, 9};
  unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
                                1, 2, 3, 4, 5, 6, 7, 8};

  ArrayRef<int> Mask = N->getMask();
  int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};

  // For each mask element, find out if we're just inserting something
  // from V2 into V1 or vice versa.
  // Possible permutations inserting an element from V2 into V1:
  //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
  //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
  //   ...
  //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
  // Inserting from V1 into V2 will be similar, except mask range will be
  // [16,31].

  bool FoundCandidate = false;
  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
  // Go through the mask of bytes to find an element that's being moved
  // from one vector to the other.
  for (unsigned i = 0; i < BytesInVector; ++i) {
    unsigned CurrentElement = Mask[i];
    // If the 2nd operand is undefined, we should only look for the VINSERTB
    // source element (byte 7 in BE order, 8 in LE order) in the Mask.
    if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
      continue;

    bool OtherElementsInOrder = true;
    // Examine the other elements in the Mask to see if they're in original
    // order.
    for (unsigned j = 0; j < BytesInVector; ++j) {
      if (j == i)
        continue;
      // If CurrentElement is from V1 [0,15], then we expect the rest of the
      // Mask to be from V2 [16,31] and vice versa. Unless the 2nd operand is
      // undefined, in which case we always assume we're picking from the
      // 1st operand.
      int MaskOffset =
          (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
      if (Mask[j] != OriginalOrder[j] + MaskOffset) {
        OtherElementsInOrder = false;
        break;
      }
    }
    // If the other elements are in original order, we record the number of
    // shifts we need to get the element we want into element 7. Also record
    // which byte in the vector we should insert into.
7976 if (OtherElementsInOrder) { 7977 // If 2nd operand is undefined, we assume no shifts and no swapping. 7978 if (V2.isUndef()) { 7979 ShiftElts = 0; 7980 Swap = false; 7981 } else { 7982 // Only need the last 4-bits for shifts because operands will be swapped if CurrentElement is >= 2^4. 7983 ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF] 7984 : BigEndianShifts[CurrentElement & 0xF]; 7985 Swap = CurrentElement < BytesInVector; 7986 } 7987 InsertAtByte = IsLE ? BytesInVector - (i + 1) : i; 7988 FoundCandidate = true; 7989 break; 7990 } 7991 } 7992 7993 if (!FoundCandidate) 7994 return SDValue(); 7995 7996 // Candidate found, construct the proper SDAG sequence with VINSERTB, 7997 // optionally with VECSHL if shift is required. 7998 if (Swap) 7999 std::swap(V1, V2); 8000 if (V2.isUndef()) 8001 V2 = V1; 8002 if (ShiftElts) { 8003 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2, 8004 DAG.getConstant(ShiftElts, dl, MVT::i32)); 8005 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl, 8006 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8007 } 8008 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2, 8009 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8010 } 8011 8012 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled 8013 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default 8014 /// SDValue. 8015 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N, 8016 SelectionDAG &DAG) const { 8017 const unsigned NumHalfWords = 8; 8018 const unsigned BytesInVector = NumHalfWords * 2; 8019 // Check that the shuffle is on half-words. 8020 if (!isNByteElemShuffleMask(N, 2, 1)) 8021 return SDValue(); 8022 8023 bool IsLE = Subtarget.isLittleEndian(); 8024 SDLoc dl(N); 8025 SDValue V1 = N->getOperand(0); 8026 SDValue V2 = N->getOperand(1); 8027 unsigned ShiftElts = 0, InsertAtByte = 0; 8028 bool Swap = false; 8029 8030 // Shifts required to get the half-word we want at element 3. 8031 unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5}; 8032 unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4}; 8033 8034 uint32_t Mask = 0; 8035 uint32_t OriginalOrderLow = 0x1234567; 8036 uint32_t OriginalOrderHigh = 0x89ABCDEF; 8037 // Now we look at mask elements 0,2,4,6,8,10,12,14. Pack the mask into a 8038 // 32-bit space, only need 4-bit nibbles per element. 8039 for (unsigned i = 0; i < NumHalfWords; ++i) { 8040 unsigned MaskShift = (NumHalfWords - 1 - i) * 4; 8041 Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift); 8042 } 8043 8044 // For each mask element, find out if we're just inserting something 8045 // from V2 into V1 or vice versa. Possible permutations inserting an element 8046 // from V2 into V1: 8047 // X, 1, 2, 3, 4, 5, 6, 7 8048 // 0, X, 2, 3, 4, 5, 6, 7 8049 // 0, 1, X, 3, 4, 5, 6, 7 8050 // 0, 1, 2, X, 4, 5, 6, 7 8051 // 0, 1, 2, 3, X, 5, 6, 7 8052 // 0, 1, 2, 3, 4, X, 6, 7 8053 // 0, 1, 2, 3, 4, 5, X, 7 8054 // 0, 1, 2, 3, 4, 5, 6, X 8055 // Inserting from V1 into V2 will be similar, except mask range will be [8,15]. 8056 8057 bool FoundCandidate = false; 8058 // Go through the mask of half-words to find an element that's being moved 8059 // from one vector to the other. 
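  // When we find one whose remaining elements are all in their original
  // positions, record the half-word rotation needed to bring it to the
  // VINSERTH source slot and the byte position to insert it at.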
8060 for (unsigned i = 0; i < NumHalfWords; ++i) { 8061 unsigned MaskShift = (NumHalfWords - 1 - i) * 4; 8062 uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF; 8063 uint32_t MaskOtherElts = ~(0xF << MaskShift); 8064 uint32_t TargetOrder = 0x0; 8065 8066 // If both vector operands for the shuffle are the same vector, the mask 8067 // will contain only elements from the first one and the second one will be 8068 // undef. 8069 if (V2.isUndef()) { 8070 ShiftElts = 0; 8071 unsigned VINSERTHSrcElem = IsLE ? 4 : 3; 8072 TargetOrder = OriginalOrderLow; 8073 Swap = false; 8074 // Skip if not the correct element or mask of other elements don't equal 8075 // to our expected order. 8076 if (MaskOneElt == VINSERTHSrcElem && 8077 (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) { 8078 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2; 8079 FoundCandidate = true; 8080 break; 8081 } 8082 } else { // If both operands are defined. 8083 // Target order is [8,15] if the current mask is between [0,7]. 8084 TargetOrder = 8085 (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow; 8086 // Skip if mask of other elements don't equal our expected order. 8087 if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) { 8088 // We only need the last 3 bits for the number of shifts. 8089 ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7] 8090 : BigEndianShifts[MaskOneElt & 0x7]; 8091 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2; 8092 Swap = MaskOneElt < NumHalfWords; 8093 FoundCandidate = true; 8094 break; 8095 } 8096 } 8097 } 8098 8099 if (!FoundCandidate) 8100 return SDValue(); 8101 8102 // Candidate found, construct the proper SDAG sequence with VINSERTH, 8103 // optionally with VECSHL if shift is required. 8104 if (Swap) 8105 std::swap(V1, V2); 8106 if (V2.isUndef()) 8107 V2 = V1; 8108 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 8109 if (ShiftElts) { 8110 // Double ShiftElts because we're left shifting on v16i8 type. 8111 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2, 8112 DAG.getConstant(2 * ShiftElts, dl, MVT::i32)); 8113 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl); 8114 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2, 8115 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8116 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 8117 } 8118 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2); 8119 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2, 8120 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8121 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 8122 } 8123 8124 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 8125 /// is a shuffle we can handle in a single instruction, return it. Otherwise, 8126 /// return the code it can be lowered into. Worst case, it can always be 8127 /// lowered into a vperm. 
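/// The single-instruction forms tried below include the Power9
/// xxinsertw/vinserth/vinsertb patterns, the VSX xxsldwi/xxpermdi/xxbr[hwdq]
/// and splat forms, and the classic Altivec immediate shuffles; the
/// perfect-shuffle table is consulted before finally falling back to vperm.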
8128 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 8129 SelectionDAG &DAG) const { 8130 SDLoc dl(Op); 8131 SDValue V1 = Op.getOperand(0); 8132 SDValue V2 = Op.getOperand(1); 8133 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 8134 EVT VT = Op.getValueType(); 8135 bool isLittleEndian = Subtarget.isLittleEndian(); 8136 8137 unsigned ShiftElts, InsertAtByte; 8138 bool Swap = false; 8139 if (Subtarget.hasP9Vector() && 8140 PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap, 8141 isLittleEndian)) { 8142 if (Swap) 8143 std::swap(V1, V2); 8144 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 8145 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2); 8146 if (ShiftElts) { 8147 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2, 8148 DAG.getConstant(ShiftElts, dl, MVT::i32)); 8149 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl, 8150 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8151 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 8152 } 8153 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2, 8154 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8155 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 8156 } 8157 8158 if (Subtarget.hasP9Altivec()) { 8159 SDValue NewISDNode; 8160 if ((NewISDNode = lowerToVINSERTH(SVOp, DAG))) 8161 return NewISDNode; 8162 8163 if ((NewISDNode = lowerToVINSERTB(SVOp, DAG))) 8164 return NewISDNode; 8165 } 8166 8167 if (Subtarget.hasVSX() && 8168 PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) { 8169 if (Swap) 8170 std::swap(V1, V2); 8171 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 8172 SDValue Conv2 = 8173 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2); 8174 8175 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2, 8176 DAG.getConstant(ShiftElts, dl, MVT::i32)); 8177 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl); 8178 } 8179 8180 if (Subtarget.hasVSX() && 8181 PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) { 8182 if (Swap) 8183 std::swap(V1, V2); 8184 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1); 8185 SDValue Conv2 = 8186 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? 
V1 : V2); 8187 8188 SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2, 8189 DAG.getConstant(ShiftElts, dl, MVT::i32)); 8190 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI); 8191 } 8192 8193 if (Subtarget.hasP9Vector()) { 8194 if (PPC::isXXBRHShuffleMask(SVOp)) { 8195 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 8196 SDValue ReveHWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v8i16, Conv); 8197 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord); 8198 } else if (PPC::isXXBRWShuffleMask(SVOp)) { 8199 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 8200 SDValue ReveWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v4i32, Conv); 8201 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord); 8202 } else if (PPC::isXXBRDShuffleMask(SVOp)) { 8203 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1); 8204 SDValue ReveDWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Conv); 8205 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord); 8206 } else if (PPC::isXXBRQShuffleMask(SVOp)) { 8207 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1); 8208 SDValue ReveQWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v1i128, Conv); 8209 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord); 8210 } 8211 } 8212 8213 if (Subtarget.hasVSX()) { 8214 if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) { 8215 int SplatIdx = PPC::getVSPLTImmediate(SVOp, 4, DAG); 8216 8217 // If the source for the shuffle is a scalar_to_vector that came from a 8218 // 32-bit load, it will have used LXVWSX so we don't need to splat again. 8219 if (Subtarget.hasP9Vector() && 8220 ((isLittleEndian && SplatIdx == 3) || 8221 (!isLittleEndian && SplatIdx == 0))) { 8222 SDValue Src = V1.getOperand(0); 8223 if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR && 8224 Src.getOperand(0).getOpcode() == ISD::LOAD && 8225 Src.getOperand(0).hasOneUse()) 8226 return V1; 8227 } 8228 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 8229 SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv, 8230 DAG.getConstant(SplatIdx, dl, MVT::i32)); 8231 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat); 8232 } 8233 8234 // Left shifts of 8 bytes are actually swaps. Convert accordingly. 8235 if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) { 8236 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1); 8237 SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv); 8238 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap); 8239 } 8240 } 8241 8242 if (Subtarget.hasQPX()) { 8243 if (VT.getVectorNumElements() != 4) 8244 return SDValue(); 8245 8246 if (V2.isUndef()) V2 = V1; 8247 8248 int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp); 8249 if (AlignIdx != -1) { 8250 return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2, 8251 DAG.getConstant(AlignIdx, dl, MVT::i32)); 8252 } else if (SVOp->isSplat()) { 8253 int SplatIdx = SVOp->getSplatIndex(); 8254 if (SplatIdx >= 4) { 8255 std::swap(V1, V2); 8256 SplatIdx -= 4; 8257 } 8258 8259 return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1, 8260 DAG.getConstant(SplatIdx, dl, MVT::i32)); 8261 } 8262 8263 // Lower this into a qvgpci/qvfperm pair. 8264 8265 // Compute the qvgpci literal 8266 unsigned idx = 0; 8267 for (unsigned i = 0; i < 4; ++i) { 8268 int m = SVOp->getMaskElt(i); 8269 unsigned mm = m >= 0 ? 
(unsigned) m : i; 8270 idx |= mm << (3-i)*3; 8271 } 8272 8273 SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64, 8274 DAG.getConstant(idx, dl, MVT::i32)); 8275 return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3); 8276 } 8277 8278 // Cases that are handled by instructions that take permute immediates 8279 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 8280 // selected by the instruction selector. 8281 if (V2.isUndef()) { 8282 if (PPC::isSplatShuffleMask(SVOp, 1) || 8283 PPC::isSplatShuffleMask(SVOp, 2) || 8284 PPC::isSplatShuffleMask(SVOp, 4) || 8285 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) || 8286 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) || 8287 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 || 8288 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) || 8289 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) || 8290 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) || 8291 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) || 8292 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) || 8293 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) || 8294 (Subtarget.hasP8Altivec() && ( 8295 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) || 8296 PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) || 8297 PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) { 8298 return Op; 8299 } 8300 } 8301 8302 // Altivec has a variety of "shuffle immediates" that take two vector inputs 8303 // and produce a fixed permutation. If any of these match, do not lower to 8304 // VPERM. 8305 unsigned int ShuffleKind = isLittleEndian ? 2 : 0; 8306 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) || 8307 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) || 8308 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 || 8309 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) || 8310 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) || 8311 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) || 8312 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) || 8313 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) || 8314 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) || 8315 (Subtarget.hasP8Altivec() && ( 8316 PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) || 8317 PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) || 8318 PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG)))) 8319 return Op; 8320 8321 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 8322 // perfect shuffle table to emit an optimal matching sequence. 8323 ArrayRef<int> PermMask = SVOp->getMask(); 8324 8325 unsigned PFIndexes[4]; 8326 bool isFourElementShuffle = true; 8327 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 8328 unsigned EltNo = 8; // Start out undef. 8329 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 8330 if (PermMask[i*4+j] < 0) 8331 continue; // Undef, ignore it. 8332 8333 unsigned ByteSource = PermMask[i*4+j]; 8334 if ((ByteSource & 3) != j) { 8335 isFourElementShuffle = false; 8336 break; 8337 } 8338 8339 if (EltNo == 8) { 8340 EltNo = ByteSource/4; 8341 } else if (EltNo != ByteSource/4) { 8342 isFourElementShuffle = false; 8343 break; 8344 } 8345 } 8346 PFIndexes[i] = EltNo; 8347 } 8348 8349 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 8350 // perfect shuffle vector to determine if it is cost effective to do this as 8351 // discrete instructions, or whether we should use a vperm. 8352 // For now, we skip this for little endian until such time as we have a 8353 // little-endian perfect shuffle table. 
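  // Each perfect-shuffle table entry packs a cost into its top two bits, with
  // an operation code and two operand indices encoded below that (decoded in
  // GeneratePerfectShuffle above).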
8354 if (isFourElementShuffle && !isLittleEndian) { 8355 // Compute the index in the perfect shuffle table. 8356 unsigned PFTableIndex = 8357 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 8358 8359 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 8360 unsigned Cost = (PFEntry >> 30); 8361 8362 // Determining when to avoid vperm is tricky. Many things affect the cost 8363 // of vperm, particularly how many times the perm mask needs to be computed. 8364 // For example, if the perm mask can be hoisted out of a loop or is already 8365 // used (perhaps because there are multiple permutes with the same shuffle 8366 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 8367 // the loop requires an extra register. 8368 // 8369 // As a compromise, we only emit discrete instructions if the shuffle can be 8370 // generated in 3 or fewer operations. When we have loop information 8371 // available, if this block is within a loop, we should avoid using vperm 8372 // for 3-operation perms and use a constant pool load instead. 8373 if (Cost < 3) 8374 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 8375 } 8376 8377 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 8378 // vector that will get spilled to the constant pool. 8379 if (V2.isUndef()) V2 = V1; 8380 8381 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 8382 // that it is in input element units, not in bytes. Convert now. 8383 8384 // For little endian, the order of the input vectors is reversed, and 8385 // the permutation mask is complemented with respect to 31. This is 8386 // necessary to produce proper semantics with the big-endian-biased vperm 8387 // instruction. 8388 EVT EltVT = V1.getValueType().getVectorElementType(); 8389 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 8390 8391 SmallVector<SDValue, 16> ResultMask; 8392 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 8393 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i]; 8394 8395 for (unsigned j = 0; j != BytesPerElement; ++j) 8396 if (isLittleEndian) 8397 ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j), 8398 dl, MVT::i32)); 8399 else 8400 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl, 8401 MVT::i32)); 8402 } 8403 8404 SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask); 8405 if (isLittleEndian) 8406 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 8407 V2, V1, VPermMask); 8408 else 8409 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 8410 V1, V2, VPermMask); 8411 } 8412 8413 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a 8414 /// vector comparison. If it is, return true and fill in Opc/isDot with 8415 /// information about the intrinsic. 8416 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc, 8417 bool &isDot, const PPCSubtarget &Subtarget) { 8418 unsigned IntrinsicID = 8419 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 8420 CompareOpc = -1; 8421 isDot = false; 8422 switch (IntrinsicID) { 8423 default: 8424 return false; 8425 // Comparison predicates. 
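  // For each case below, CompareOpc holds the extended-opcode field of the
  // matching vcmp* instruction; the *_p predicate intrinsics also set isDot
  // so that the record (dot) form, which updates CR6, is selected.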
8426 case Intrinsic::ppc_altivec_vcmpbfp_p: 8427 CompareOpc = 966; 8428 isDot = true; 8429 break; 8430 case Intrinsic::ppc_altivec_vcmpeqfp_p: 8431 CompareOpc = 198; 8432 isDot = true; 8433 break; 8434 case Intrinsic::ppc_altivec_vcmpequb_p: 8435 CompareOpc = 6; 8436 isDot = true; 8437 break; 8438 case Intrinsic::ppc_altivec_vcmpequh_p: 8439 CompareOpc = 70; 8440 isDot = true; 8441 break; 8442 case Intrinsic::ppc_altivec_vcmpequw_p: 8443 CompareOpc = 134; 8444 isDot = true; 8445 break; 8446 case Intrinsic::ppc_altivec_vcmpequd_p: 8447 if (Subtarget.hasP8Altivec()) { 8448 CompareOpc = 199; 8449 isDot = true; 8450 } else 8451 return false; 8452 break; 8453 case Intrinsic::ppc_altivec_vcmpneb_p: 8454 case Intrinsic::ppc_altivec_vcmpneh_p: 8455 case Intrinsic::ppc_altivec_vcmpnew_p: 8456 case Intrinsic::ppc_altivec_vcmpnezb_p: 8457 case Intrinsic::ppc_altivec_vcmpnezh_p: 8458 case Intrinsic::ppc_altivec_vcmpnezw_p: 8459 if (Subtarget.hasP9Altivec()) { 8460 switch (IntrinsicID) { 8461 default: 8462 llvm_unreachable("Unknown comparison intrinsic."); 8463 case Intrinsic::ppc_altivec_vcmpneb_p: 8464 CompareOpc = 7; 8465 break; 8466 case Intrinsic::ppc_altivec_vcmpneh_p: 8467 CompareOpc = 71; 8468 break; 8469 case Intrinsic::ppc_altivec_vcmpnew_p: 8470 CompareOpc = 135; 8471 break; 8472 case Intrinsic::ppc_altivec_vcmpnezb_p: 8473 CompareOpc = 263; 8474 break; 8475 case Intrinsic::ppc_altivec_vcmpnezh_p: 8476 CompareOpc = 327; 8477 break; 8478 case Intrinsic::ppc_altivec_vcmpnezw_p: 8479 CompareOpc = 391; 8480 break; 8481 } 8482 isDot = true; 8483 } else 8484 return false; 8485 break; 8486 case Intrinsic::ppc_altivec_vcmpgefp_p: 8487 CompareOpc = 454; 8488 isDot = true; 8489 break; 8490 case Intrinsic::ppc_altivec_vcmpgtfp_p: 8491 CompareOpc = 710; 8492 isDot = true; 8493 break; 8494 case Intrinsic::ppc_altivec_vcmpgtsb_p: 8495 CompareOpc = 774; 8496 isDot = true; 8497 break; 8498 case Intrinsic::ppc_altivec_vcmpgtsh_p: 8499 CompareOpc = 838; 8500 isDot = true; 8501 break; 8502 case Intrinsic::ppc_altivec_vcmpgtsw_p: 8503 CompareOpc = 902; 8504 isDot = true; 8505 break; 8506 case Intrinsic::ppc_altivec_vcmpgtsd_p: 8507 if (Subtarget.hasP8Altivec()) { 8508 CompareOpc = 967; 8509 isDot = true; 8510 } else 8511 return false; 8512 break; 8513 case Intrinsic::ppc_altivec_vcmpgtub_p: 8514 CompareOpc = 518; 8515 isDot = true; 8516 break; 8517 case Intrinsic::ppc_altivec_vcmpgtuh_p: 8518 CompareOpc = 582; 8519 isDot = true; 8520 break; 8521 case Intrinsic::ppc_altivec_vcmpgtuw_p: 8522 CompareOpc = 646; 8523 isDot = true; 8524 break; 8525 case Intrinsic::ppc_altivec_vcmpgtud_p: 8526 if (Subtarget.hasP8Altivec()) { 8527 CompareOpc = 711; 8528 isDot = true; 8529 } else 8530 return false; 8531 break; 8532 8533 // VSX predicate comparisons use the same infrastructure 8534 case Intrinsic::ppc_vsx_xvcmpeqdp_p: 8535 case Intrinsic::ppc_vsx_xvcmpgedp_p: 8536 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 8537 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 8538 case Intrinsic::ppc_vsx_xvcmpgesp_p: 8539 case Intrinsic::ppc_vsx_xvcmpgtsp_p: 8540 if (Subtarget.hasVSX()) { 8541 switch (IntrinsicID) { 8542 case Intrinsic::ppc_vsx_xvcmpeqdp_p: 8543 CompareOpc = 99; 8544 break; 8545 case Intrinsic::ppc_vsx_xvcmpgedp_p: 8546 CompareOpc = 115; 8547 break; 8548 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 8549 CompareOpc = 107; 8550 break; 8551 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 8552 CompareOpc = 67; 8553 break; 8554 case Intrinsic::ppc_vsx_xvcmpgesp_p: 8555 CompareOpc = 83; 8556 break; 8557 case Intrinsic::ppc_vsx_xvcmpgtsp_p: 8558 CompareOpc = 75; 
8559 break; 8560 } 8561 isDot = true; 8562 } else 8563 return false; 8564 break; 8565 8566 // Normal Comparisons. 8567 case Intrinsic::ppc_altivec_vcmpbfp: 8568 CompareOpc = 966; 8569 break; 8570 case Intrinsic::ppc_altivec_vcmpeqfp: 8571 CompareOpc = 198; 8572 break; 8573 case Intrinsic::ppc_altivec_vcmpequb: 8574 CompareOpc = 6; 8575 break; 8576 case Intrinsic::ppc_altivec_vcmpequh: 8577 CompareOpc = 70; 8578 break; 8579 case Intrinsic::ppc_altivec_vcmpequw: 8580 CompareOpc = 134; 8581 break; 8582 case Intrinsic::ppc_altivec_vcmpequd: 8583 if (Subtarget.hasP8Altivec()) 8584 CompareOpc = 199; 8585 else 8586 return false; 8587 break; 8588 case Intrinsic::ppc_altivec_vcmpneb: 8589 case Intrinsic::ppc_altivec_vcmpneh: 8590 case Intrinsic::ppc_altivec_vcmpnew: 8591 case Intrinsic::ppc_altivec_vcmpnezb: 8592 case Intrinsic::ppc_altivec_vcmpnezh: 8593 case Intrinsic::ppc_altivec_vcmpnezw: 8594 if (Subtarget.hasP9Altivec()) 8595 switch (IntrinsicID) { 8596 default: 8597 llvm_unreachable("Unknown comparison intrinsic."); 8598 case Intrinsic::ppc_altivec_vcmpneb: 8599 CompareOpc = 7; 8600 break; 8601 case Intrinsic::ppc_altivec_vcmpneh: 8602 CompareOpc = 71; 8603 break; 8604 case Intrinsic::ppc_altivec_vcmpnew: 8605 CompareOpc = 135; 8606 break; 8607 case Intrinsic::ppc_altivec_vcmpnezb: 8608 CompareOpc = 263; 8609 break; 8610 case Intrinsic::ppc_altivec_vcmpnezh: 8611 CompareOpc = 327; 8612 break; 8613 case Intrinsic::ppc_altivec_vcmpnezw: 8614 CompareOpc = 391; 8615 break; 8616 } 8617 else 8618 return false; 8619 break; 8620 case Intrinsic::ppc_altivec_vcmpgefp: 8621 CompareOpc = 454; 8622 break; 8623 case Intrinsic::ppc_altivec_vcmpgtfp: 8624 CompareOpc = 710; 8625 break; 8626 case Intrinsic::ppc_altivec_vcmpgtsb: 8627 CompareOpc = 774; 8628 break; 8629 case Intrinsic::ppc_altivec_vcmpgtsh: 8630 CompareOpc = 838; 8631 break; 8632 case Intrinsic::ppc_altivec_vcmpgtsw: 8633 CompareOpc = 902; 8634 break; 8635 case Intrinsic::ppc_altivec_vcmpgtsd: 8636 if (Subtarget.hasP8Altivec()) 8637 CompareOpc = 967; 8638 else 8639 return false; 8640 break; 8641 case Intrinsic::ppc_altivec_vcmpgtub: 8642 CompareOpc = 518; 8643 break; 8644 case Intrinsic::ppc_altivec_vcmpgtuh: 8645 CompareOpc = 582; 8646 break; 8647 case Intrinsic::ppc_altivec_vcmpgtuw: 8648 CompareOpc = 646; 8649 break; 8650 case Intrinsic::ppc_altivec_vcmpgtud: 8651 if (Subtarget.hasP8Altivec()) 8652 CompareOpc = 711; 8653 else 8654 return false; 8655 break; 8656 } 8657 return true; 8658 } 8659 8660 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 8661 /// lower, do it, otherwise return null. 8662 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 8663 SelectionDAG &DAG) const { 8664 unsigned IntrinsicID = 8665 cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 8666 8667 SDLoc dl(Op); 8668 8669 if (IntrinsicID == Intrinsic::thread_pointer) { 8670 // Reads the thread pointer register, used for __builtin_thread_pointer. 8671 if (Subtarget.isPPC64()) 8672 return DAG.getRegister(PPC::X13, MVT::i64); 8673 return DAG.getRegister(PPC::R2, MVT::i32); 8674 } 8675 8676 // We are looking for absolute values here. 
8677 // The idea is to try to fit one of two patterns: 8678 // max (a, (0-a)) OR max ((0-a), a) 8679 if (Subtarget.hasP9Vector() && 8680 (IntrinsicID == Intrinsic::ppc_altivec_vmaxsw || 8681 IntrinsicID == Intrinsic::ppc_altivec_vmaxsh || 8682 IntrinsicID == Intrinsic::ppc_altivec_vmaxsb)) { 8683 SDValue V1 = Op.getOperand(1); 8684 SDValue V2 = Op.getOperand(2); 8685 if (V1.getSimpleValueType() == V2.getSimpleValueType() && 8686 (V1.getSimpleValueType() == MVT::v4i32 || 8687 V1.getSimpleValueType() == MVT::v8i16 || 8688 V1.getSimpleValueType() == MVT::v16i8)) { 8689 if ( V1.getOpcode() == ISD::SUB && 8690 ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) && 8691 V1.getOperand(1) == V2 ) { 8692 // Generate the abs instruction with the operands 8693 return DAG.getNode(ISD::ABS, dl, V2.getValueType(),V2); 8694 } 8695 8696 if ( V2.getOpcode() == ISD::SUB && 8697 ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) && 8698 V2.getOperand(1) == V1 ) { 8699 // Generate the abs instruction with the operands 8700 return DAG.getNode(ISD::ABS, dl, V1.getValueType(),V1); 8701 } 8702 } 8703 } 8704 8705 // If this is a lowered altivec predicate compare, CompareOpc is set to the 8706 // opcode number of the comparison. 8707 int CompareOpc; 8708 bool isDot; 8709 if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget)) 8710 return SDValue(); // Don't custom lower most intrinsics. 8711 8712 // If this is a non-dot comparison, make the VCMP node and we are done. 8713 if (!isDot) { 8714 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 8715 Op.getOperand(1), Op.getOperand(2), 8716 DAG.getConstant(CompareOpc, dl, MVT::i32)); 8717 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 8718 } 8719 8720 // Create the PPCISD altivec 'dot' comparison node. 8721 SDValue Ops[] = { 8722 Op.getOperand(2), // LHS 8723 Op.getOperand(3), // RHS 8724 DAG.getConstant(CompareOpc, dl, MVT::i32) 8725 }; 8726 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue }; 8727 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 8728 8729 // Now that we have the comparison, emit a copy from the CR to a GPR. 8730 // This is flagged to the above dot comparison. 8731 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32, 8732 DAG.getRegister(PPC::CR6, MVT::i32), 8733 CompNode.getValue(1)); 8734 8735 // Unpack the result based on how the target uses it. 8736 unsigned BitNo; // Bit # of CR6. 8737 bool InvertBit; // Invert result? 8738 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 8739 default: // Can't happen, don't crash on invalid number though. 8740 case 0: // Return the value of the EQ bit of CR6. 8741 BitNo = 0; InvertBit = false; 8742 break; 8743 case 1: // Return the inverted value of the EQ bit of CR6. 8744 BitNo = 0; InvertBit = true; 8745 break; 8746 case 2: // Return the value of the LT bit of CR6. 8747 BitNo = 2; InvertBit = false; 8748 break; 8749 case 3: // Return the inverted value of the LT bit of CR6. 8750 BitNo = 2; InvertBit = true; 8751 break; 8752 } 8753 8754 // Shift the bit into the low position. 8755 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags, 8756 DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32)); 8757 // Isolate the bit. 8758 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags, 8759 DAG.getConstant(1, dl, MVT::i32)); 8760 8761 // If we are supposed to, toggle the bit. 
  if (InvertBit)
    Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
                        DAG.getConstant(1, dl, MVT::i32));
  return Flags;
}

SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                               SelectionDAG &DAG) const {
  // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
  // the beginning of the argument list.
  int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
  SDLoc DL(Op);
  switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
  case Intrinsic::ppc_cfence: {
    assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
    assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
    return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
                                      DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
                                                  Op.getOperand(ArgStart + 1)),
                                      Op.getOperand(0)),
                   0);
  }
  default:
    break;
  }
  return SDValue();
}

SDValue PPCTargetLowering::LowerREM(SDValue Op, SelectionDAG &DAG) const {
  // Check for a DIV with the same operands as this REM.
  for (auto UI : Op.getOperand(1)->uses()) {
    if ((Op.getOpcode() == ISD::SREM && UI->getOpcode() == ISD::SDIV) ||
        (Op.getOpcode() == ISD::UREM && UI->getOpcode() == ISD::UDIV))
      if (UI->getOperand(0) == Op.getOperand(0) &&
          UI->getOperand(1) == Op.getOperand(1))
        return SDValue();
  }
  return Op;
}

// Lower scalar BSWAP64 to xxbrd.
SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // MTVSRDD
  Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
                   Op.getOperand(0));
  // XXBRD
  Op = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Op);
  // MFVSRD
  int VectorIndex = 0;
  if (Subtarget.isLittleEndian())
    VectorIndex = 1;
  Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
                   DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
  return Op;
}

SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // For v2i64 (VSX), we can pattern match the v2i32 case (using fp <-> int
  // instructions), but for smaller types, we need to first extend up to v2i32
  // before going any further.
  if (Op.getValueType() == MVT::v2i64) {
    EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    if (ExtVT != MVT::v2i32) {
      Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0));
      Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op,
                       DAG.getValueType(EVT::getVectorVT(*DAG.getContext(),
                                        ExtVT.getVectorElementType(), 4)));
      Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op);
      Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op,
                       DAG.getValueType(MVT::v2i32));
    }

    return Op;
  }

  return SDValue();
}

SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // Create a stack slot that is 16-byte aligned.
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(16, 16, false);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  // Store the input value into Value#0 of the stack slot.
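  // The remaining bytes of the slot are left undefined; reloading the whole
  // slot below as the vector type is fine because the lanes other than lane 0
  // of a SCALAR_TO_VECTOR are allowed to be undef.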
8853 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 8854 MachinePointerInfo()); 8855 // Load it out. 8856 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo()); 8857 } 8858 8859 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, 8860 SelectionDAG &DAG) const { 8861 assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && 8862 "Should only be called for ISD::INSERT_VECTOR_ELT"); 8863 8864 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 8865 // We have legal lowering for constant indices but not for variable ones. 8866 if (!C) 8867 return SDValue(); 8868 8869 EVT VT = Op.getValueType(); 8870 SDLoc dl(Op); 8871 SDValue V1 = Op.getOperand(0); 8872 SDValue V2 = Op.getOperand(1); 8873 // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types. 8874 if (VT == MVT::v8i16 || VT == MVT::v16i8) { 8875 SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2); 8876 unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8; 8877 unsigned InsertAtElement = C->getZExtValue(); 8878 unsigned InsertAtByte = InsertAtElement * BytesInEachElement; 8879 if (Subtarget.isLittleEndian()) { 8880 InsertAtByte = (16 - BytesInEachElement) - InsertAtByte; 8881 } 8882 return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz, 8883 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 8884 } 8885 return Op; 8886 } 8887 8888 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, 8889 SelectionDAG &DAG) const { 8890 SDLoc dl(Op); 8891 SDNode *N = Op.getNode(); 8892 8893 assert(N->getOperand(0).getValueType() == MVT::v4i1 && 8894 "Unknown extract_vector_elt type"); 8895 8896 SDValue Value = N->getOperand(0); 8897 8898 // The first part of this is like the store lowering except that we don't 8899 // need to track the chain. 8900 8901 // The values are now known to be -1 (false) or 1 (true). To convert this 8902 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 8903 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 8904 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 8905 8906 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 8907 // understand how to form the extending load. 8908 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 8909 8910 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 8911 8912 // Now convert to an integer and store. 8913 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 8914 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 8915 Value); 8916 8917 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 8918 int FrameIdx = MFI.CreateStackObject(16, 16, false); 8919 MachinePointerInfo PtrInfo = 8920 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 8921 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 8922 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8923 8924 SDValue StoreChain = DAG.getEntryNode(); 8925 SDValue Ops[] = {StoreChain, 8926 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32), 8927 Value, FIdx}; 8928 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other); 8929 8930 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 8931 dl, VTs, Ops, MVT::v4i32, PtrInfo); 8932 8933 // Extract the value requested. 
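  // Each lane of the qvfctiwu/qvstfiw result occupies four bytes of the
  // stack slot, so lane N of the original v4i1 vector is reloaded from byte
  // offset 4*N (e.g. lane 2 comes back from FIdx+8).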
8934 unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 8935 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 8936 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 8937 8938 SDValue IntVal = 8939 DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset)); 8940 8941 if (!Subtarget.useCRBits()) 8942 return IntVal; 8943 8944 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal); 8945 } 8946 8947 /// Lowering for QPX v4i1 loads 8948 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op, 8949 SelectionDAG &DAG) const { 8950 SDLoc dl(Op); 8951 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode()); 8952 SDValue LoadChain = LN->getChain(); 8953 SDValue BasePtr = LN->getBasePtr(); 8954 8955 if (Op.getValueType() == MVT::v4f64 || 8956 Op.getValueType() == MVT::v4f32) { 8957 EVT MemVT = LN->getMemoryVT(); 8958 unsigned Alignment = LN->getAlignment(); 8959 8960 // If this load is properly aligned, then it is legal. 8961 if (Alignment >= MemVT.getStoreSize()) 8962 return Op; 8963 8964 EVT ScalarVT = Op.getValueType().getScalarType(), 8965 ScalarMemVT = MemVT.getScalarType(); 8966 unsigned Stride = ScalarMemVT.getStoreSize(); 8967 8968 SDValue Vals[4], LoadChains[4]; 8969 for (unsigned Idx = 0; Idx < 4; ++Idx) { 8970 SDValue Load; 8971 if (ScalarVT != ScalarMemVT) 8972 Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain, 8973 BasePtr, 8974 LN->getPointerInfo().getWithOffset(Idx * Stride), 8975 ScalarMemVT, MinAlign(Alignment, Idx * Stride), 8976 LN->getMemOperand()->getFlags(), LN->getAAInfo()); 8977 else 8978 Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr, 8979 LN->getPointerInfo().getWithOffset(Idx * Stride), 8980 MinAlign(Alignment, Idx * Stride), 8981 LN->getMemOperand()->getFlags(), LN->getAAInfo()); 8982 8983 if (Idx == 0 && LN->isIndexed()) { 8984 assert(LN->getAddressingMode() == ISD::PRE_INC && 8985 "Unknown addressing mode on vector load"); 8986 Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(), 8987 LN->getAddressingMode()); 8988 } 8989 8990 Vals[Idx] = Load; 8991 LoadChains[Idx] = Load.getValue(1); 8992 8993 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 8994 DAG.getConstant(Stride, dl, 8995 BasePtr.getValueType())); 8996 } 8997 8998 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 8999 SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals); 9000 9001 if (LN->isIndexed()) { 9002 SDValue RetOps[] = { Value, Vals[0].getValue(1), TF }; 9003 return DAG.getMergeValues(RetOps, dl); 9004 } 9005 9006 SDValue RetOps[] = { Value, TF }; 9007 return DAG.getMergeValues(RetOps, dl); 9008 } 9009 9010 assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower"); 9011 assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported"); 9012 9013 // To lower v4i1 from a byte array, we load the byte elements of the 9014 // vector and then reuse the BUILD_VECTOR logic. 
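  // A rough sketch of the expansion that follows (one byte per element,
  // offsets relative to BasePtr):
  //   e[i]  = extload i8 -> i32 from BasePtr + i      (i = 0..3)
  //   chain = TokenFactor(e[0].chain, ..., e[3].chain)
  //   value = BUILD_VECTOR v4i1 (e[0], e[1], e[2], e[3])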
9015 9016 SDValue VectElmts[4], VectElmtChains[4]; 9017 for (unsigned i = 0; i < 4; ++i) { 9018 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 9019 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 9020 9021 VectElmts[i] = DAG.getExtLoad( 9022 ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx, 9023 LN->getPointerInfo().getWithOffset(i), MVT::i8, 9024 /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo()); 9025 VectElmtChains[i] = VectElmts[i].getValue(1); 9026 } 9027 9028 LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains); 9029 SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts); 9030 9031 SDValue RVals[] = { Value, LoadChain }; 9032 return DAG.getMergeValues(RVals, dl); 9033 } 9034 9035 /// Lowering for QPX v4i1 stores 9036 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op, 9037 SelectionDAG &DAG) const { 9038 SDLoc dl(Op); 9039 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode()); 9040 SDValue StoreChain = SN->getChain(); 9041 SDValue BasePtr = SN->getBasePtr(); 9042 SDValue Value = SN->getValue(); 9043 9044 if (Value.getValueType() == MVT::v4f64 || 9045 Value.getValueType() == MVT::v4f32) { 9046 EVT MemVT = SN->getMemoryVT(); 9047 unsigned Alignment = SN->getAlignment(); 9048 9049 // If this store is properly aligned, then it is legal. 9050 if (Alignment >= MemVT.getStoreSize()) 9051 return Op; 9052 9053 EVT ScalarVT = Value.getValueType().getScalarType(), 9054 ScalarMemVT = MemVT.getScalarType(); 9055 unsigned Stride = ScalarMemVT.getStoreSize(); 9056 9057 SDValue Stores[4]; 9058 for (unsigned Idx = 0; Idx < 4; ++Idx) { 9059 SDValue Ex = DAG.getNode( 9060 ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value, 9061 DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout()))); 9062 SDValue Store; 9063 if (ScalarVT != ScalarMemVT) 9064 Store = 9065 DAG.getTruncStore(StoreChain, dl, Ex, BasePtr, 9066 SN->getPointerInfo().getWithOffset(Idx * Stride), 9067 ScalarMemVT, MinAlign(Alignment, Idx * Stride), 9068 SN->getMemOperand()->getFlags(), SN->getAAInfo()); 9069 else 9070 Store = DAG.getStore(StoreChain, dl, Ex, BasePtr, 9071 SN->getPointerInfo().getWithOffset(Idx * Stride), 9072 MinAlign(Alignment, Idx * Stride), 9073 SN->getMemOperand()->getFlags(), SN->getAAInfo()); 9074 9075 if (Idx == 0 && SN->isIndexed()) { 9076 assert(SN->getAddressingMode() == ISD::PRE_INC && 9077 "Unknown addressing mode on vector store"); 9078 Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(), 9079 SN->getAddressingMode()); 9080 } 9081 9082 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 9083 DAG.getConstant(Stride, dl, 9084 BasePtr.getValueType())); 9085 Stores[Idx] = Store; 9086 } 9087 9088 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 9089 9090 if (SN->isIndexed()) { 9091 SDValue RetOps[] = { TF, Stores[0].getValue(1) }; 9092 return DAG.getMergeValues(RetOps, dl); 9093 } 9094 9095 return TF; 9096 } 9097 9098 assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported"); 9099 assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower"); 9100 9101 // The values are now known to be -1 (false) or 1 (true). To convert this 9102 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 
9103 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 9104 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 9105 9106 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 9107 // understand how to form the extending load. 9108 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 9109 9110 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 9111 9112 // Now convert to an integer and store. 9113 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 9114 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 9115 Value); 9116 9117 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 9118 int FrameIdx = MFI.CreateStackObject(16, 16, false); 9119 MachinePointerInfo PtrInfo = 9120 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 9121 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 9122 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 9123 9124 SDValue Ops[] = {StoreChain, 9125 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32), 9126 Value, FIdx}; 9127 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other); 9128 9129 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 9130 dl, VTs, Ops, MVT::v4i32, PtrInfo); 9131 9132 // Move data into the byte array. 9133 SDValue Loads[4], LoadChains[4]; 9134 for (unsigned i = 0; i < 4; ++i) { 9135 unsigned Offset = 4*i; 9136 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 9137 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 9138 9139 Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx, 9140 PtrInfo.getWithOffset(Offset)); 9141 LoadChains[i] = Loads[i].getValue(1); 9142 } 9143 9144 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 9145 9146 SDValue Stores[4]; 9147 for (unsigned i = 0; i < 4; ++i) { 9148 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 9149 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 9150 9151 Stores[i] = DAG.getTruncStore( 9152 StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i), 9153 MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(), 9154 SN->getAAInfo()); 9155 } 9156 9157 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 9158 9159 return StoreChain; 9160 } 9161 9162 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 9163 SDLoc dl(Op); 9164 if (Op.getValueType() == MVT::v4i32) { 9165 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 9166 9167 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl); 9168 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt. 9169 9170 SDValue RHSSwap = // = vrlw RHS, 16 9171 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 9172 9173 // Shrinkify inputs to v8i16. 9174 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS); 9175 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS); 9176 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap); 9177 9178 // Low parts multiplied together, generating 32-bit results (we ignore the 9179 // top parts). 9180 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 9181 LHS, RHS, DAG, dl, MVT::v4i32); 9182 9183 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 9184 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 9185 // Shift the high parts up 16 bits. 
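    // Putting the pieces together, each 32-bit lane effectively computes
    //   lo16(L)*lo16(R) + ((lo16(L)*hi16(R) + hi16(L)*lo16(R)) << 16)
    // which equals L*R modulo 2^32; the hi16(L)*hi16(R) term only contributes
    // to bits above bit 31 and can be ignored.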
    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
                              Neg16, DAG, dl);
    return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
  } else if (Op.getValueType() == MVT::v8i16) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);

    return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
                            LHS, RHS, Zero, DAG, dl);
  } else if (Op.getValueType() == MVT::v16i8) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
    bool isLittleEndian = Subtarget.isLittleEndian();

    // Multiply the even 8-bit parts, producing 16-bit products.
    SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
                                         LHS, RHS, DAG, dl, MVT::v8i16);
    EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);

    // Multiply the odd 8-bit parts, producing 16-bit products.
    SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
                                        LHS, RHS, DAG, dl, MVT::v8i16);
    OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);

    // Merge the results together. Because vmuleub and vmuloub are
    // instructions with a big-endian bias, we must reverse the
    // element numbering and reverse the meaning of "odd" and "even"
    // when generating little endian code.
    int Ops[16];
    for (unsigned i = 0; i != 8; ++i) {
      if (isLittleEndian) {
        Ops[i*2  ] = 2*i;
        Ops[i*2+1] = 2*i+16;
      } else {
        Ops[i*2  ] = 2*i+1;
        Ops[i*2+1] = 2*i+1+16;
      }
    }
    if (isLittleEndian)
      return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
    else
      return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
  } else {
    llvm_unreachable("Unknown mul to lower!");
  }
}

/// LowerOperation - Provide custom lowering hooks for some operations.
9234 /// 9235 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 9236 switch (Op.getOpcode()) { 9237 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 9238 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 9239 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 9240 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 9241 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 9242 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 9243 case ISD::SETCC: return LowerSETCC(Op, DAG); 9244 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 9245 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 9246 case ISD::VASTART: 9247 return LowerVASTART(Op, DAG); 9248 9249 case ISD::VAARG: 9250 return LowerVAARG(Op, DAG); 9251 9252 case ISD::VACOPY: 9253 return LowerVACOPY(Op, DAG); 9254 9255 case ISD::STACKRESTORE: 9256 return LowerSTACKRESTORE(Op, DAG); 9257 9258 case ISD::DYNAMIC_STACKALLOC: 9259 return LowerDYNAMIC_STACKALLOC(Op, DAG); 9260 9261 case ISD::GET_DYNAMIC_AREA_OFFSET: 9262 return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG); 9263 9264 case ISD::EH_DWARF_CFA: 9265 return LowerEH_DWARF_CFA(Op, DAG); 9266 9267 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 9268 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 9269 9270 case ISD::LOAD: return LowerLOAD(Op, DAG); 9271 case ISD::STORE: return LowerSTORE(Op, DAG); 9272 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); 9273 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 9274 case ISD::FP_TO_UINT: 9275 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, 9276 SDLoc(Op)); 9277 case ISD::UINT_TO_FP: 9278 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 9279 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 9280 9281 // Lower 64-bit shifts. 9282 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 9283 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 9284 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 9285 9286 // Vector-related lowering. 9287 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 9288 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 9289 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 9290 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 9291 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG); 9292 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 9293 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 9294 case ISD::MUL: return LowerMUL(Op, DAG); 9295 9296 // For counter-based loop handling. 9297 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 9298 9299 // Frame & Return address. 
9300 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 9301 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 9302 9303 case ISD::INTRINSIC_VOID: 9304 return LowerINTRINSIC_VOID(Op, DAG); 9305 case ISD::SREM: 9306 case ISD::UREM: 9307 return LowerREM(Op, DAG); 9308 case ISD::BSWAP: 9309 return LowerBSWAP(Op, DAG); 9310 } 9311 } 9312 9313 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 9314 SmallVectorImpl<SDValue>&Results, 9315 SelectionDAG &DAG) const { 9316 SDLoc dl(N); 9317 switch (N->getOpcode()) { 9318 default: 9319 llvm_unreachable("Do not know how to custom type legalize this operation!"); 9320 case ISD::READCYCLECOUNTER: { 9321 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 9322 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0)); 9323 9324 Results.push_back(RTB); 9325 Results.push_back(RTB.getValue(1)); 9326 Results.push_back(RTB.getValue(2)); 9327 break; 9328 } 9329 case ISD::INTRINSIC_W_CHAIN: { 9330 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 9331 Intrinsic::ppc_is_decremented_ctr_nonzero) 9332 break; 9333 9334 assert(N->getValueType(0) == MVT::i1 && 9335 "Unexpected result type for CTR decrement intrinsic"); 9336 EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), 9337 N->getValueType(0)); 9338 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 9339 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 9340 N->getOperand(1)); 9341 9342 Results.push_back(NewInt); 9343 Results.push_back(NewInt.getValue(1)); 9344 break; 9345 } 9346 case ISD::VAARG: { 9347 if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64()) 9348 return; 9349 9350 EVT VT = N->getValueType(0); 9351 9352 if (VT == MVT::i64) { 9353 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG); 9354 9355 Results.push_back(NewNode); 9356 Results.push_back(NewNode.getValue(1)); 9357 } 9358 return; 9359 } 9360 case ISD::FP_ROUND_INREG: { 9361 assert(N->getValueType(0) == MVT::ppcf128); 9362 assert(N->getOperand(0).getValueType() == MVT::ppcf128); 9363 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 9364 MVT::f64, N->getOperand(0), 9365 DAG.getIntPtrConstant(0, dl)); 9366 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 9367 MVT::f64, N->getOperand(0), 9368 DAG.getIntPtrConstant(1, dl)); 9369 9370 // Add the two halves of the long double in round-to-zero mode. 9371 SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 9372 9373 // We know the low half is about to be thrown away, so just use something 9374 // convenient. 9375 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128, 9376 FPreg, FPreg)); 9377 return; 9378 } 9379 case ISD::FP_TO_SINT: 9380 case ISD::FP_TO_UINT: 9381 // LowerFP_TO_INT() can only handle f32 and f64. 
9382 if (N->getOperand(0).getValueType() == MVT::ppcf128) 9383 return; 9384 Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl)); 9385 return; 9386 } 9387 } 9388 9389 //===----------------------------------------------------------------------===// 9390 // Other Lowering Code 9391 //===----------------------------------------------------------------------===// 9392 9393 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) { 9394 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 9395 Function *Func = Intrinsic::getDeclaration(M, Id); 9396 return Builder.CreateCall(Func, {}); 9397 } 9398 9399 // The mappings for emitLeading/TrailingFence is taken from 9400 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html 9401 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder, 9402 Instruction *Inst, 9403 AtomicOrdering Ord) const { 9404 if (Ord == AtomicOrdering::SequentiallyConsistent) 9405 return callIntrinsic(Builder, Intrinsic::ppc_sync); 9406 if (isReleaseOrStronger(Ord)) 9407 return callIntrinsic(Builder, Intrinsic::ppc_lwsync); 9408 return nullptr; 9409 } 9410 9411 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder, 9412 Instruction *Inst, 9413 AtomicOrdering Ord) const { 9414 if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) { 9415 // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and 9416 // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html 9417 // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification. 9418 if (isa<LoadInst>(Inst) && Subtarget.isPPC64()) 9419 return Builder.CreateCall( 9420 Intrinsic::getDeclaration( 9421 Builder.GetInsertBlock()->getParent()->getParent(), 9422 Intrinsic::ppc_cfence, {Inst->getType()}), 9423 {Inst}); 9424 // FIXME: Can use isync for rmw operation. 9425 return callIntrinsic(Builder, Intrinsic::ppc_lwsync); 9426 } 9427 return nullptr; 9428 } 9429 9430 MachineBasicBlock * 9431 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB, 9432 unsigned AtomicSize, 9433 unsigned BinOpcode, 9434 unsigned CmpOpcode, 9435 unsigned CmpPred) const { 9436 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 9437 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 9438 9439 auto LoadMnemonic = PPC::LDARX; 9440 auto StoreMnemonic = PPC::STDCX; 9441 switch (AtomicSize) { 9442 default: 9443 llvm_unreachable("Unexpected size of atomic entity"); 9444 case 1: 9445 LoadMnemonic = PPC::LBARX; 9446 StoreMnemonic = PPC::STBCX; 9447 assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4"); 9448 break; 9449 case 2: 9450 LoadMnemonic = PPC::LHARX; 9451 StoreMnemonic = PPC::STHCX; 9452 assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4"); 9453 break; 9454 case 4: 9455 LoadMnemonic = PPC::LWARX; 9456 StoreMnemonic = PPC::STWCX; 9457 break; 9458 case 8: 9459 LoadMnemonic = PPC::LDARX; 9460 StoreMnemonic = PPC::STDCX; 9461 break; 9462 } 9463 9464 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 9465 MachineFunction *F = BB->getParent(); 9466 MachineFunction::iterator It = ++BB->getIterator(); 9467 9468 unsigned dest = MI.getOperand(0).getReg(); 9469 unsigned ptrA = MI.getOperand(1).getReg(); 9470 unsigned ptrB = MI.getOperand(2).getReg(); 9471 unsigned incr = MI.getOperand(3).getReg(); 9472 DebugLoc dl = MI.getDebugLoc(); 9473 9474 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 9475 MachineBasicBlock *loop2MBB = 9476 CmpOpcode ? 
F->CreateMachineBasicBlock(LLVM_BB) : nullptr; 9477 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 9478 F->insert(It, loopMBB); 9479 if (CmpOpcode) 9480 F->insert(It, loop2MBB); 9481 F->insert(It, exitMBB); 9482 exitMBB->splice(exitMBB->begin(), BB, 9483 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 9484 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 9485 9486 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9487 unsigned TmpReg = (!BinOpcode) ? incr : 9488 RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass 9489 : &PPC::GPRCRegClass); 9490 9491 // thisMBB: 9492 // ... 9493 // fallthrough --> loopMBB 9494 BB->addSuccessor(loopMBB); 9495 9496 // loopMBB: 9497 // l[wd]arx dest, ptr 9498 // add r0, dest, incr 9499 // st[wd]cx. r0, ptr 9500 // bne- loopMBB 9501 // fallthrough --> exitMBB 9502 9503 // For max/min... 9504 // loopMBB: 9505 // l[wd]arx dest, ptr 9506 // cmpl?[wd] incr, dest 9507 // bgt exitMBB 9508 // loop2MBB: 9509 // st[wd]cx. dest, ptr 9510 // bne- loopMBB 9511 // fallthrough --> exitMBB 9512 9513 BB = loopMBB; 9514 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 9515 .addReg(ptrA).addReg(ptrB); 9516 if (BinOpcode) 9517 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 9518 if (CmpOpcode) { 9519 // Signed comparisons of byte or halfword values must be sign-extended. 9520 if (CmpOpcode == PPC::CMPW && AtomicSize < 4) { 9521 unsigned ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 9522 BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH), 9523 ExtReg).addReg(dest); 9524 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 9525 .addReg(incr).addReg(ExtReg); 9526 } else 9527 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 9528 .addReg(incr).addReg(dest); 9529 9530 BuildMI(BB, dl, TII->get(PPC::BCC)) 9531 .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB); 9532 BB->addSuccessor(loop2MBB); 9533 BB->addSuccessor(exitMBB); 9534 BB = loop2MBB; 9535 } 9536 BuildMI(BB, dl, TII->get(StoreMnemonic)) 9537 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 9538 BuildMI(BB, dl, TII->get(PPC::BCC)) 9539 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 9540 BB->addSuccessor(loopMBB); 9541 BB->addSuccessor(exitMBB); 9542 9543 // exitMBB: 9544 // ... 9545 BB = exitMBB; 9546 return BB; 9547 } 9548 9549 MachineBasicBlock * 9550 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr &MI, 9551 MachineBasicBlock *BB, 9552 bool is8bit, // operation 9553 unsigned BinOpcode, 9554 unsigned CmpOpcode, 9555 unsigned CmpPred) const { 9556 // If we support part-word atomic mnemonics, just use them 9557 if (Subtarget.hasPartwordAtomics()) 9558 return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, 9559 CmpOpcode, CmpPred); 9560 9561 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 9562 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 9563 // In 64 bit mode we have to use 64 bits for addresses, even though the 9564 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 9565 // registers without caring whether they're 32 or 64, but here we're 9566 // doing actual arithmetic on the addresses. 9567 bool is64bit = Subtarget.isPPC64(); 9568 bool isLittleEndian = Subtarget.isLittleEndian(); 9569 unsigned ZeroReg = is64bit ? 
PPC::ZERO8 : PPC::ZERO; 9570 9571 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 9572 MachineFunction *F = BB->getParent(); 9573 MachineFunction::iterator It = ++BB->getIterator(); 9574 9575 unsigned dest = MI.getOperand(0).getReg(); 9576 unsigned ptrA = MI.getOperand(1).getReg(); 9577 unsigned ptrB = MI.getOperand(2).getReg(); 9578 unsigned incr = MI.getOperand(3).getReg(); 9579 DebugLoc dl = MI.getDebugLoc(); 9580 9581 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 9582 MachineBasicBlock *loop2MBB = 9583 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr; 9584 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 9585 F->insert(It, loopMBB); 9586 if (CmpOpcode) 9587 F->insert(It, loop2MBB); 9588 F->insert(It, exitMBB); 9589 exitMBB->splice(exitMBB->begin(), BB, 9590 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 9591 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 9592 9593 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9594 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 9595 : &PPC::GPRCRegClass; 9596 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 9597 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 9598 unsigned ShiftReg = 9599 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC); 9600 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC); 9601 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 9602 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 9603 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 9604 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 9605 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC); 9606 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 9607 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 9608 unsigned Ptr1Reg; 9609 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC); 9610 9611 // thisMBB: 9612 // ... 9613 // fallthrough --> loopMBB 9614 BB->addSuccessor(loopMBB); 9615 9616 // The 4-byte load must be aligned, while a char or short may be 9617 // anywhere in the word. Hence all this nasty bookkeeping code. 9618 // add ptr1, ptrA, ptrB [copy if ptrA==0] 9619 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 9620 // xori shift, shift1, 24 [16] 9621 // rlwinm ptr, ptr1, 0, 0, 29 9622 // slw incr2, incr, shift 9623 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 9624 // slw mask, mask2, shift 9625 // loopMBB: 9626 // lwarx tmpDest, ptr 9627 // add tmp, tmpDest, incr2 9628 // andc tmp2, tmpDest, mask 9629 // and tmp3, tmp, mask 9630 // or tmp4, tmp3, tmp2 9631 // stwcx. tmp4, ptr 9632 // bne- loopMBB 9633 // fallthrough --> exitMBB 9634 // srw dest, tmpDest, shift 9635 if (ptrA != ZeroReg) { 9636 Ptr1Reg = RegInfo.createVirtualRegister(RC); 9637 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 9638 .addReg(ptrA).addReg(ptrB); 9639 } else { 9640 Ptr1Reg = ptrB; 9641 } 9642 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 9643 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 9644 if (!isLittleEndian) 9645 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 9646 .addReg(Shift1Reg).addImm(is8bit ? 
24 : 16); 9647 if (is64bit) 9648 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 9649 .addReg(Ptr1Reg).addImm(0).addImm(61); 9650 else 9651 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 9652 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 9653 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg) 9654 .addReg(incr).addReg(ShiftReg); 9655 if (is8bit) 9656 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 9657 else { 9658 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 9659 BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535); 9660 } 9661 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 9662 .addReg(Mask2Reg).addReg(ShiftReg); 9663 9664 BB = loopMBB; 9665 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 9666 .addReg(ZeroReg).addReg(PtrReg); 9667 if (BinOpcode) 9668 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 9669 .addReg(Incr2Reg).addReg(TmpDestReg); 9670 BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg) 9671 .addReg(TmpDestReg).addReg(MaskReg); 9672 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg) 9673 .addReg(TmpReg).addReg(MaskReg); 9674 if (CmpOpcode) { 9675 // For unsigned comparisons, we can directly compare the shifted values. 9676 // For signed comparisons we shift and sign extend. 9677 unsigned SReg = RegInfo.createVirtualRegister(RC); 9678 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), SReg) 9679 .addReg(TmpDestReg).addReg(MaskReg); 9680 unsigned ValueReg = SReg; 9681 unsigned CmpReg = Incr2Reg; 9682 if (CmpOpcode == PPC::CMPW) { 9683 ValueReg = RegInfo.createVirtualRegister(RC); 9684 BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg) 9685 .addReg(SReg).addReg(ShiftReg); 9686 unsigned ValueSReg = RegInfo.createVirtualRegister(RC); 9687 BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg) 9688 .addReg(ValueReg); 9689 ValueReg = ValueSReg; 9690 CmpReg = incr; 9691 } 9692 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 9693 .addReg(CmpReg).addReg(ValueReg); 9694 BuildMI(BB, dl, TII->get(PPC::BCC)) 9695 .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB); 9696 BB->addSuccessor(loop2MBB); 9697 BB->addSuccessor(exitMBB); 9698 BB = loop2MBB; 9699 } 9700 BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg) 9701 .addReg(Tmp3Reg).addReg(Tmp2Reg); 9702 BuildMI(BB, dl, TII->get(PPC::STWCX)) 9703 .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg); 9704 BuildMI(BB, dl, TII->get(PPC::BCC)) 9705 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 9706 BB->addSuccessor(loopMBB); 9707 BB->addSuccessor(exitMBB); 9708 9709 // exitMBB: 9710 // ... 
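  // The word loaded by the lwarx still holds the original value of the field
  // at its shifted byte position; the srw emitted at the head of exitMBB
  // below moves it back down to the low-order bits so that dest returns the
  // pre-operation value, as the atomic load-op pseudos require.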
9711 BB = exitMBB; 9712 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg) 9713 .addReg(ShiftReg); 9714 return BB; 9715 } 9716 9717 llvm::MachineBasicBlock * 9718 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI, 9719 MachineBasicBlock *MBB) const { 9720 DebugLoc DL = MI.getDebugLoc(); 9721 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 9722 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 9723 9724 MachineFunction *MF = MBB->getParent(); 9725 MachineRegisterInfo &MRI = MF->getRegInfo(); 9726 9727 const BasicBlock *BB = MBB->getBasicBlock(); 9728 MachineFunction::iterator I = ++MBB->getIterator(); 9729 9730 // Memory Reference 9731 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin(); 9732 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end(); 9733 9734 unsigned DstReg = MI.getOperand(0).getReg(); 9735 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 9736 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!"); 9737 unsigned mainDstReg = MRI.createVirtualRegister(RC); 9738 unsigned restoreDstReg = MRI.createVirtualRegister(RC); 9739 9740 MVT PVT = getPointerTy(MF->getDataLayout()); 9741 assert((PVT == MVT::i64 || PVT == MVT::i32) && 9742 "Invalid Pointer Size!"); 9743 // For v = setjmp(buf), we generate 9744 // 9745 // thisMBB: 9746 // SjLjSetup mainMBB 9747 // bl mainMBB 9748 // v_restore = 1 9749 // b sinkMBB 9750 // 9751 // mainMBB: 9752 // buf[LabelOffset] = LR 9753 // v_main = 0 9754 // 9755 // sinkMBB: 9756 // v = phi(main, restore) 9757 // 9758 9759 MachineBasicBlock *thisMBB = MBB; 9760 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 9761 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 9762 MF->insert(I, mainMBB); 9763 MF->insert(I, sinkMBB); 9764 9765 MachineInstrBuilder MIB; 9766 9767 // Transfer the remainder of BB and its successor edges to sinkMBB. 9768 sinkMBB->splice(sinkMBB->begin(), MBB, 9769 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 9770 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 9771 9772 // Note that the structure of the jmp_buf used here is not compatible 9773 // with that used by libc, and is not designed to be. Specifically, it 9774 // stores only those 'reserved' registers that LLVM does not otherwise 9775 // understand how to spill. Also, by convention, by the time this 9776 // intrinsic is called, Clang has already stored the frame address in the 9777 // first slot of the buffer and stack address in the third. Following the 9778 // X86 target code, we'll store the jump address in the second slot. We also 9779 // need to save the TOC pointer (R2) to handle jumps between shared 9780 // libraries, and that will be stored in the fourth slot. The thread 9781 // identifier (R13) is not affected. 9782 9783 // thisMBB: 9784 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 9785 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 9786 const int64_t BPOffset = 4 * PVT.getStoreSize(); 9787 9788 // Prepare IP either in reg. 9789 const TargetRegisterClass *PtrRC = getRegClassFor(PVT); 9790 unsigned LabelReg = MRI.createVirtualRegister(PtrRC); 9791 unsigned BufReg = MI.getOperand(1).getReg(); 9792 9793 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) { 9794 setUsesTOCBasePtr(*MBB->getParent()); 9795 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD)) 9796 .addReg(PPC::X2) 9797 .addImm(TOCOffset) 9798 .addReg(BufReg); 9799 MIB.setMemRefs(MMOBegin, MMOEnd); 9800 } 9801 9802 // Naked functions never have a base pointer, and so we use r1. 
For all 9803 // other functions, this decision must be delayed until during PEI. 9804 unsigned BaseReg; 9805 if (MF->getFunction()->hasFnAttribute(Attribute::Naked)) 9806 BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1; 9807 else 9808 BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP; 9809 9810 MIB = BuildMI(*thisMBB, MI, DL, 9811 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW)) 9812 .addReg(BaseReg) 9813 .addImm(BPOffset) 9814 .addReg(BufReg); 9815 MIB.setMemRefs(MMOBegin, MMOEnd); 9816 9817 // Setup 9818 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB); 9819 MIB.addRegMask(TRI->getNoPreservedMask()); 9820 9821 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1); 9822 9823 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup)) 9824 .addMBB(mainMBB); 9825 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB); 9826 9827 thisMBB->addSuccessor(mainMBB, BranchProbability::getZero()); 9828 thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne()); 9829 9830 // mainMBB: 9831 // mainDstReg = 0 9832 MIB = 9833 BuildMI(mainMBB, DL, 9834 TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg); 9835 9836 // Store IP 9837 if (Subtarget.isPPC64()) { 9838 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD)) 9839 .addReg(LabelReg) 9840 .addImm(LabelOffset) 9841 .addReg(BufReg); 9842 } else { 9843 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW)) 9844 .addReg(LabelReg) 9845 .addImm(LabelOffset) 9846 .addReg(BufReg); 9847 } 9848 9849 MIB.setMemRefs(MMOBegin, MMOEnd); 9850 9851 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0); 9852 mainMBB->addSuccessor(sinkMBB); 9853 9854 // sinkMBB: 9855 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 9856 TII->get(PPC::PHI), DstReg) 9857 .addReg(mainDstReg).addMBB(mainMBB) 9858 .addReg(restoreDstReg).addMBB(thisMBB); 9859 9860 MI.eraseFromParent(); 9861 return sinkMBB; 9862 } 9863 9864 MachineBasicBlock * 9865 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI, 9866 MachineBasicBlock *MBB) const { 9867 DebugLoc DL = MI.getDebugLoc(); 9868 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 9869 9870 MachineFunction *MF = MBB->getParent(); 9871 MachineRegisterInfo &MRI = MF->getRegInfo(); 9872 9873 // Memory Reference 9874 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin(); 9875 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end(); 9876 9877 MVT PVT = getPointerTy(MF->getDataLayout()); 9878 assert((PVT == MVT::i64 || PVT == MVT::i32) && 9879 "Invalid Pointer Size!"); 9880 9881 const TargetRegisterClass *RC = 9882 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 9883 unsigned Tmp = MRI.createVirtualRegister(RC); 9884 // Since FP is only updated here but NOT referenced, it's treated as GPR. 9885 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; 9886 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; 9887 unsigned BP = 9888 (PVT == MVT::i64) 9889 ? PPC::X30 9890 : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29 9891 : PPC::R30); 9892 9893 MachineInstrBuilder MIB; 9894 9895 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 9896 const int64_t SPOffset = 2 * PVT.getStoreSize(); 9897 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 9898 const int64_t BPOffset = 4 * PVT.getStoreSize(); 9899 9900 unsigned BufReg = MI.getOperand(0).getReg(); 9901 9902 // Reload FP (the jumped-to function may not have had a 9903 // frame pointer, and if so, then its r31 will be restored 9904 // as necessary). 
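  // For reference, the slots reloaded below (matching the offsets used by
  // emitEHSjLjSetJmp above, in units of the pointer size) are roughly:
  //   buf[0] -> FP,  buf[1] -> IP,  buf[2] -> SP,
  //   buf[3] -> TOC (64-bit SVR4 only),  buf[4] -> BP
  // after which we mtctr the reloaded IP and bctr to it.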
9905 if (PVT == MVT::i64) { 9906 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP) 9907 .addImm(0) 9908 .addReg(BufReg); 9909 } else { 9910 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP) 9911 .addImm(0) 9912 .addReg(BufReg); 9913 } 9914 MIB.setMemRefs(MMOBegin, MMOEnd); 9915 9916 // Reload IP 9917 if (PVT == MVT::i64) { 9918 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp) 9919 .addImm(LabelOffset) 9920 .addReg(BufReg); 9921 } else { 9922 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp) 9923 .addImm(LabelOffset) 9924 .addReg(BufReg); 9925 } 9926 MIB.setMemRefs(MMOBegin, MMOEnd); 9927 9928 // Reload SP 9929 if (PVT == MVT::i64) { 9930 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP) 9931 .addImm(SPOffset) 9932 .addReg(BufReg); 9933 } else { 9934 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP) 9935 .addImm(SPOffset) 9936 .addReg(BufReg); 9937 } 9938 MIB.setMemRefs(MMOBegin, MMOEnd); 9939 9940 // Reload BP 9941 if (PVT == MVT::i64) { 9942 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP) 9943 .addImm(BPOffset) 9944 .addReg(BufReg); 9945 } else { 9946 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP) 9947 .addImm(BPOffset) 9948 .addReg(BufReg); 9949 } 9950 MIB.setMemRefs(MMOBegin, MMOEnd); 9951 9952 // Reload TOC 9953 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) { 9954 setUsesTOCBasePtr(*MBB->getParent()); 9955 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) 9956 .addImm(TOCOffset) 9957 .addReg(BufReg); 9958 9959 MIB.setMemRefs(MMOBegin, MMOEnd); 9960 } 9961 9962 // Jump 9963 BuildMI(*MBB, MI, DL, 9964 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp); 9965 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR)); 9966 9967 MI.eraseFromParent(); 9968 return MBB; 9969 } 9970 9971 MachineBasicBlock * 9972 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, 9973 MachineBasicBlock *BB) const { 9974 if (MI.getOpcode() == TargetOpcode::STACKMAP || 9975 MI.getOpcode() == TargetOpcode::PATCHPOINT) { 9976 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() && 9977 MI.getOpcode() == TargetOpcode::PATCHPOINT) { 9978 // Call lowering should have added an r2 operand to indicate a dependence 9979 // on the TOC base pointer value. It can't however, because there is no 9980 // way to mark the dependence as implicit there, and so the stackmap code 9981 // will confuse it with a regular operand. Instead, add the dependence 9982 // here. 9983 setUsesTOCBasePtr(*BB->getParent()); 9984 MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true)); 9985 } 9986 9987 return emitPatchPoint(MI, BB); 9988 } 9989 9990 if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 || 9991 MI.getOpcode() == PPC::EH_SjLj_SetJmp64) { 9992 return emitEHSjLjSetJmp(MI, BB); 9993 } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 || 9994 MI.getOpcode() == PPC::EH_SjLj_LongJmp64) { 9995 return emitEHSjLjLongJmp(MI, BB); 9996 } 9997 9998 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 9999 10000 // To "insert" these instructions we actually have to insert their 10001 // control-flow patterns. 
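  // For the SELECT_CC_*/SELECT_* pseudos expanded below this means building
  // the usual diamond: a conditional branch out of the current block, an
  // empty "false" block that falls through, and a sink block with a PHI of
  // the two incoming values.  Roughly:
  //
  //   thisMBB:   bc/bcc sinkMBB     ; true value is already available here
  //   copy0MBB:  (fallthrough)      ; false value is already available here
  //   sinkMBB:   dest = PHI(FalseVal, copy0MBB; TrueVal, thisMBB)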
10002 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 10003 MachineFunction::iterator It = ++BB->getIterator(); 10004 10005 MachineFunction *F = BB->getParent(); 10006 10007 if (MI.getOpcode() == PPC::SELECT_CC_I4 || 10008 MI.getOpcode() == PPC::SELECT_CC_I8 || 10009 MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8) { 10010 SmallVector<MachineOperand, 2> Cond; 10011 if (MI.getOpcode() == PPC::SELECT_CC_I4 || 10012 MI.getOpcode() == PPC::SELECT_CC_I8) 10013 Cond.push_back(MI.getOperand(4)); 10014 else 10015 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET)); 10016 Cond.push_back(MI.getOperand(1)); 10017 10018 DebugLoc dl = MI.getDebugLoc(); 10019 TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond, 10020 MI.getOperand(2).getReg(), MI.getOperand(3).getReg()); 10021 } else if (MI.getOpcode() == PPC::SELECT_CC_I4 || 10022 MI.getOpcode() == PPC::SELECT_CC_I8 || 10023 MI.getOpcode() == PPC::SELECT_CC_F4 || 10024 MI.getOpcode() == PPC::SELECT_CC_F8 || 10025 MI.getOpcode() == PPC::SELECT_CC_QFRC || 10026 MI.getOpcode() == PPC::SELECT_CC_QSRC || 10027 MI.getOpcode() == PPC::SELECT_CC_QBRC || 10028 MI.getOpcode() == PPC::SELECT_CC_VRRC || 10029 MI.getOpcode() == PPC::SELECT_CC_VSFRC || 10030 MI.getOpcode() == PPC::SELECT_CC_VSSRC || 10031 MI.getOpcode() == PPC::SELECT_CC_VSRC || 10032 MI.getOpcode() == PPC::SELECT_I4 || 10033 MI.getOpcode() == PPC::SELECT_I8 || 10034 MI.getOpcode() == PPC::SELECT_F4 || 10035 MI.getOpcode() == PPC::SELECT_F8 || 10036 MI.getOpcode() == PPC::SELECT_QFRC || 10037 MI.getOpcode() == PPC::SELECT_QSRC || 10038 MI.getOpcode() == PPC::SELECT_QBRC || 10039 MI.getOpcode() == PPC::SELECT_VRRC || 10040 MI.getOpcode() == PPC::SELECT_VSFRC || 10041 MI.getOpcode() == PPC::SELECT_VSSRC || 10042 MI.getOpcode() == PPC::SELECT_VSRC) { 10043 // The incoming instruction knows the destination vreg to set, the 10044 // condition code register to branch on, the true/false values to 10045 // select between, and a branch opcode to use. 10046 10047 // thisMBB: 10048 // ... 10049 // TrueVal = ... 10050 // cmpTY ccX, r1, r2 10051 // bCC copy1MBB 10052 // fallthrough --> copy0MBB 10053 MachineBasicBlock *thisMBB = BB; 10054 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 10055 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 10056 DebugLoc dl = MI.getDebugLoc(); 10057 F->insert(It, copy0MBB); 10058 F->insert(It, sinkMBB); 10059 10060 // Transfer the remainder of BB and its successor edges to sinkMBB. 10061 sinkMBB->splice(sinkMBB->begin(), BB, 10062 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10063 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 10064 10065 // Next, add the true and fallthrough blocks as its successors. 
10066 BB->addSuccessor(copy0MBB); 10067 BB->addSuccessor(sinkMBB); 10068 10069 if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 || 10070 MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 || 10071 MI.getOpcode() == PPC::SELECT_QFRC || 10072 MI.getOpcode() == PPC::SELECT_QSRC || 10073 MI.getOpcode() == PPC::SELECT_QBRC || 10074 MI.getOpcode() == PPC::SELECT_VRRC || 10075 MI.getOpcode() == PPC::SELECT_VSFRC || 10076 MI.getOpcode() == PPC::SELECT_VSSRC || 10077 MI.getOpcode() == PPC::SELECT_VSRC) { 10078 BuildMI(BB, dl, TII->get(PPC::BC)) 10079 .addReg(MI.getOperand(1).getReg()) 10080 .addMBB(sinkMBB); 10081 } else { 10082 unsigned SelectPred = MI.getOperand(4).getImm(); 10083 BuildMI(BB, dl, TII->get(PPC::BCC)) 10084 .addImm(SelectPred) 10085 .addReg(MI.getOperand(1).getReg()) 10086 .addMBB(sinkMBB); 10087 } 10088 10089 // copy0MBB: 10090 // %FalseValue = ... 10091 // # fallthrough to sinkMBB 10092 BB = copy0MBB; 10093 10094 // Update machine-CFG edges 10095 BB->addSuccessor(sinkMBB); 10096 10097 // sinkMBB: 10098 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 10099 // ... 10100 BB = sinkMBB; 10101 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg()) 10102 .addReg(MI.getOperand(3).getReg()) 10103 .addMBB(copy0MBB) 10104 .addReg(MI.getOperand(2).getReg()) 10105 .addMBB(thisMBB); 10106 } else if (MI.getOpcode() == PPC::ReadTB) { 10107 // To read the 64-bit time-base register on a 32-bit target, we read the 10108 // two halves. Should the counter have wrapped while it was being read, we 10109 // need to try again. 10110 // ... 10111 // readLoop: 10112 // mfspr Rx,TBU # load from TBU 10113 // mfspr Ry,TB # load from TB 10114 // mfspr Rz,TBU # load from TBU 10115 // cmpw crX,Rx,Rz # check if 'old'='new' 10116 // bne readLoop # branch if they're not equal 10117 // ... 10118 10119 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB); 10120 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 10121 DebugLoc dl = MI.getDebugLoc(); 10122 F->insert(It, readMBB); 10123 F->insert(It, sinkMBB); 10124 10125 // Transfer the remainder of BB and its successor edges to sinkMBB. 
10126 sinkMBB->splice(sinkMBB->begin(), BB, 10127 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10128 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 10129 10130 BB->addSuccessor(readMBB); 10131 BB = readMBB; 10132 10133 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10134 unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 10135 unsigned LoReg = MI.getOperand(0).getReg(); 10136 unsigned HiReg = MI.getOperand(1).getReg(); 10137 10138 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269); 10139 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268); 10140 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269); 10141 10142 unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 10143 10144 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg) 10145 .addReg(HiReg).addReg(ReadAgainReg); 10146 BuildMI(BB, dl, TII->get(PPC::BCC)) 10147 .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB); 10148 10149 BB->addSuccessor(readMBB); 10150 BB->addSuccessor(sinkMBB); 10151 } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 10152 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 10153 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 10154 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 10155 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 10156 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4); 10157 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 10158 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8); 10159 10160 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 10161 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 10162 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 10163 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 10164 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 10165 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND); 10166 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 10167 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8); 10168 10169 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 10170 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 10171 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 10172 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 10173 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 10174 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR); 10175 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 10176 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8); 10177 10178 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 10179 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 10180 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 10181 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 10182 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 10183 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR); 10184 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 10185 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8); 10186 10187 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 10188 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND); 10189 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 10190 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND); 10191 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 10192 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND); 10193 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 10194 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8); 10195 10196 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 10197 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 10198 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 10199 BB = 
EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 10200 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32) 10201 BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF); 10202 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 10203 BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8); 10204 10205 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8) 10206 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE); 10207 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16) 10208 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE); 10209 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32) 10210 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE); 10211 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64) 10212 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE); 10213 10214 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8) 10215 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE); 10216 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16) 10217 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE); 10218 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32) 10219 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE); 10220 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64) 10221 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE); 10222 10223 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8) 10224 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE); 10225 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16) 10226 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE); 10227 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32) 10228 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE); 10229 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64) 10230 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE); 10231 10232 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8) 10233 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE); 10234 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16) 10235 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE); 10236 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32) 10237 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE); 10238 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64) 10239 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE); 10240 10241 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8) 10242 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 10243 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16) 10244 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 10245 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32) 10246 BB = EmitAtomicBinary(MI, BB, 4, 0); 10247 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64) 10248 BB = EmitAtomicBinary(MI, BB, 8, 0); 10249 else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 10250 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 || 10251 (Subtarget.hasPartwordAtomics() && 10252 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) || 10253 (Subtarget.hasPartwordAtomics() && 10254 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) { 10255 bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64; 10256 10257 auto LoadMnemonic = PPC::LDARX; 10258 auto StoreMnemonic = PPC::STDCX; 10259 switch (MI.getOpcode()) { 10260 default: 10261 llvm_unreachable("Compare and swap of unknown size"); 10262 case PPC::ATOMIC_CMP_SWAP_I8: 10263 LoadMnemonic = PPC::LBARX; 10264 StoreMnemonic = PPC::STBCX; 10265 assert(Subtarget.hasPartwordAtomics() 
&& "No support partword atomics."); 10266 break; 10267 case PPC::ATOMIC_CMP_SWAP_I16: 10268 LoadMnemonic = PPC::LHARX; 10269 StoreMnemonic = PPC::STHCX; 10270 assert(Subtarget.hasPartwordAtomics() && "No support partword atomics."); 10271 break; 10272 case PPC::ATOMIC_CMP_SWAP_I32: 10273 LoadMnemonic = PPC::LWARX; 10274 StoreMnemonic = PPC::STWCX; 10275 break; 10276 case PPC::ATOMIC_CMP_SWAP_I64: 10277 LoadMnemonic = PPC::LDARX; 10278 StoreMnemonic = PPC::STDCX; 10279 break; 10280 } 10281 unsigned dest = MI.getOperand(0).getReg(); 10282 unsigned ptrA = MI.getOperand(1).getReg(); 10283 unsigned ptrB = MI.getOperand(2).getReg(); 10284 unsigned oldval = MI.getOperand(3).getReg(); 10285 unsigned newval = MI.getOperand(4).getReg(); 10286 DebugLoc dl = MI.getDebugLoc(); 10287 10288 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 10289 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 10290 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 10291 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 10292 F->insert(It, loop1MBB); 10293 F->insert(It, loop2MBB); 10294 F->insert(It, midMBB); 10295 F->insert(It, exitMBB); 10296 exitMBB->splice(exitMBB->begin(), BB, 10297 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10298 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 10299 10300 // thisMBB: 10301 // ... 10302 // fallthrough --> loopMBB 10303 BB->addSuccessor(loop1MBB); 10304 10305 // loop1MBB: 10306 // l[bhwd]arx dest, ptr 10307 // cmp[wd] dest, oldval 10308 // bne- midMBB 10309 // loop2MBB: 10310 // st[bhwd]cx. newval, ptr 10311 // bne- loopMBB 10312 // b exitBB 10313 // midMBB: 10314 // st[bhwd]cx. dest, ptr 10315 // exitBB: 10316 BB = loop1MBB; 10317 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 10318 .addReg(ptrA).addReg(ptrB); 10319 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0) 10320 .addReg(oldval).addReg(dest); 10321 BuildMI(BB, dl, TII->get(PPC::BCC)) 10322 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 10323 BB->addSuccessor(loop2MBB); 10324 BB->addSuccessor(midMBB); 10325 10326 BB = loop2MBB; 10327 BuildMI(BB, dl, TII->get(StoreMnemonic)) 10328 .addReg(newval).addReg(ptrA).addReg(ptrB); 10329 BuildMI(BB, dl, TII->get(PPC::BCC)) 10330 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 10331 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 10332 BB->addSuccessor(loop1MBB); 10333 BB->addSuccessor(exitMBB); 10334 10335 BB = midMBB; 10336 BuildMI(BB, dl, TII->get(StoreMnemonic)) 10337 .addReg(dest).addReg(ptrA).addReg(ptrB); 10338 BB->addSuccessor(exitMBB); 10339 10340 // exitMBB: 10341 // ... 10342 BB = exitMBB; 10343 } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 10344 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 10345 // We must use 64-bit registers for addresses when targeting 64-bit, 10346 // since we're actually doing arithmetic on them. Other registers 10347 // can be 32-bit. 
10348 bool is64bit = Subtarget.isPPC64(); 10349 bool isLittleEndian = Subtarget.isLittleEndian(); 10350 bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 10351 10352 unsigned dest = MI.getOperand(0).getReg(); 10353 unsigned ptrA = MI.getOperand(1).getReg(); 10354 unsigned ptrB = MI.getOperand(2).getReg(); 10355 unsigned oldval = MI.getOperand(3).getReg(); 10356 unsigned newval = MI.getOperand(4).getReg(); 10357 DebugLoc dl = MI.getDebugLoc(); 10358 10359 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 10360 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 10361 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 10362 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 10363 F->insert(It, loop1MBB); 10364 F->insert(It, loop2MBB); 10365 F->insert(It, midMBB); 10366 F->insert(It, exitMBB); 10367 exitMBB->splice(exitMBB->begin(), BB, 10368 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10369 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 10370 10371 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10372 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 10373 : &PPC::GPRCRegClass; 10374 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 10375 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 10376 unsigned ShiftReg = 10377 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC); 10378 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC); 10379 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC); 10380 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC); 10381 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC); 10382 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 10383 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 10384 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 10385 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 10386 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 10387 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 10388 unsigned Ptr1Reg; 10389 unsigned TmpReg = RegInfo.createVirtualRegister(RC); 10390 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 10391 // thisMBB: 10392 // ... 10393 // fallthrough --> loopMBB 10394 BB->addSuccessor(loop1MBB); 10395 10396 // The 4-byte load must be aligned, while a char or short may be 10397 // anywhere in the word. Hence all this nasty bookkeeping code. 10398 // add ptr1, ptrA, ptrB [copy if ptrA==0] 10399 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 10400 // xori shift, shift1, 24 [16] 10401 // rlwinm ptr, ptr1, 0, 0, 29 10402 // slw newval2, newval, shift 10403 // slw oldval2, oldval,shift 10404 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 10405 // slw mask, mask2, shift 10406 // and newval3, newval2, mask 10407 // and oldval3, oldval2, mask 10408 // loop1MBB: 10409 // lwarx tmpDest, ptr 10410 // and tmp, tmpDest, mask 10411 // cmpw tmp, oldval3 10412 // bne- midMBB 10413 // loop2MBB: 10414 // andc tmp2, tmpDest, mask 10415 // or tmp4, tmp2, newval3 10416 // stwcx. tmp4, ptr 10417 // bne- loop1MBB 10418 // b exitBB 10419 // midMBB: 10420 // stwcx. tmpDest, ptr 10421 // exitBB: 10422 // srw dest, tmpDest, shift 10423 if (ptrA != ZeroReg) { 10424 Ptr1Reg = RegInfo.createVirtualRegister(RC); 10425 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 10426 .addReg(ptrA).addReg(ptrB); 10427 } else { 10428 Ptr1Reg = ptrB; 10429 } 10430 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 10431 .addImm(3).addImm(27).addImm(is8bit ? 
28 : 27); 10432 if (!isLittleEndian) 10433 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 10434 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 10435 if (is64bit) 10436 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 10437 .addReg(Ptr1Reg).addImm(0).addImm(61); 10438 else 10439 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 10440 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 10441 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 10442 .addReg(newval).addReg(ShiftReg); 10443 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 10444 .addReg(oldval).addReg(ShiftReg); 10445 if (is8bit) 10446 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 10447 else { 10448 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 10449 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 10450 .addReg(Mask3Reg).addImm(65535); 10451 } 10452 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 10453 .addReg(Mask2Reg).addReg(ShiftReg); 10454 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 10455 .addReg(NewVal2Reg).addReg(MaskReg); 10456 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 10457 .addReg(OldVal2Reg).addReg(MaskReg); 10458 10459 BB = loop1MBB; 10460 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 10461 .addReg(ZeroReg).addReg(PtrReg); 10462 BuildMI(BB, dl, TII->get(PPC::AND),TmpReg) 10463 .addReg(TmpDestReg).addReg(MaskReg); 10464 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 10465 .addReg(TmpReg).addReg(OldVal3Reg); 10466 BuildMI(BB, dl, TII->get(PPC::BCC)) 10467 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 10468 BB->addSuccessor(loop2MBB); 10469 BB->addSuccessor(midMBB); 10470 10471 BB = loop2MBB; 10472 BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg) 10473 .addReg(TmpDestReg).addReg(MaskReg); 10474 BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg) 10475 .addReg(Tmp2Reg).addReg(NewVal3Reg); 10476 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg) 10477 .addReg(ZeroReg).addReg(PtrReg); 10478 BuildMI(BB, dl, TII->get(PPC::BCC)) 10479 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 10480 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 10481 BB->addSuccessor(loop1MBB); 10482 BB->addSuccessor(exitMBB); 10483 10484 BB = midMBB; 10485 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg) 10486 .addReg(ZeroReg).addReg(PtrReg); 10487 BB->addSuccessor(exitMBB); 10488 10489 // exitMBB: 10490 // ... 10491 BB = exitMBB; 10492 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg) 10493 .addReg(ShiftReg); 10494 } else if (MI.getOpcode() == PPC::FADDrtz) { 10495 // This pseudo performs an FADD with rounding mode temporarily forced 10496 // to round-to-zero. We emit this via custom inserter since the FPSCR 10497 // is not modeled at the SelectionDAG level. 10498 unsigned Dest = MI.getOperand(0).getReg(); 10499 unsigned Src1 = MI.getOperand(1).getReg(); 10500 unsigned Src2 = MI.getOperand(2).getReg(); 10501 DebugLoc dl = MI.getDebugLoc(); 10502 10503 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10504 unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 10505 10506 // Save FPSCR value. 10507 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg); 10508 10509 // Set rounding mode to round-to-zero. 10510 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31); 10511 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30); 10512 10513 // Perform addition. 10514 BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2); 10515 10516 // Restore FPSCR value. 
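// Note on the FPSCR manipulation in this FADDrtz expansion: the mtfsb1 31 /
// mtfsb0 30 above set the rounding-control field RN (FPSCR bits 30:31) to
// 0b01, i.e. round toward zero, and the mtfsf with field mask 1 below copies
// FPSCR field 7 (bits 28:31, which contains RN) back from the value saved by
// the mffs, restoring the caller's rounding mode.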
10517 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg); 10518 } else if (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT || 10519 MI.getOpcode() == PPC::ANDIo_1_GT_BIT || 10520 MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 10521 MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) { 10522 unsigned Opcode = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 10523 MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) 10524 ? PPC::ANDIo8 10525 : PPC::ANDIo; 10526 bool isEQ = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT || 10527 MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8); 10528 10529 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10530 unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ? 10531 &PPC::GPRCRegClass : 10532 &PPC::G8RCRegClass); 10533 10534 DebugLoc dl = MI.getDebugLoc(); 10535 BuildMI(*BB, MI, dl, TII->get(Opcode), Dest) 10536 .addReg(MI.getOperand(1).getReg()) 10537 .addImm(1); 10538 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), 10539 MI.getOperand(0).getReg()) 10540 .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT); 10541 } else if (MI.getOpcode() == PPC::TCHECK_RET) { 10542 DebugLoc Dl = MI.getDebugLoc(); 10543 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10544 unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 10545 BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg); 10546 return BB; 10547 } else { 10548 llvm_unreachable("Unexpected instr type to insert"); 10549 } 10550 10551 MI.eraseFromParent(); // The pseudo instruction is gone now. 10552 return BB; 10553 } 10554 10555 //===----------------------------------------------------------------------===// 10556 // Target Optimization Hooks 10557 //===----------------------------------------------------------------------===// 10558 10559 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) { 10560 // For the estimates, convergence is quadratic, so we essentially double the 10561 // number of digits correct after every iteration. For both FRE and FRSQRTE, 10562 // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(), 10563 // this is 2^-14. IEEE float has 23 digits and double has 52 digits. 10564 int RefinementSteps = Subtarget.hasRecipPrec() ? 
1 : 3; 10565 if (VT.getScalarType() == MVT::f64) 10566 RefinementSteps++; 10567 return RefinementSteps; 10568 } 10569 10570 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, 10571 int Enabled, int &RefinementSteps, 10572 bool &UseOneConstNR, 10573 bool Reciprocal) const { 10574 EVT VT = Operand.getValueType(); 10575 if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) || 10576 (VT == MVT::f64 && Subtarget.hasFRSQRTE()) || 10577 (VT == MVT::v4f32 && Subtarget.hasAltivec()) || 10578 (VT == MVT::v2f64 && Subtarget.hasVSX()) || 10579 (VT == MVT::v4f32 && Subtarget.hasQPX()) || 10580 (VT == MVT::v4f64 && Subtarget.hasQPX())) { 10581 if (RefinementSteps == ReciprocalEstimate::Unspecified) 10582 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget); 10583 10584 UseOneConstNR = true; 10585 return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand); 10586 } 10587 return SDValue(); 10588 } 10589 10590 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG, 10591 int Enabled, 10592 int &RefinementSteps) const { 10593 EVT VT = Operand.getValueType(); 10594 if ((VT == MVT::f32 && Subtarget.hasFRES()) || 10595 (VT == MVT::f64 && Subtarget.hasFRE()) || 10596 (VT == MVT::v4f32 && Subtarget.hasAltivec()) || 10597 (VT == MVT::v2f64 && Subtarget.hasVSX()) || 10598 (VT == MVT::v4f32 && Subtarget.hasQPX()) || 10599 (VT == MVT::v4f64 && Subtarget.hasQPX())) { 10600 if (RefinementSteps == ReciprocalEstimate::Unspecified) 10601 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget); 10602 return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand); 10603 } 10604 return SDValue(); 10605 } 10606 10607 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const { 10608 // Note: This functionality is used only when unsafe-fp-math is enabled, and 10609 // on cores with reciprocal estimates (which are used when unsafe-fp-math is 10610 // enabled for division), this functionality is redundant with the default 10611 // combiner logic (once the division -> reciprocal/multiply transformation 10612 // has taken place). As a result, this matters more for older cores than for 10613 // newer ones. 10614 10615 // Combine multiple FDIVs with the same divisor into multiple FMULs by the 10616 // reciprocal if there are two or more FDIVs (for embedded cores with only 10617 // one FP pipeline) for three or more FDIVs (for generic OOO cores). 10618 switch (Subtarget.getDarwinDirective()) { 10619 default: 10620 return 3; 10621 case PPC::DIR_440: 10622 case PPC::DIR_A2: 10623 case PPC::DIR_E500mc: 10624 case PPC::DIR_E5500: 10625 return 2; 10626 } 10627 } 10628 10629 // isConsecutiveLSLoc needs to work even if all adds have not yet been 10630 // collapsed, and so we need to look through chains of them. 10631 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base, 10632 int64_t& Offset, SelectionDAG &DAG) { 10633 if (DAG.isBaseWithConstantOffset(Loc)) { 10634 Base = Loc.getOperand(0); 10635 Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue(); 10636 10637 // The base might itself be a base plus an offset, and if so, accumulate 10638 // that as well. 
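// For example, for Loc == (add (add X, 8), 4) this recursion leaves
// Base == X and Offset == 12.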
10639 getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG); 10640 } 10641 } 10642 10643 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, 10644 unsigned Bytes, int Dist, 10645 SelectionDAG &DAG) { 10646 if (VT.getSizeInBits() / 8 != Bytes) 10647 return false; 10648 10649 SDValue BaseLoc = Base->getBasePtr(); 10650 if (Loc.getOpcode() == ISD::FrameIndex) { 10651 if (BaseLoc.getOpcode() != ISD::FrameIndex) 10652 return false; 10653 const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 10654 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 10655 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 10656 int FS = MFI.getObjectSize(FI); 10657 int BFS = MFI.getObjectSize(BFI); 10658 if (FS != BFS || FS != (int)Bytes) return false; 10659 return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes); 10660 } 10661 10662 SDValue Base1 = Loc, Base2 = BaseLoc; 10663 int64_t Offset1 = 0, Offset2 = 0; 10664 getBaseWithConstantOffset(Loc, Base1, Offset1, DAG); 10665 getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG); 10666 if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes)) 10667 return true; 10668 10669 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 10670 const GlobalValue *GV1 = nullptr; 10671 const GlobalValue *GV2 = nullptr; 10672 Offset1 = 0; 10673 Offset2 = 0; 10674 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1); 10675 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2); 10676 if (isGA1 && isGA2 && GV1 == GV2) 10677 return Offset1 == (Offset2 + Dist*Bytes); 10678 return false; 10679 } 10680 10681 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does 10682 // not enforce equality of the chain operands. 10683 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base, 10684 unsigned Bytes, int Dist, 10685 SelectionDAG &DAG) { 10686 if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) { 10687 EVT VT = LS->getMemoryVT(); 10688 SDValue Loc = LS->getBasePtr(); 10689 return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG); 10690 } 10691 10692 if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) { 10693 EVT VT; 10694 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10695 default: return false; 10696 case Intrinsic::ppc_qpx_qvlfd: 10697 case Intrinsic::ppc_qpx_qvlfda: 10698 VT = MVT::v4f64; 10699 break; 10700 case Intrinsic::ppc_qpx_qvlfs: 10701 case Intrinsic::ppc_qpx_qvlfsa: 10702 VT = MVT::v4f32; 10703 break; 10704 case Intrinsic::ppc_qpx_qvlfcd: 10705 case Intrinsic::ppc_qpx_qvlfcda: 10706 VT = MVT::v2f64; 10707 break; 10708 case Intrinsic::ppc_qpx_qvlfcs: 10709 case Intrinsic::ppc_qpx_qvlfcsa: 10710 VT = MVT::v2f32; 10711 break; 10712 case Intrinsic::ppc_qpx_qvlfiwa: 10713 case Intrinsic::ppc_qpx_qvlfiwz: 10714 case Intrinsic::ppc_altivec_lvx: 10715 case Intrinsic::ppc_altivec_lvxl: 10716 case Intrinsic::ppc_vsx_lxvw4x: 10717 case Intrinsic::ppc_vsx_lxvw4x_be: 10718 VT = MVT::v4i32; 10719 break; 10720 case Intrinsic::ppc_vsx_lxvd2x: 10721 case Intrinsic::ppc_vsx_lxvd2x_be: 10722 VT = MVT::v2f64; 10723 break; 10724 case Intrinsic::ppc_altivec_lvebx: 10725 VT = MVT::i8; 10726 break; 10727 case Intrinsic::ppc_altivec_lvehx: 10728 VT = MVT::i16; 10729 break; 10730 case Intrinsic::ppc_altivec_lvewx: 10731 VT = MVT::i32; 10732 break; 10733 } 10734 10735 return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG); 10736 } 10737 10738 if (N->getOpcode() == ISD::INTRINSIC_VOID) { 10739 EVT VT; 10740 switch 
(cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10741 default: return false; 10742 case Intrinsic::ppc_qpx_qvstfd: 10743 case Intrinsic::ppc_qpx_qvstfda: 10744 VT = MVT::v4f64; 10745 break; 10746 case Intrinsic::ppc_qpx_qvstfs: 10747 case Intrinsic::ppc_qpx_qvstfsa: 10748 VT = MVT::v4f32; 10749 break; 10750 case Intrinsic::ppc_qpx_qvstfcd: 10751 case Intrinsic::ppc_qpx_qvstfcda: 10752 VT = MVT::v2f64; 10753 break; 10754 case Intrinsic::ppc_qpx_qvstfcs: 10755 case Intrinsic::ppc_qpx_qvstfcsa: 10756 VT = MVT::v2f32; 10757 break; 10758 case Intrinsic::ppc_qpx_qvstfiw: 10759 case Intrinsic::ppc_qpx_qvstfiwa: 10760 case Intrinsic::ppc_altivec_stvx: 10761 case Intrinsic::ppc_altivec_stvxl: 10762 case Intrinsic::ppc_vsx_stxvw4x: 10763 VT = MVT::v4i32; 10764 break; 10765 case Intrinsic::ppc_vsx_stxvd2x: 10766 VT = MVT::v2f64; 10767 break; 10768 case Intrinsic::ppc_vsx_stxvw4x_be: 10769 VT = MVT::v4i32; 10770 break; 10771 case Intrinsic::ppc_vsx_stxvd2x_be: 10772 VT = MVT::v2f64; 10773 break; 10774 case Intrinsic::ppc_altivec_stvebx: 10775 VT = MVT::i8; 10776 break; 10777 case Intrinsic::ppc_altivec_stvehx: 10778 VT = MVT::i16; 10779 break; 10780 case Intrinsic::ppc_altivec_stvewx: 10781 VT = MVT::i32; 10782 break; 10783 } 10784 10785 return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG); 10786 } 10787 10788 return false; 10789 } 10790 10791 // Return true is there is a nearyby consecutive load to the one provided 10792 // (regardless of alignment). We search up and down the chain, looking though 10793 // token factors and other loads (but nothing else). As a result, a true result 10794 // indicates that it is safe to create a new consecutive load adjacent to the 10795 // load provided. 10796 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) { 10797 SDValue Chain = LD->getChain(); 10798 EVT VT = LD->getMemoryVT(); 10799 10800 SmallSet<SDNode *, 16> LoadRoots; 10801 SmallVector<SDNode *, 8> Queue(1, Chain.getNode()); 10802 SmallSet<SDNode *, 16> Visited; 10803 10804 // First, search up the chain, branching to follow all token-factor operands. 10805 // If we find a consecutive load, then we're done, otherwise, record all 10806 // nodes just above the top-level loads and token factors. 10807 while (!Queue.empty()) { 10808 SDNode *ChainNext = Queue.pop_back_val(); 10809 if (!Visited.insert(ChainNext).second) 10810 continue; 10811 10812 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) { 10813 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG)) 10814 return true; 10815 10816 if (!Visited.count(ChainLD->getChain().getNode())) 10817 Queue.push_back(ChainLD->getChain().getNode()); 10818 } else if (ChainNext->getOpcode() == ISD::TokenFactor) { 10819 for (const SDUse &O : ChainNext->ops()) 10820 if (!Visited.count(O.getNode())) 10821 Queue.push_back(O.getNode()); 10822 } else 10823 LoadRoots.insert(ChainNext); 10824 } 10825 10826 // Second, search down the chain, starting from the top-level nodes recorded 10827 // in the first phase. These top-level nodes are the nodes just above all 10828 // loads and token factors. Starting with their uses, recursively look though 10829 // all loads (just the chain uses) and token factors to find a consecutive 10830 // load. 
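// As in the first phase, a hit means a load of the same width located exactly
// VT.getStoreSize() bytes past LD already exists somewhere on the chain,
// e.g. a second 4-byte load at X+4 when LD loads 4 bytes from X.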
10831 Visited.clear(); 10832 Queue.clear(); 10833 10834 for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(), 10835 IE = LoadRoots.end(); I != IE; ++I) { 10836 Queue.push_back(*I); 10837 10838 while (!Queue.empty()) { 10839 SDNode *LoadRoot = Queue.pop_back_val(); 10840 if (!Visited.insert(LoadRoot).second) 10841 continue; 10842 10843 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot)) 10844 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG)) 10845 return true; 10846 10847 for (SDNode::use_iterator UI = LoadRoot->use_begin(), 10848 UE = LoadRoot->use_end(); UI != UE; ++UI) 10849 if (((isa<MemSDNode>(*UI) && 10850 cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) || 10851 UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI)) 10852 Queue.push_back(*UI); 10853 } 10854 } 10855 10856 return false; 10857 } 10858 10859 /// This function is called when we have proved that a SETCC node can be replaced 10860 /// by subtraction (and other supporting instructions) so that the result of 10861 /// comparison is kept in a GPR instead of CR. This function is purely for 10862 /// codegen purposes and has some flags to guide the codegen process. 10863 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement, 10864 bool Swap, SDLoc &DL, SelectionDAG &DAG) { 10865 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected."); 10866 10867 // Zero extend the operands to the largest legal integer. Originally, they 10868 // must be of a strictly smaller size. 10869 auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0), 10870 DAG.getConstant(Size, DL, MVT::i32)); 10871 auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1), 10872 DAG.getConstant(Size, DL, MVT::i32)); 10873 10874 // Swap if needed. Depends on the condition code. 10875 if (Swap) 10876 std::swap(Op0, Op1); 10877 10878 // Subtract extended integers. 10879 auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1); 10880 10881 // Move the sign bit to the least significant position and zero out the rest. 10882 // Now the least significant bit carries the result of original comparison. 10883 auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode, 10884 DAG.getConstant(Size - 1, DL, MVT::i32)); 10885 auto Final = Shifted; 10886 10887 // Complement the result if needed. Based on the condition code. 10888 if (Complement) 10889 Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted, 10890 DAG.getConstant(1, DL, MVT::i64)); 10891 10892 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final); 10893 } 10894 10895 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N, 10896 DAGCombinerInfo &DCI) const { 10897 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected."); 10898 10899 SelectionDAG &DAG = DCI.DAG; 10900 SDLoc DL(N); 10901 10902 // Size of integers being compared has a critical role in the following 10903 // analysis, so we prefer to do this when all types are legal. 
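// For example, on a 64-bit subtarget (see generateEquivalentSub above), a
// (setult i32 %a, %b) whose only users are zero-extends becomes, in effect:
//   %a64 = zext %a to i64
//   %b64 = zext %b to i64
//   %d   = sub  %a64, %b64
//   %res = trunc (srl %d, 63) to i1   ; the borrow/sign bit of the subtraction
// with an operand swap and/or a final xor 1 for the other unsigned predicates.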
10904 if (!DCI.isAfterLegalizeVectorOps()) 10905 return SDValue(); 10906 10907 // If all users of SETCC extend its value to a legal integer type 10908 // then we replace SETCC with a subtraction 10909 for (SDNode::use_iterator UI = N->use_begin(), 10910 UE = N->use_end(); UI != UE; ++UI) { 10911 if (UI->getOpcode() != ISD::ZERO_EXTEND) 10912 return SDValue(); 10913 } 10914 10915 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 10916 auto OpSize = N->getOperand(0).getValueSizeInBits(); 10917 10918 unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits(); 10919 10920 if (OpSize < Size) { 10921 switch (CC) { 10922 default: break; 10923 case ISD::SETULT: 10924 return generateEquivalentSub(N, Size, false, false, DL, DAG); 10925 case ISD::SETULE: 10926 return generateEquivalentSub(N, Size, true, true, DL, DAG); 10927 case ISD::SETUGT: 10928 return generateEquivalentSub(N, Size, false, true, DL, DAG); 10929 case ISD::SETUGE: 10930 return generateEquivalentSub(N, Size, true, false, DL, DAG); 10931 } 10932 } 10933 10934 return SDValue(); 10935 } 10936 10937 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N, 10938 DAGCombinerInfo &DCI) const { 10939 SelectionDAG &DAG = DCI.DAG; 10940 SDLoc dl(N); 10941 10942 assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits"); 10943 // If we're tracking CR bits, we need to be careful that we don't have: 10944 // trunc(binary-ops(zext(x), zext(y))) 10945 // or 10946 // trunc(binary-ops(binary-ops(zext(x), zext(y)), ...) 10947 // such that we're unnecessarily moving things into GPRs when it would be 10948 // better to keep them in CR bits. 10949 10950 // Note that trunc here can be an actual i1 trunc, or can be the effective 10951 // truncation that comes from a setcc or select_cc. 10952 if (N->getOpcode() == ISD::TRUNCATE && 10953 N->getValueType(0) != MVT::i1) 10954 return SDValue(); 10955 10956 if (N->getOperand(0).getValueType() != MVT::i32 && 10957 N->getOperand(0).getValueType() != MVT::i64) 10958 return SDValue(); 10959 10960 if (N->getOpcode() == ISD::SETCC || 10961 N->getOpcode() == ISD::SELECT_CC) { 10962 // If we're looking at a comparison, then we need to make sure that the 10963 // high bits (all except for the first) don't matter the result. 10964 ISD::CondCode CC = 10965 cast<CondCodeSDNode>(N->getOperand( 10966 N->getOpcode() == ISD::SETCC ? 2 : 4))->get(); 10967 unsigned OpBits = N->getOperand(0).getValueSizeInBits(); 10968 10969 if (ISD::isSignedIntSetCC(CC)) { 10970 if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits || 10971 DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits) 10972 return SDValue(); 10973 } else if (ISD::isUnsignedIntSetCC(CC)) { 10974 if (!DAG.MaskedValueIsZero(N->getOperand(0), 10975 APInt::getHighBitsSet(OpBits, OpBits-1)) || 10976 !DAG.MaskedValueIsZero(N->getOperand(1), 10977 APInt::getHighBitsSet(OpBits, OpBits-1))) 10978 return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI) 10979 : SDValue()); 10980 } else { 10981 // This is neither a signed nor an unsigned comparison, just make sure 10982 // that the high bits are equal. 10983 KnownBits Op1Known, Op2Known; 10984 DAG.computeKnownBits(N->getOperand(0), Op1Known); 10985 DAG.computeKnownBits(N->getOperand(1), Op2Known); 10986 10987 // We don't really care about what is known about the first bit (if 10988 // anything), so clear it in all masks prior to comparing them. 
10989 Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0); 10990 Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0); 10991 10992 if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One) 10993 return SDValue(); 10994 } 10995 } 10996 10997 // We now know that the higher-order bits are irrelevant, we just need to 10998 // make sure that all of the intermediate operations are bit operations, and 10999 // all inputs are extensions. 11000 if (N->getOperand(0).getOpcode() != ISD::AND && 11001 N->getOperand(0).getOpcode() != ISD::OR && 11002 N->getOperand(0).getOpcode() != ISD::XOR && 11003 N->getOperand(0).getOpcode() != ISD::SELECT && 11004 N->getOperand(0).getOpcode() != ISD::SELECT_CC && 11005 N->getOperand(0).getOpcode() != ISD::TRUNCATE && 11006 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND && 11007 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND && 11008 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND) 11009 return SDValue(); 11010 11011 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) && 11012 N->getOperand(1).getOpcode() != ISD::AND && 11013 N->getOperand(1).getOpcode() != ISD::OR && 11014 N->getOperand(1).getOpcode() != ISD::XOR && 11015 N->getOperand(1).getOpcode() != ISD::SELECT && 11016 N->getOperand(1).getOpcode() != ISD::SELECT_CC && 11017 N->getOperand(1).getOpcode() != ISD::TRUNCATE && 11018 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND && 11019 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND && 11020 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND) 11021 return SDValue(); 11022 11023 SmallVector<SDValue, 4> Inputs; 11024 SmallVector<SDValue, 8> BinOps, PromOps; 11025 SmallPtrSet<SDNode *, 16> Visited; 11026 11027 for (unsigned i = 0; i < 2; ++i) { 11028 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 11029 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 11030 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 11031 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) || 11032 isa<ConstantSDNode>(N->getOperand(i))) 11033 Inputs.push_back(N->getOperand(i)); 11034 else 11035 BinOps.push_back(N->getOperand(i)); 11036 11037 if (N->getOpcode() == ISD::TRUNCATE) 11038 break; 11039 } 11040 11041 // Visit all inputs, collect all binary operations (and, or, xor and 11042 // select) that are all fed by extensions. 11043 while (!BinOps.empty()) { 11044 SDValue BinOp = BinOps.back(); 11045 BinOps.pop_back(); 11046 11047 if (!Visited.insert(BinOp.getNode()).second) 11048 continue; 11049 11050 PromOps.push_back(BinOp); 11051 11052 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 11053 // The condition of the select is not promoted. 
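// (For SELECT that is operand 0; for SELECT_CC only operands 2 and 3, the
// two value operands, participate in the promotion.)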
11054 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 11055 continue; 11056 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 11057 continue; 11058 11059 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 11060 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 11061 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 11062 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) || 11063 isa<ConstantSDNode>(BinOp.getOperand(i))) { 11064 Inputs.push_back(BinOp.getOperand(i)); 11065 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 11066 BinOp.getOperand(i).getOpcode() == ISD::OR || 11067 BinOp.getOperand(i).getOpcode() == ISD::XOR || 11068 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 11069 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC || 11070 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 11071 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 11072 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 11073 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) { 11074 BinOps.push_back(BinOp.getOperand(i)); 11075 } else { 11076 // We have an input that is not an extension or another binary 11077 // operation; we'll abort this transformation. 11078 return SDValue(); 11079 } 11080 } 11081 } 11082 11083 // Make sure that this is a self-contained cluster of operations (which 11084 // is not quite the same thing as saying that everything has only one 11085 // use). 11086 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11087 if (isa<ConstantSDNode>(Inputs[i])) 11088 continue; 11089 11090 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 11091 UE = Inputs[i].getNode()->use_end(); 11092 UI != UE; ++UI) { 11093 SDNode *User = *UI; 11094 if (User != N && !Visited.count(User)) 11095 return SDValue(); 11096 11097 // Make sure that we're not going to promote the non-output-value 11098 // operand(s) or SELECT or SELECT_CC. 11099 // FIXME: Although we could sometimes handle this, and it does occur in 11100 // practice that one of the condition inputs to the select is also one of 11101 // the outputs, we currently can't deal with this. 11102 if (User->getOpcode() == ISD::SELECT) { 11103 if (User->getOperand(0) == Inputs[i]) 11104 return SDValue(); 11105 } else if (User->getOpcode() == ISD::SELECT_CC) { 11106 if (User->getOperand(0) == Inputs[i] || 11107 User->getOperand(1) == Inputs[i]) 11108 return SDValue(); 11109 } 11110 } 11111 } 11112 11113 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 11114 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 11115 UE = PromOps[i].getNode()->use_end(); 11116 UI != UE; ++UI) { 11117 SDNode *User = *UI; 11118 if (User != N && !Visited.count(User)) 11119 return SDValue(); 11120 11121 // Make sure that we're not going to promote the non-output-value 11122 // operand(s) or SELECT or SELECT_CC. 11123 // FIXME: Although we could sometimes handle this, and it does occur in 11124 // practice that one of the condition inputs to the select is also one of 11125 // the outputs, we currently can't deal with this. 11126 if (User->getOpcode() == ISD::SELECT) { 11127 if (User->getOperand(0) == PromOps[i]) 11128 return SDValue(); 11129 } else if (User->getOpcode() == ISD::SELECT_CC) { 11130 if (User->getOperand(0) == PromOps[i] || 11131 User->getOperand(1) == PromOps[i]) 11132 return SDValue(); 11133 } 11134 } 11135 } 11136 11137 // Replace all inputs with the extension operand. 
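// Note: the ReplaceAllUsesOfValueWith calls in the rewriting below can CSE or
// delete nodes that other to-be-promoted entries still refer to; the PromOps
// worklist is therefore carried in HandleSDNodes (created further down), which
// track their node through any such replacement.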
11138 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11139 // Constants may have users outside the cluster of to-be-promoted nodes, 11140 // and so we need to replace those as we do the promotions. 11141 if (isa<ConstantSDNode>(Inputs[i])) 11142 continue; 11143 else 11144 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0)); 11145 } 11146 11147 std::list<HandleSDNode> PromOpHandles; 11148 for (auto &PromOp : PromOps) 11149 PromOpHandles.emplace_back(PromOp); 11150 11151 // Replace all operations (these are all the same, but have a different 11152 // (i1) return type). DAG.getNode will validate that the types of 11153 // a binary operator match, so go through the list in reverse so that 11154 // we've likely promoted both operands first. Any intermediate truncations or 11155 // extensions disappear. 11156 while (!PromOpHandles.empty()) { 11157 SDValue PromOp = PromOpHandles.back().getValue(); 11158 PromOpHandles.pop_back(); 11159 11160 if (PromOp.getOpcode() == ISD::TRUNCATE || 11161 PromOp.getOpcode() == ISD::SIGN_EXTEND || 11162 PromOp.getOpcode() == ISD::ZERO_EXTEND || 11163 PromOp.getOpcode() == ISD::ANY_EXTEND) { 11164 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) && 11165 PromOp.getOperand(0).getValueType() != MVT::i1) { 11166 // The operand is not yet ready (see comment below). 11167 PromOpHandles.emplace_front(PromOp); 11168 continue; 11169 } 11170 11171 SDValue RepValue = PromOp.getOperand(0); 11172 if (isa<ConstantSDNode>(RepValue)) 11173 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue); 11174 11175 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue); 11176 continue; 11177 } 11178 11179 unsigned C; 11180 switch (PromOp.getOpcode()) { 11181 default: C = 0; break; 11182 case ISD::SELECT: C = 1; break; 11183 case ISD::SELECT_CC: C = 2; break; 11184 } 11185 11186 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 11187 PromOp.getOperand(C).getValueType() != MVT::i1) || 11188 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 11189 PromOp.getOperand(C+1).getValueType() != MVT::i1)) { 11190 // The to-be-promoted operands of this node have not yet been 11191 // promoted (this should be rare because we're going through the 11192 // list backward, but if one of the operands has several users in 11193 // this cluster of to-be-promoted nodes, it is possible). 11194 PromOpHandles.emplace_front(PromOp); 11195 continue; 11196 } 11197 11198 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 11199 PromOp.getNode()->op_end()); 11200 11201 // If there are any constant inputs, make sure they're replaced now. 11202 for (unsigned i = 0; i < 2; ++i) 11203 if (isa<ConstantSDNode>(Ops[C+i])) 11204 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]); 11205 11206 DAG.ReplaceAllUsesOfValueWith(PromOp, 11207 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops)); 11208 } 11209 11210 // Now we're left with the initial truncation itself. 11211 if (N->getOpcode() == ISD::TRUNCATE) 11212 return N->getOperand(0); 11213 11214 // Otherwise, this is a comparison. The operands to be compared have just 11215 // changed type (to i1), but everything else is the same. 11216 return SDValue(N, 0); 11217 } 11218 11219 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N, 11220 DAGCombinerInfo &DCI) const { 11221 SelectionDAG &DAG = DCI.DAG; 11222 SDLoc dl(N); 11223 11224 // If we're tracking CR bits, we need to be careful that we don't have: 11225 // zext(binary-ops(trunc(x), trunc(y))) 11226 // or 11227 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...) 
11228 // such that we're unnecessarily moving things into CR bits that can more 11229 // efficiently stay in GPRs. Note that if we're not certain that the high 11230 // bits are set as required by the final extension, we still may need to do 11231 // some masking to get the proper behavior. 11232 11233 // This same functionality is important on PPC64 when dealing with 11234 // 32-to-64-bit extensions; these occur often when 32-bit values are used as 11235 // the return values of functions. Because it is so similar, it is handled 11236 // here as well. 11237 11238 if (N->getValueType(0) != MVT::i32 && 11239 N->getValueType(0) != MVT::i64) 11240 return SDValue(); 11241 11242 if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) || 11243 (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64()))) 11244 return SDValue(); 11245 11246 if (N->getOperand(0).getOpcode() != ISD::AND && 11247 N->getOperand(0).getOpcode() != ISD::OR && 11248 N->getOperand(0).getOpcode() != ISD::XOR && 11249 N->getOperand(0).getOpcode() != ISD::SELECT && 11250 N->getOperand(0).getOpcode() != ISD::SELECT_CC) 11251 return SDValue(); 11252 11253 SmallVector<SDValue, 4> Inputs; 11254 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps; 11255 SmallPtrSet<SDNode *, 16> Visited; 11256 11257 // Visit all inputs, collect all binary operations (and, or, xor and 11258 // select) that are all fed by truncations. 11259 while (!BinOps.empty()) { 11260 SDValue BinOp = BinOps.back(); 11261 BinOps.pop_back(); 11262 11263 if (!Visited.insert(BinOp.getNode()).second) 11264 continue; 11265 11266 PromOps.push_back(BinOp); 11267 11268 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 11269 // The condition of the select is not promoted. 11270 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 11271 continue; 11272 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 11273 continue; 11274 11275 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 11276 isa<ConstantSDNode>(BinOp.getOperand(i))) { 11277 Inputs.push_back(BinOp.getOperand(i)); 11278 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 11279 BinOp.getOperand(i).getOpcode() == ISD::OR || 11280 BinOp.getOperand(i).getOpcode() == ISD::XOR || 11281 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 11282 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) { 11283 BinOps.push_back(BinOp.getOperand(i)); 11284 } else { 11285 // We have an input that is not a truncation or another binary 11286 // operation; we'll abort this transformation. 11287 return SDValue(); 11288 } 11289 } 11290 } 11291 11292 // The operands of a select that must be truncated when the select is 11293 // promoted because the operand is actually part of the to-be-promoted set. 11294 DenseMap<SDNode *, EVT> SelectTruncOp[2]; 11295 11296 // Make sure that this is a self-contained cluster of operations (which 11297 // is not quite the same thing as saying that everything has only one 11298 // use). 11299 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11300 if (isa<ConstantSDNode>(Inputs[i])) 11301 continue; 11302 11303 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 11304 UE = Inputs[i].getNode()->use_end(); 11305 UI != UE; ++UI) { 11306 SDNode *User = *UI; 11307 if (User != N && !Visited.count(User)) 11308 return SDValue(); 11309 11310 // If we're going to promote the non-output-value operand(s) or SELECT or 11311 // SELECT_CC, record them for truncation. 
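// Unlike the i1 promotion above, a select condition here keeps its original
// type; we just remember that type so the condition can be truncated back
// after its producer has been promoted to the wider result type.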
11312 if (User->getOpcode() == ISD::SELECT) { 11313 if (User->getOperand(0) == Inputs[i]) 11314 SelectTruncOp[0].insert(std::make_pair(User, 11315 User->getOperand(0).getValueType())); 11316 } else if (User->getOpcode() == ISD::SELECT_CC) { 11317 if (User->getOperand(0) == Inputs[i]) 11318 SelectTruncOp[0].insert(std::make_pair(User, 11319 User->getOperand(0).getValueType())); 11320 if (User->getOperand(1) == Inputs[i]) 11321 SelectTruncOp[1].insert(std::make_pair(User, 11322 User->getOperand(1).getValueType())); 11323 } 11324 } 11325 } 11326 11327 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 11328 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 11329 UE = PromOps[i].getNode()->use_end(); 11330 UI != UE; ++UI) { 11331 SDNode *User = *UI; 11332 if (User != N && !Visited.count(User)) 11333 return SDValue(); 11334 11335 // If we're going to promote the non-output-value operand(s) or SELECT or 11336 // SELECT_CC, record them for truncation. 11337 if (User->getOpcode() == ISD::SELECT) { 11338 if (User->getOperand(0) == PromOps[i]) 11339 SelectTruncOp[0].insert(std::make_pair(User, 11340 User->getOperand(0).getValueType())); 11341 } else if (User->getOpcode() == ISD::SELECT_CC) { 11342 if (User->getOperand(0) == PromOps[i]) 11343 SelectTruncOp[0].insert(std::make_pair(User, 11344 User->getOperand(0).getValueType())); 11345 if (User->getOperand(1) == PromOps[i]) 11346 SelectTruncOp[1].insert(std::make_pair(User, 11347 User->getOperand(1).getValueType())); 11348 } 11349 } 11350 } 11351 11352 unsigned PromBits = N->getOperand(0).getValueSizeInBits(); 11353 bool ReallyNeedsExt = false; 11354 if (N->getOpcode() != ISD::ANY_EXTEND) { 11355 // If all of the inputs are not already sign/zero extended, then 11356 // we'll still need to do that at the end. 11357 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11358 if (isa<ConstantSDNode>(Inputs[i])) 11359 continue; 11360 11361 unsigned OpBits = 11362 Inputs[i].getOperand(0).getValueSizeInBits(); 11363 assert(PromBits < OpBits && "Truncation not to a smaller bit count?"); 11364 11365 if ((N->getOpcode() == ISD::ZERO_EXTEND && 11366 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0), 11367 APInt::getHighBitsSet(OpBits, 11368 OpBits-PromBits))) || 11369 (N->getOpcode() == ISD::SIGN_EXTEND && 11370 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) < 11371 (OpBits-(PromBits-1)))) { 11372 ReallyNeedsExt = true; 11373 break; 11374 } 11375 } 11376 } 11377 11378 // Replace all inputs, either with the truncation operand, or a 11379 // truncation or extension to the final output type. 11380 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 11381 // Constant inputs need to be replaced with the to-be-promoted nodes that 11382 // use them because they might have users outside of the cluster of 11383 // promoted nodes. 
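// For example, if this cluster is being promoted to i64 via zero_extend, a
// (trunc i64 %x to i16) input is replaced by getZExtOrTrunc(%x, i64), which
// collapses to %x itself since the types already match.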
11384 if (isa<ConstantSDNode>(Inputs[i])) 11385 continue; 11386 11387 SDValue InSrc = Inputs[i].getOperand(0); 11388 if (Inputs[i].getValueType() == N->getValueType(0)) 11389 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc); 11390 else if (N->getOpcode() == ISD::SIGN_EXTEND) 11391 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 11392 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0))); 11393 else if (N->getOpcode() == ISD::ZERO_EXTEND) 11394 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 11395 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0))); 11396 else 11397 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 11398 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0))); 11399 } 11400 11401 std::list<HandleSDNode> PromOpHandles; 11402 for (auto &PromOp : PromOps) 11403 PromOpHandles.emplace_back(PromOp); 11404 11405 // Replace all operations (these are all the same, but have a different 11406 // (promoted) return type). DAG.getNode will validate that the types of 11407 // a binary operator match, so go through the list in reverse so that 11408 // we've likely promoted both operands first. 11409 while (!PromOpHandles.empty()) { 11410 SDValue PromOp = PromOpHandles.back().getValue(); 11411 PromOpHandles.pop_back(); 11412 11413 unsigned C; 11414 switch (PromOp.getOpcode()) { 11415 default: C = 0; break; 11416 case ISD::SELECT: C = 1; break; 11417 case ISD::SELECT_CC: C = 2; break; 11418 } 11419 11420 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 11421 PromOp.getOperand(C).getValueType() != N->getValueType(0)) || 11422 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 11423 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) { 11424 // The to-be-promoted operands of this node have not yet been 11425 // promoted (this should be rare because we're going through the 11426 // list backward, but if one of the operands has several users in 11427 // this cluster of to-be-promoted nodes, it is possible). 11428 PromOpHandles.emplace_front(PromOp); 11429 continue; 11430 } 11431 11432 // For SELECT and SELECT_CC nodes, we do a similar check for any 11433 // to-be-promoted comparison inputs. 11434 if (PromOp.getOpcode() == ISD::SELECT || 11435 PromOp.getOpcode() == ISD::SELECT_CC) { 11436 if ((SelectTruncOp[0].count(PromOp.getNode()) && 11437 PromOp.getOperand(0).getValueType() != N->getValueType(0)) || 11438 (SelectTruncOp[1].count(PromOp.getNode()) && 11439 PromOp.getOperand(1).getValueType() != N->getValueType(0))) { 11440 PromOpHandles.emplace_front(PromOp); 11441 continue; 11442 } 11443 } 11444 11445 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 11446 PromOp.getNode()->op_end()); 11447 11448 // If this node has constant inputs, then they'll need to be promoted here. 11449 for (unsigned i = 0; i < 2; ++i) { 11450 if (!isa<ConstantSDNode>(Ops[C+i])) 11451 continue; 11452 if (Ops[C+i].getValueType() == N->getValueType(0)) 11453 continue; 11454 11455 if (N->getOpcode() == ISD::SIGN_EXTEND) 11456 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 11457 else if (N->getOpcode() == ISD::ZERO_EXTEND) 11458 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 11459 else 11460 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 11461 } 11462 11463 // If we've promoted the comparison inputs of a SELECT or SELECT_CC, 11464 // truncate them again to the original value type. 
11465 if (PromOp.getOpcode() == ISD::SELECT || 11466 PromOp.getOpcode() == ISD::SELECT_CC) { 11467 auto SI0 = SelectTruncOp[0].find(PromOp.getNode()); 11468 if (SI0 != SelectTruncOp[0].end()) 11469 Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]); 11470 auto SI1 = SelectTruncOp[1].find(PromOp.getNode()); 11471 if (SI1 != SelectTruncOp[1].end()) 11472 Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]); 11473 } 11474 11475 DAG.ReplaceAllUsesOfValueWith(PromOp, 11476 DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops)); 11477 } 11478 11479 // Now we're left with the initial extension itself. 11480 if (!ReallyNeedsExt) 11481 return N->getOperand(0); 11482 11483 // To zero extend, just mask off everything except for the first bit (in the 11484 // i1 case). 11485 if (N->getOpcode() == ISD::ZERO_EXTEND) 11486 return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0), 11487 DAG.getConstant(APInt::getLowBitsSet( 11488 N->getValueSizeInBits(0), PromBits), 11489 dl, N->getValueType(0))); 11490 11491 assert(N->getOpcode() == ISD::SIGN_EXTEND && 11492 "Invalid extension type"); 11493 EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout()); 11494 SDValue ShiftCst = 11495 DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy); 11496 return DAG.getNode( 11497 ISD::SRA, dl, N->getValueType(0), 11498 DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst), 11499 ShiftCst); 11500 } 11501 11502 /// \brief Reduces the number of fp-to-int conversion when building a vector. 11503 /// 11504 /// If this vector is built out of floating to integer conversions, 11505 /// transform it to a vector built out of floating point values followed by a 11506 /// single floating to integer conversion of the vector. 11507 /// Namely (build_vector (fptosi $A), (fptosi $B), ...) 11508 /// becomes (fptosi (build_vector ($A, $B, ...))) 11509 SDValue PPCTargetLowering:: 11510 combineElementTruncationToVectorTruncation(SDNode *N, 11511 DAGCombinerInfo &DCI) const { 11512 assert(N->getOpcode() == ISD::BUILD_VECTOR && 11513 "Should be called with a BUILD_VECTOR node"); 11514 11515 SelectionDAG &DAG = DCI.DAG; 11516 SDLoc dl(N); 11517 11518 SDValue FirstInput = N->getOperand(0); 11519 assert(FirstInput.getOpcode() == PPCISD::MFVSR && 11520 "The input operand must be an fp-to-int conversion."); 11521 11522 // This combine happens after legalization so the fp_to_[su]i nodes are 11523 // already converted to PPCSISD nodes. 11524 unsigned FirstConversion = FirstInput.getOperand(0).getOpcode(); 11525 if (FirstConversion == PPCISD::FCTIDZ || 11526 FirstConversion == PPCISD::FCTIDUZ || 11527 FirstConversion == PPCISD::FCTIWZ || 11528 FirstConversion == PPCISD::FCTIWUZ) { 11529 bool IsSplat = true; 11530 bool Is32Bit = FirstConversion == PPCISD::FCTIWZ || 11531 FirstConversion == PPCISD::FCTIWUZ; 11532 EVT SrcVT = FirstInput.getOperand(0).getValueType(); 11533 SmallVector<SDValue, 4> Ops; 11534 EVT TargetVT = N->getValueType(0); 11535 for (int i = 0, e = N->getNumOperands(); i < e; ++i) { 11536 if (N->getOperand(i).getOpcode() != PPCISD::MFVSR) 11537 return SDValue(); 11538 unsigned NextConversion = N->getOperand(i).getOperand(0).getOpcode(); 11539 if (NextConversion != FirstConversion) 11540 return SDValue(); 11541 if (N->getOperand(i) != FirstInput) 11542 IsSplat = false; 11543 } 11544 11545 // If this is a splat, we leave it as-is since there will be only a single 11546 // fp-to-int conversion followed by a splat of the integer. 
This is better 11547 // for 32-bit and smaller ints and neutral for 64-bit ints. 11548 if (IsSplat) 11549 return SDValue(); 11550 11551 // Now that we know we have the right type of node, get its operands 11552 for (int i = 0, e = N->getNumOperands(); i < e; ++i) { 11553 SDValue In = N->getOperand(i).getOperand(0); 11554 // For 32-bit values, we need to add an FP_ROUND node. 11555 if (Is32Bit) { 11556 if (In.isUndef()) 11557 Ops.push_back(DAG.getUNDEF(SrcVT)); 11558 else { 11559 SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl, 11560 MVT::f32, In.getOperand(0), 11561 DAG.getIntPtrConstant(1, dl)); 11562 Ops.push_back(Trunc); 11563 } 11564 } else 11565 Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0)); 11566 } 11567 11568 unsigned Opcode; 11569 if (FirstConversion == PPCISD::FCTIDZ || 11570 FirstConversion == PPCISD::FCTIWZ) 11571 Opcode = ISD::FP_TO_SINT; 11572 else 11573 Opcode = ISD::FP_TO_UINT; 11574 11575 EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32; 11576 SDValue BV = DAG.getBuildVector(NewVT, dl, Ops); 11577 return DAG.getNode(Opcode, dl, TargetVT, BV); 11578 } 11579 return SDValue(); 11580 } 11581 11582 /// \brief Reduce the number of loads when building a vector. 11583 /// 11584 /// Building a vector out of multiple loads can be converted to a load 11585 /// of the vector type if the loads are consecutive. If the loads are 11586 /// consecutive but in descending order, a shuffle is added at the end 11587 /// to reorder the vector. 11588 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) { 11589 assert(N->getOpcode() == ISD::BUILD_VECTOR && 11590 "Should be called with a BUILD_VECTOR node"); 11591 11592 SDLoc dl(N); 11593 bool InputsAreConsecutiveLoads = true; 11594 bool InputsAreReverseConsecutive = true; 11595 unsigned ElemSize = N->getValueType(0).getScalarSizeInBits() / 8; 11596 SDValue FirstInput = N->getOperand(0); 11597 bool IsRoundOfExtLoad = false; 11598 11599 if (FirstInput.getOpcode() == ISD::FP_ROUND && 11600 FirstInput.getOperand(0).getOpcode() == ISD::LOAD) { 11601 LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0)); 11602 IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD; 11603 } 11604 // Not a build vector of (possibly fp_rounded) loads. 11605 if (!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) 11606 return SDValue(); 11607 11608 for (int i = 1, e = N->getNumOperands(); i < e; ++i) { 11609 // If any inputs are fp_round(extload), they all must be. 11610 if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND) 11611 return SDValue(); 11612 11613 SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) : 11614 N->getOperand(i); 11615 if (NextInput.getOpcode() != ISD::LOAD) 11616 return SDValue(); 11617 11618 SDValue PreviousInput = 11619 IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1); 11620 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput); 11621 LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput); 11622 11623 // If any inputs are fp_round(extload), they all must be. 11624 if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD) 11625 return SDValue(); 11626 11627 if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG)) 11628 InputsAreConsecutiveLoads = false; 11629 if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG)) 11630 InputsAreReverseConsecutive = false; 11631 11632 // Exit early if the loads are neither consecutive nor reverse consecutive. 
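// For instance, (build_vector (load X+12), (load X+8), (load X+4), (load X))
// of 4-byte elements keeps only InputsAreReverseConsecutive set, and is later
// turned into a single vector load from X followed by a reversing shuffle
// <3,2,1,0>; if neither flag is still set we give up immediately below.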
11633 if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive) 11634 return SDValue(); 11635 } 11636 11637 assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) && 11638 "The loads cannot be both consecutive and reverse consecutive."); 11639 11640 SDValue FirstLoadOp = 11641 IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput; 11642 SDValue LastLoadOp = 11643 IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) : 11644 N->getOperand(N->getNumOperands()-1); 11645 11646 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp); 11647 LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp); 11648 if (InputsAreConsecutiveLoads) { 11649 assert(LD1 && "Input needs to be a LoadSDNode."); 11650 return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(), 11651 LD1->getBasePtr(), LD1->getPointerInfo(), 11652 LD1->getAlignment()); 11653 } 11654 if (InputsAreReverseConsecutive) { 11655 assert(LDL && "Input needs to be a LoadSDNode."); 11656 SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(), 11657 LDL->getBasePtr(), LDL->getPointerInfo(), 11658 LDL->getAlignment()); 11659 SmallVector<int, 16> Ops; 11660 for (int i = N->getNumOperands() - 1; i >= 0; i--) 11661 Ops.push_back(i); 11662 11663 return DAG.getVectorShuffle(N->getValueType(0), dl, Load, 11664 DAG.getUNDEF(N->getValueType(0)), Ops); 11665 } 11666 return SDValue(); 11667 } 11668 11669 // This function adds the required vector_shuffle needed to get 11670 // the elements of the vector extract in the correct position 11671 // as specified by the CorrectElems encoding. 11672 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG, 11673 SDValue Input, uint64_t Elems, 11674 uint64_t CorrectElems) { 11675 SDLoc dl(N); 11676 11677 unsigned NumElems = Input.getValueType().getVectorNumElements(); 11678 SmallVector<int, 16> ShuffleMask(NumElems, -1); 11679 11680 // Knowing the element indices being extracted from the original 11681 // vector and the order in which they're being inserted, just put 11682 // them at element indices required for the instruction. 11683 for (unsigned i = 0; i < N->getNumOperands(); i++) { 11684 if (DAG.getDataLayout().isLittleEndian()) 11685 ShuffleMask[CorrectElems & 0xF] = Elems & 0xF; 11686 else 11687 ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4; 11688 CorrectElems = CorrectElems >> 8; 11689 Elems = Elems >> 8; 11690 } 11691 11692 SDValue Shuffle = 11693 DAG.getVectorShuffle(Input.getValueType(), dl, Input, 11694 DAG.getUNDEF(Input.getValueType()), ShuffleMask); 11695 11696 EVT Ty = N->getValueType(0); 11697 SDValue BV = DAG.getNode(PPCISD::SExtVElems, dl, Ty, Shuffle); 11698 return BV; 11699 } 11700 11701 // Look for build vector patterns where input operands come from sign 11702 // extended vector_extract elements of specific indices. If the correct indices 11703 // aren't used, add a vector shuffle to fix up the indices and create a new 11704 // PPCISD:SExtVElems node which selects the vector sign extend instructions 11705 // during instruction selection. 11706 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) { 11707 // This array encodes the indices that the vector sign extend instructions 11708 // extract from when extending from one type to another for both BE and LE. 11709 // The right nibble of each byte corresponds to the LE incides. 11710 // and the left nibble of each byte corresponds to the BE incides. 
11711 // For example: 0x3074B8FC byte->word 11712 // For LE: the allowed indices are: 0x0,0x4,0x8,0xC 11713 // For BE: the allowed indices are: 0x3,0x7,0xB,0xF 11714 // For example: 0x000070F8 byte->double word 11715 // For LE: the allowed indices are: 0x0,0x8 11716 // For BE: the allowed indices are: 0x7,0xF 11717 uint64_t TargetElems[] = { 11718 0x3074B8FC, // b->w 11719 0x000070F8, // b->d 11720 0x10325476, // h->w 11721 0x00003074, // h->d 11722 0x00001032, // w->d 11723 }; 11724 11725 uint64_t Elems = 0; 11726 int Index; 11727 SDValue Input; 11728 11729 auto isSExtOfVecExtract = [&](SDValue Op) -> bool { 11730 if (!Op) 11731 return false; 11732 if (Op.getOpcode() != ISD::SIGN_EXTEND) 11733 return false; 11734 11735 SDValue Extract = Op.getOperand(0); 11736 if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT) 11737 return false; 11738 11739 ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1)); 11740 if (!ExtOp) 11741 return false; 11742 11743 Index = ExtOp->getZExtValue(); 11744 if (Input && Input != Extract.getOperand(0)) 11745 return false; 11746 11747 if (!Input) 11748 Input = Extract.getOperand(0); 11749 11750 Elems = Elems << 8; 11751 Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4; 11752 Elems |= Index; 11753 11754 return true; 11755 }; 11756 11757 // If the build vector operands aren't sign extended vector extracts, 11758 // of the same input vector, then return. 11759 for (unsigned i = 0; i < N->getNumOperands(); i++) { 11760 if (!isSExtOfVecExtract(N->getOperand(i))) { 11761 return SDValue(); 11762 } 11763 } 11764 11765 // If the vector extract indicies are not correct, add the appropriate 11766 // vector_shuffle. 11767 int TgtElemArrayIdx; 11768 int InputSize = Input.getValueType().getScalarSizeInBits(); 11769 int OutputSize = N->getValueType(0).getScalarSizeInBits(); 11770 if (InputSize + OutputSize == 40) 11771 TgtElemArrayIdx = 0; 11772 else if (InputSize + OutputSize == 72) 11773 TgtElemArrayIdx = 1; 11774 else if (InputSize + OutputSize == 48) 11775 TgtElemArrayIdx = 2; 11776 else if (InputSize + OutputSize == 80) 11777 TgtElemArrayIdx = 3; 11778 else if (InputSize + OutputSize == 96) 11779 TgtElemArrayIdx = 4; 11780 else 11781 return SDValue(); 11782 11783 uint64_t CorrectElems = TargetElems[TgtElemArrayIdx]; 11784 CorrectElems = DAG.getDataLayout().isLittleEndian() 11785 ? CorrectElems & 0x0F0F0F0F0F0F0F0F 11786 : CorrectElems & 0xF0F0F0F0F0F0F0F0; 11787 if (Elems != CorrectElems) { 11788 return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems); 11789 } 11790 11791 // Regular lowering will catch cases where a shuffle is not needed. 11792 return SDValue(); 11793 } 11794 11795 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N, 11796 DAGCombinerInfo &DCI) const { 11797 assert(N->getOpcode() == ISD::BUILD_VECTOR && 11798 "Should be called with a BUILD_VECTOR node"); 11799 11800 SelectionDAG &DAG = DCI.DAG; 11801 SDLoc dl(N); 11802 11803 if (!Subtarget.hasVSX()) 11804 return SDValue(); 11805 11806 // The target independent DAG combiner will leave a build_vector of 11807 // float-to-int conversions intact. We can generate MUCH better code for 11808 // a float-to-int conversion of a vector of floats. 11809 SDValue FirstInput = N->getOperand(0); 11810 if (FirstInput.getOpcode() == PPCISD::MFVSR) { 11811 SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI); 11812 if (Reduced) 11813 return Reduced; 11814 } 11815 11816 // If we're building a vector out of consecutive loads, just load that 11817 // vector type. 
11818 SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG); 11819 if (Reduced) 11820 return Reduced; 11821 11822 // If we're building a vector out of extended elements from another vector 11823 // we have P9 vector integer extend instructions. 11824 if (Subtarget.hasP9Altivec()) { 11825 Reduced = combineBVOfVecSExt(N, DAG); 11826 if (Reduced) 11827 return Reduced; 11828 } 11829 11830 11831 if (N->getValueType(0) != MVT::v2f64) 11832 return SDValue(); 11833 11834 // Looking for: 11835 // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1)) 11836 if (FirstInput.getOpcode() != ISD::SINT_TO_FP && 11837 FirstInput.getOpcode() != ISD::UINT_TO_FP) 11838 return SDValue(); 11839 if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP && 11840 N->getOperand(1).getOpcode() != ISD::UINT_TO_FP) 11841 return SDValue(); 11842 if (FirstInput.getOpcode() != N->getOperand(1).getOpcode()) 11843 return SDValue(); 11844 11845 SDValue Ext1 = FirstInput.getOperand(0); 11846 SDValue Ext2 = N->getOperand(1).getOperand(0); 11847 if(Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || 11848 Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT) 11849 return SDValue(); 11850 11851 ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1)); 11852 ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1)); 11853 if (!Ext1Op || !Ext2Op) 11854 return SDValue(); 11855 if (Ext1.getValueType() != MVT::i32 || 11856 Ext2.getValueType() != MVT::i32) 11857 if (Ext1.getOperand(0) != Ext2.getOperand(0)) 11858 return SDValue(); 11859 11860 int FirstElem = Ext1Op->getZExtValue(); 11861 int SecondElem = Ext2Op->getZExtValue(); 11862 int SubvecIdx; 11863 if (FirstElem == 0 && SecondElem == 1) 11864 SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0; 11865 else if (FirstElem == 2 && SecondElem == 3) 11866 SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1; 11867 else 11868 return SDValue(); 11869 11870 SDValue SrcVec = Ext1.getOperand(0); 11871 auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ? 11872 PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP; 11873 return DAG.getNode(NodeType, dl, MVT::v2f64, 11874 SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl)); 11875 } 11876 11877 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N, 11878 DAGCombinerInfo &DCI) const { 11879 assert((N->getOpcode() == ISD::SINT_TO_FP || 11880 N->getOpcode() == ISD::UINT_TO_FP) && 11881 "Need an int -> FP conversion node here"); 11882 11883 if (useSoftFloat() || !Subtarget.has64BitSupport()) 11884 return SDValue(); 11885 11886 SelectionDAG &DAG = DCI.DAG; 11887 SDLoc dl(N); 11888 SDValue Op(N, 0); 11889 11890 SDValue FirstOperand(Op.getOperand(0)); 11891 bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD && 11892 (FirstOperand.getValueType() == MVT::i8 || 11893 FirstOperand.getValueType() == MVT::i16); 11894 if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) { 11895 bool Signed = N->getOpcode() == ISD::SINT_TO_FP; 11896 bool DstDouble = Op.getValueType() == MVT::f64; 11897 unsigned ConvOp = Signed ? 11898 (DstDouble ? PPCISD::FCFID : PPCISD::FCFIDS) : 11899 (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS); 11900 SDValue WidthConst = 11901 DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 
1 : 2, 11902 dl, false); 11903 LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode()); 11904 SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst }; 11905 SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl, 11906 DAG.getVTList(MVT::f64, MVT::Other), 11907 Ops, MVT::i8, LDN->getMemOperand()); 11908 11909 // For signed conversion, we need to sign-extend the value in the VSR 11910 if (Signed) { 11911 SDValue ExtOps[] = { Ld, WidthConst }; 11912 SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps); 11913 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext); 11914 } else 11915 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld); 11916 } 11917 11918 // Don't handle ppc_fp128 here or i1 conversions. 11919 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 11920 return SDValue(); 11921 if (Op.getOperand(0).getValueType() == MVT::i1) 11922 return SDValue(); 11923 11924 // For i32 intermediate values, unfortunately, the conversion functions 11925 // leave the upper 32 bits of the value are undefined. Within the set of 11926 // scalar instructions, we have no method for zero- or sign-extending the 11927 // value. Thus, we cannot handle i32 intermediate values here. 11928 if (Op.getOperand(0).getValueType() == MVT::i32) 11929 return SDValue(); 11930 11931 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) && 11932 "UINT_TO_FP is supported only with FPCVT"); 11933 11934 // If we have FCFIDS, then use it when converting to single-precision. 11935 // Otherwise, convert to double-precision and then round. 11936 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 11937 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS 11938 : PPCISD::FCFIDS) 11939 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU 11940 : PPCISD::FCFID); 11941 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 11942 ? MVT::f32 11943 : MVT::f64; 11944 11945 // If we're converting from a float, to an int, and back to a float again, 11946 // then we don't need the store/load pair at all. 11947 if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT && 11948 Subtarget.hasFPCVT()) || 11949 (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) { 11950 SDValue Src = Op.getOperand(0).getOperand(0); 11951 if (Src.getValueType() == MVT::f32) { 11952 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 11953 DCI.AddToWorklist(Src.getNode()); 11954 } else if (Src.getValueType() != MVT::f64) { 11955 // Make sure that we don't pick up a ppc_fp128 source value. 11956 return SDValue(); 11957 } 11958 11959 unsigned FCTOp = 11960 Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 11961 PPCISD::FCTIDUZ; 11962 11963 SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src); 11964 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp); 11965 11966 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) { 11967 FP = DAG.getNode(ISD::FP_ROUND, dl, 11968 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 11969 DCI.AddToWorklist(FP.getNode()); 11970 } 11971 11972 return FP; 11973 } 11974 11975 return SDValue(); 11976 } 11977 11978 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for 11979 // builtins) into loads with swaps. 
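//
// Roughly, on a little-endian subtarget that needs swaps, a load such as
//   (v4i32 load chain, ptr)
// is rewritten as
//   lxvd2x  (doubleword-permuted load)
//   xxswapd (undo the permutation)
//   bitcast to v4i32 (only when the requested type is not v2f64)
// reusing the original chain and memory operand.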
11980 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N, 11981 DAGCombinerInfo &DCI) const { 11982 SelectionDAG &DAG = DCI.DAG; 11983 SDLoc dl(N); 11984 SDValue Chain; 11985 SDValue Base; 11986 MachineMemOperand *MMO; 11987 11988 switch (N->getOpcode()) { 11989 default: 11990 llvm_unreachable("Unexpected opcode for little endian VSX load"); 11991 case ISD::LOAD: { 11992 LoadSDNode *LD = cast<LoadSDNode>(N); 11993 Chain = LD->getChain(); 11994 Base = LD->getBasePtr(); 11995 MMO = LD->getMemOperand(); 11996 // If the MMO suggests this isn't a load of a full vector, leave 11997 // things alone. For a built-in, we have to make the change for 11998 // correctness, so if there is a size problem that will be a bug. 11999 if (MMO->getSize() < 16) 12000 return SDValue(); 12001 break; 12002 } 12003 case ISD::INTRINSIC_W_CHAIN: { 12004 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 12005 Chain = Intrin->getChain(); 12006 // Similarly to the store case below, Intrin->getBasePtr() doesn't get 12007 // us what we want. Get operand 2 instead. 12008 Base = Intrin->getOperand(2); 12009 MMO = Intrin->getMemOperand(); 12010 break; 12011 } 12012 } 12013 12014 MVT VecTy = N->getValueType(0).getSimpleVT(); 12015 12016 // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is 12017 // aligned and the type is a vector with elements up to 4 bytes 12018 if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16) 12019 && VecTy.getScalarSizeInBits() <= 32 ) { 12020 return SDValue(); 12021 } 12022 12023 SDValue LoadOps[] = { Chain, Base }; 12024 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl, 12025 DAG.getVTList(MVT::v2f64, MVT::Other), 12026 LoadOps, MVT::v2f64, MMO); 12027 12028 DCI.AddToWorklist(Load.getNode()); 12029 Chain = Load.getValue(1); 12030 SDValue Swap = DAG.getNode( 12031 PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load); 12032 DCI.AddToWorklist(Swap.getNode()); 12033 12034 // Add a bitcast if the resulting load type doesn't match v2f64. 12035 if (VecTy != MVT::v2f64) { 12036 SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap); 12037 DCI.AddToWorklist(N.getNode()); 12038 // Package {bitcast value, swap's chain} to match Load's shape. 12039 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other), 12040 N, Swap.getValue(1)); 12041 } 12042 12043 return Swap; 12044 } 12045 12046 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for 12047 // builtins) into stores with swaps. 12048 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N, 12049 DAGCombinerInfo &DCI) const { 12050 SelectionDAG &DAG = DCI.DAG; 12051 SDLoc dl(N); 12052 SDValue Chain; 12053 SDValue Base; 12054 unsigned SrcOpnd; 12055 MachineMemOperand *MMO; 12056 12057 switch (N->getOpcode()) { 12058 default: 12059 llvm_unreachable("Unexpected opcode for little endian VSX store"); 12060 case ISD::STORE: { 12061 StoreSDNode *ST = cast<StoreSDNode>(N); 12062 Chain = ST->getChain(); 12063 Base = ST->getBasePtr(); 12064 MMO = ST->getMemOperand(); 12065 SrcOpnd = 1; 12066 // If the MMO suggests this isn't a store of a full vector, leave 12067 // things alone. For a built-in, we have to make the change for 12068 // correctness, so if there is a size problem that will be a bug. 12069 if (MMO->getSize() < 16) 12070 return SDValue(); 12071 break; 12072 } 12073 case ISD::INTRINSIC_VOID: { 12074 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 12075 Chain = Intrin->getChain(); 12076 // Intrin->getBasePtr() oddly does not get what we want. 
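    // For these INTRINSIC_VOID nodes the operands are laid out as
    // (chain, intrinsic id, value to store, pointer): the pointer is
    // operand 3, and the stored value (SrcOpnd) is operand 2.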
12077 Base = Intrin->getOperand(3); 12078 MMO = Intrin->getMemOperand(); 12079 SrcOpnd = 2; 12080 break; 12081 } 12082 } 12083 12084 SDValue Src = N->getOperand(SrcOpnd); 12085 MVT VecTy = Src.getValueType().getSimpleVT(); 12086 12087 // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the load is 12088 // aligned and the type is a vector with elements up to 4 bytes 12089 if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16) 12090 && VecTy.getScalarSizeInBits() <= 32 ) { 12091 return SDValue(); 12092 } 12093 12094 // All stores are done as v2f64 and possible bit cast. 12095 if (VecTy != MVT::v2f64) { 12096 Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src); 12097 DCI.AddToWorklist(Src.getNode()); 12098 } 12099 12100 SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl, 12101 DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src); 12102 DCI.AddToWorklist(Swap.getNode()); 12103 Chain = Swap.getValue(1); 12104 SDValue StoreOps[] = { Chain, Swap, Base }; 12105 SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl, 12106 DAG.getVTList(MVT::Other), 12107 StoreOps, VecTy, MMO); 12108 DCI.AddToWorklist(Store.getNode()); 12109 return Store; 12110 } 12111 12112 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, 12113 DAGCombinerInfo &DCI) const { 12114 SelectionDAG &DAG = DCI.DAG; 12115 SDLoc dl(N); 12116 switch (N->getOpcode()) { 12117 default: break; 12118 case ISD::SHL: 12119 return combineSHL(N, DCI); 12120 case ISD::SRA: 12121 return combineSRA(N, DCI); 12122 case ISD::SRL: 12123 return combineSRL(N, DCI); 12124 case PPCISD::SHL: 12125 if (isNullConstant(N->getOperand(0))) // 0 << V -> 0. 12126 return N->getOperand(0); 12127 break; 12128 case PPCISD::SRL: 12129 if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0. 12130 return N->getOperand(0); 12131 break; 12132 case PPCISD::SRA: 12133 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 12134 if (C->isNullValue() || // 0 >>s V -> 0. 12135 C->isAllOnesValue()) // -1 >>s V -> -1. 12136 return N->getOperand(0); 12137 } 12138 break; 12139 case ISD::SIGN_EXTEND: 12140 case ISD::ZERO_EXTEND: 12141 case ISD::ANY_EXTEND: 12142 return DAGCombineExtBoolTrunc(N, DCI); 12143 case ISD::TRUNCATE: 12144 case ISD::SETCC: 12145 case ISD::SELECT_CC: 12146 return DAGCombineTruncBoolExt(N, DCI); 12147 case ISD::SINT_TO_FP: 12148 case ISD::UINT_TO_FP: 12149 return combineFPToIntToFP(N, DCI); 12150 case ISD::STORE: { 12151 EVT Op1VT = N->getOperand(1).getValueType(); 12152 bool ValidTypeForStoreFltAsInt = (Op1VT == MVT::i32) || 12153 (Subtarget.hasP9Vector() && (Op1VT == MVT::i8 || Op1VT == MVT::i16)); 12154 12155 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)). 
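    // For example, for source like
    //   void foo(double D, int *P) { *P = (int)D; }
    // the float-to-int result can stay in a floating-point register and be
    // stored with stfiwx (or stxsix for i8/i16 stores on Power9 subtargets),
    // avoiding a round trip through a GPR. The checks below make sure the
    // operand types and subtarget actually allow this.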
12156 if (Subtarget.hasSTFIWX() && !cast<StoreSDNode>(N)->isTruncatingStore() && 12157 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT && 12158 ValidTypeForStoreFltAsInt && 12159 N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) { 12160 SDValue Val = N->getOperand(1).getOperand(0); 12161 if (Val.getValueType() == MVT::f32) { 12162 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 12163 DCI.AddToWorklist(Val.getNode()); 12164 } 12165 Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val); 12166 DCI.AddToWorklist(Val.getNode()); 12167 12168 if (Op1VT == MVT::i32) { 12169 SDValue Ops[] = { 12170 N->getOperand(0), Val, N->getOperand(2), 12171 DAG.getValueType(N->getOperand(1).getValueType()) 12172 }; 12173 12174 Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 12175 DAG.getVTList(MVT::Other), Ops, 12176 cast<StoreSDNode>(N)->getMemoryVT(), 12177 cast<StoreSDNode>(N)->getMemOperand()); 12178 } else { 12179 unsigned WidthInBytes = 12180 N->getOperand(1).getValueType() == MVT::i8 ? 1 : 2; 12181 SDValue WidthConst = DAG.getIntPtrConstant(WidthInBytes, dl, false); 12182 12183 SDValue Ops[] = { 12184 N->getOperand(0), Val, N->getOperand(2), WidthConst, 12185 DAG.getValueType(N->getOperand(1).getValueType()) 12186 }; 12187 Val = DAG.getMemIntrinsicNode(PPCISD::STXSIX, dl, 12188 DAG.getVTList(MVT::Other), Ops, 12189 cast<StoreSDNode>(N)->getMemoryVT(), 12190 cast<StoreSDNode>(N)->getMemOperand()); 12191 } 12192 12193 DCI.AddToWorklist(Val.getNode()); 12194 return Val; 12195 } 12196 12197 // Turn STORE (BSWAP) -> sthbrx/stwbrx. 12198 if (cast<StoreSDNode>(N)->isUnindexed() && 12199 N->getOperand(1).getOpcode() == ISD::BSWAP && 12200 N->getOperand(1).getNode()->hasOneUse() && 12201 (N->getOperand(1).getValueType() == MVT::i32 || 12202 N->getOperand(1).getValueType() == MVT::i16 || 12203 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && 12204 N->getOperand(1).getValueType() == MVT::i64))) { 12205 SDValue BSwapOp = N->getOperand(1).getOperand(0); 12206 // Do an any-extend to 32-bits if this is a half-word input. 12207 if (BSwapOp.getValueType() == MVT::i16) 12208 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp); 12209 12210 // If the type of BSWAP operand is wider than stored memory width 12211 // it need to be shifted to the right side before STBRX. 12212 EVT mVT = cast<StoreSDNode>(N)->getMemoryVT(); 12213 if (Op1VT.bitsGT(mVT)) { 12214 int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits(); 12215 BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp, 12216 DAG.getConstant(Shift, dl, MVT::i32)); 12217 // Need to truncate if this is a bswap of i64 stored as i32/i16. 12218 if (Op1VT == MVT::i64) 12219 BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp); 12220 } 12221 12222 SDValue Ops[] = { 12223 N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT) 12224 }; 12225 return 12226 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other), 12227 Ops, cast<StoreSDNode>(N)->getMemoryVT(), 12228 cast<StoreSDNode>(N)->getMemOperand()); 12229 } 12230 12231 // STORE Constant:i32<0> -> STORE<trunc to i32> Constant:i64<0> 12232 // So it can increase the chance of CSE constant construction. 
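    // For example, on PPC64 a function that stores a 32-bit zero and also
    // needs a 64-bit zero would otherwise materialize two separate constants;
    // after rewriting the store as a truncating store of Constant:i64<0>,
    // both uses can share a single materialized zero.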
    EVT VT = N->getOperand(1).getValueType();
    if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
        isa<ConstantSDNode>(N->getOperand(1)) && VT == MVT::i32) {
      SDValue Const64 = DAG.getConstant(N->getConstantOperandVal(1), dl,
                                        MVT::i64);
      // DAG.getTruncStore() can't be used here because it doesn't accept
      // the general (base + offset) addressing mode.
      // So we use UpdateNodeOperands and setTruncatingStore instead.
      DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
                             N->getOperand(3));
      cast<StoreSDNode>(N)->setTruncatingStore(true);
      return SDValue(N, 0);
    }

    // For little endian, VSX stores require generating xxswapd/stxvd2x.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
    if (VT.isSimple()) {
      MVT StoreVT = VT.getSimpleVT();
      if (Subtarget.needsSwapsForVSXMemOps() &&
          (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
           StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
        return expandVSXStoreForLE(N, DCI);
    }
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    EVT VT = LD->getValueType(0);

    // For little endian, VSX loads require generating lxvd2x/xxswapd.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
    if (VT.isSimple()) {
      MVT LoadVT = VT.getSimpleVT();
      if (Subtarget.needsSwapsForVSXMemOps() &&
          (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
           LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
        return expandVSXLoadForLE(N, DCI);
    }

    // We sometimes end up with a 64-bit integer load, from which we extract
    // two single-precision floating-point numbers. This happens with
    // std::complex<float>, and other similar structures, because of the way we
    // canonicalize structure copies. However, if we lack direct moves,
    // then the final bitcasts from the extracted integer values to the
    // floating-point numbers turn into store/load pairs. Even with direct moves,
    // just loading the two floating-point numbers is likely better.
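    // When the pattern matched below is found, the i64 load and the
    // srl/truncate/bitcast chain are replaced by two f32 loads, roughly:
    //   t21: f32,ch = load<LD4[%ref.tmp]>   t0, t6
    //   t22: f32,ch = load<LD4[%ref.tmp+4]> t21:1, (add t6, Constant:i64<4>)
    // (which of the two feeds which original bitcast depends on endianness).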
12279 auto ReplaceTwoFloatLoad = [&]() { 12280 if (VT != MVT::i64) 12281 return false; 12282 12283 if (LD->getExtensionType() != ISD::NON_EXTLOAD || 12284 LD->isVolatile()) 12285 return false; 12286 12287 // We're looking for a sequence like this: 12288 // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64 12289 // t16: i64 = srl t13, Constant:i32<32> 12290 // t17: i32 = truncate t16 12291 // t18: f32 = bitcast t17 12292 // t19: i32 = truncate t13 12293 // t20: f32 = bitcast t19 12294 12295 if (!LD->hasNUsesOfValue(2, 0)) 12296 return false; 12297 12298 auto UI = LD->use_begin(); 12299 while (UI.getUse().getResNo() != 0) ++UI; 12300 SDNode *Trunc = *UI++; 12301 while (UI.getUse().getResNo() != 0) ++UI; 12302 SDNode *RightShift = *UI; 12303 if (Trunc->getOpcode() != ISD::TRUNCATE) 12304 std::swap(Trunc, RightShift); 12305 12306 if (Trunc->getOpcode() != ISD::TRUNCATE || 12307 Trunc->getValueType(0) != MVT::i32 || 12308 !Trunc->hasOneUse()) 12309 return false; 12310 if (RightShift->getOpcode() != ISD::SRL || 12311 !isa<ConstantSDNode>(RightShift->getOperand(1)) || 12312 RightShift->getConstantOperandVal(1) != 32 || 12313 !RightShift->hasOneUse()) 12314 return false; 12315 12316 SDNode *Trunc2 = *RightShift->use_begin(); 12317 if (Trunc2->getOpcode() != ISD::TRUNCATE || 12318 Trunc2->getValueType(0) != MVT::i32 || 12319 !Trunc2->hasOneUse()) 12320 return false; 12321 12322 SDNode *Bitcast = *Trunc->use_begin(); 12323 SDNode *Bitcast2 = *Trunc2->use_begin(); 12324 12325 if (Bitcast->getOpcode() != ISD::BITCAST || 12326 Bitcast->getValueType(0) != MVT::f32) 12327 return false; 12328 if (Bitcast2->getOpcode() != ISD::BITCAST || 12329 Bitcast2->getValueType(0) != MVT::f32) 12330 return false; 12331 12332 if (Subtarget.isLittleEndian()) 12333 std::swap(Bitcast, Bitcast2); 12334 12335 // Bitcast has the second float (in memory-layout order) and Bitcast2 12336 // has the first one. 12337 12338 SDValue BasePtr = LD->getBasePtr(); 12339 if (LD->isIndexed()) { 12340 assert(LD->getAddressingMode() == ISD::PRE_INC && 12341 "Non-pre-inc AM on PPC?"); 12342 BasePtr = 12343 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 12344 LD->getOffset()); 12345 } 12346 12347 auto MMOFlags = 12348 LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile; 12349 SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr, 12350 LD->getPointerInfo(), LD->getAlignment(), 12351 MMOFlags, LD->getAAInfo()); 12352 SDValue AddPtr = 12353 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), 12354 BasePtr, DAG.getIntPtrConstant(4, dl)); 12355 SDValue FloatLoad2 = DAG.getLoad( 12356 MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr, 12357 LD->getPointerInfo().getWithOffset(4), 12358 MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo()); 12359 12360 if (LD->isIndexed()) { 12361 // Note that DAGCombine should re-form any pre-increment load(s) from 12362 // what is produced here if that makes sense. 12363 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr); 12364 } 12365 12366 DCI.CombineTo(Bitcast2, FloatLoad); 12367 DCI.CombineTo(Bitcast, FloatLoad2); 12368 12369 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 
2 : 1), 12370 SDValue(FloatLoad2.getNode(), 1)); 12371 return true; 12372 }; 12373 12374 if (ReplaceTwoFloatLoad()) 12375 return SDValue(N, 0); 12376 12377 EVT MemVT = LD->getMemoryVT(); 12378 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext()); 12379 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty); 12380 Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext()); 12381 unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy); 12382 if (LD->isUnindexed() && VT.isVector() && 12383 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) && 12384 // P8 and later hardware should just use LOAD. 12385 !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 || 12386 VT == MVT::v4i32 || VT == MVT::v4f32)) || 12387 (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) && 12388 LD->getAlignment() >= ScalarABIAlignment)) && 12389 LD->getAlignment() < ABIAlignment) { 12390 // This is a type-legal unaligned Altivec or QPX load. 12391 SDValue Chain = LD->getChain(); 12392 SDValue Ptr = LD->getBasePtr(); 12393 bool isLittleEndian = Subtarget.isLittleEndian(); 12394 12395 // This implements the loading of unaligned vectors as described in 12396 // the venerable Apple Velocity Engine overview. Specifically: 12397 // https://developer.apple.com/hardwaredrivers/ve/alignment.html 12398 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html 12399 // 12400 // The general idea is to expand a sequence of one or more unaligned 12401 // loads into an alignment-based permutation-control instruction (lvsl 12402 // or lvsr), a series of regular vector loads (which always truncate 12403 // their input address to an aligned address), and a series of 12404 // permutations. The results of these permutations are the requested 12405 // loaded values. The trick is that the last "extra" load is not taken 12406 // from the address you might suspect (sizeof(vector) bytes after the 12407 // last requested load), but rather sizeof(vector) - 1 bytes after the 12408 // last requested vector. The point of this is to avoid a page fault if 12409 // the base address happened to be aligned. This works because if the 12410 // base address is aligned, then adding less than a full vector length 12411 // will cause the last vector in the sequence to be (re)loaded. 12412 // Otherwise, the next vector will be fetched as you might suspect was 12413 // necessary. 12414 12415 // We might be able to reuse the permutation generation from 12416 // a different base address offset from this one by an aligned amount. 12417 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this 12418 // optimization later. 12419 Intrinsic::ID Intr, IntrLD, IntrPerm; 12420 MVT PermCntlTy, PermTy, LDTy; 12421 if (Subtarget.hasAltivec()) { 12422 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr : 12423 Intrinsic::ppc_altivec_lvsl; 12424 IntrLD = Intrinsic::ppc_altivec_lvx; 12425 IntrPerm = Intrinsic::ppc_altivec_vperm; 12426 PermCntlTy = MVT::v16i8; 12427 PermTy = MVT::v4i32; 12428 LDTy = MVT::v4i32; 12429 } else { 12430 Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld : 12431 Intrinsic::ppc_qpx_qvlpcls; 12432 IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd : 12433 Intrinsic::ppc_qpx_qvlfs; 12434 IntrPerm = Intrinsic::ppc_qpx_qvfperm; 12435 PermCntlTy = MVT::v4f64; 12436 PermTy = MVT::v4f64; 12437 LDTy = MemVT.getSimpleVT(); 12438 } 12439 12440 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy); 12441 12442 // Create the new MMO for the new base load. 
It is like the original MMO, 12443 // but represents an area in memory almost twice the vector size centered 12444 // on the original address. If the address is unaligned, we might start 12445 // reading up to (sizeof(vector)-1) bytes below the address of the 12446 // original unaligned load. 12447 MachineFunction &MF = DAG.getMachineFunction(); 12448 MachineMemOperand *BaseMMO = 12449 MF.getMachineMemOperand(LD->getMemOperand(), 12450 -(long)MemVT.getStoreSize()+1, 12451 2*MemVT.getStoreSize()-1); 12452 12453 // Create the new base load. 12454 SDValue LDXIntID = 12455 DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout())); 12456 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr }; 12457 SDValue BaseLoad = 12458 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 12459 DAG.getVTList(PermTy, MVT::Other), 12460 BaseLoadOps, LDTy, BaseMMO); 12461 12462 // Note that the value of IncOffset (which is provided to the next 12463 // load's pointer info offset value, and thus used to calculate the 12464 // alignment), and the value of IncValue (which is actually used to 12465 // increment the pointer value) are different! This is because we 12466 // require the next load to appear to be aligned, even though it 12467 // is actually offset from the base pointer by a lesser amount. 12468 int IncOffset = VT.getSizeInBits() / 8; 12469 int IncValue = IncOffset; 12470 12471 // Walk (both up and down) the chain looking for another load at the real 12472 // (aligned) offset (the alignment of the other load does not matter in 12473 // this case). If found, then do not use the offset reduction trick, as 12474 // that will prevent the loads from being later combined (as they would 12475 // otherwise be duplicates). 12476 if (!findConsecutiveLoad(LD, DAG)) 12477 --IncValue; 12478 12479 SDValue Increment = 12480 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout())); 12481 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 12482 12483 MachineMemOperand *ExtraMMO = 12484 MF.getMachineMemOperand(LD->getMemOperand(), 12485 1, 2*MemVT.getStoreSize()-1); 12486 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr }; 12487 SDValue ExtraLoad = 12488 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 12489 DAG.getVTList(PermTy, MVT::Other), 12490 ExtraLoadOps, LDTy, ExtraMMO); 12491 12492 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 12493 BaseLoad.getValue(1), ExtraLoad.getValue(1)); 12494 12495 // Because vperm has a big-endian bias, we must reverse the order 12496 // of the input vectors and complement the permute control vector 12497 // when generating little endian code. We have already handled the 12498 // latter by using lvsr instead of lvsl, so just reverse BaseLoad 12499 // and ExtraLoad here. 12500 SDValue Perm; 12501 if (isLittleEndian) 12502 Perm = BuildIntrinsicOp(IntrPerm, 12503 ExtraLoad, BaseLoad, PermCntl, DAG, dl); 12504 else 12505 Perm = BuildIntrinsicOp(IntrPerm, 12506 BaseLoad, ExtraLoad, PermCntl, DAG, dl); 12507 12508 if (VT != PermTy) 12509 Perm = Subtarget.hasAltivec() ? 12510 DAG.getNode(ISD::BITCAST, dl, VT, Perm) : 12511 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX 12512 DAG.getTargetConstant(1, dl, MVT::i64)); 12513 // second argument is 1 because this rounding 12514 // is always exact. 12515 12516 // The output of the permutation is our loaded result, the TokenFactor is 12517 // our new chain. 
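      // For a single unaligned Altivec load, the nodes built above correspond
      // roughly to the sequence (register names are illustrative only):
      //   lvsl   vPC,  0,    rPtr     ; lvsr on little-endian targets
      //   lvx    vLo,  0,    rPtr     ; aligned load covering the start
      //   lvx    vHi,  rInc, rPtr     ; rInc = 15 or 16, see IncValue above
      //   vperm  vRes, vLo,  vHi, vPC ; operands swapped for little endian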
12518 DCI.CombineTo(N, Perm, TF); 12519 return SDValue(N, 0); 12520 } 12521 } 12522 break; 12523 case ISD::INTRINSIC_WO_CHAIN: { 12524 bool isLittleEndian = Subtarget.isLittleEndian(); 12525 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 12526 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr 12527 : Intrinsic::ppc_altivec_lvsl); 12528 if ((IID == Intr || 12529 IID == Intrinsic::ppc_qpx_qvlpcld || 12530 IID == Intrinsic::ppc_qpx_qvlpcls) && 12531 N->getOperand(1)->getOpcode() == ISD::ADD) { 12532 SDValue Add = N->getOperand(1); 12533 12534 int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ? 12535 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */; 12536 12537 if (DAG.MaskedValueIsZero(Add->getOperand(1), 12538 APInt::getAllOnesValue(Bits /* alignment */) 12539 .zext(Add.getScalarValueSizeInBits()))) { 12540 SDNode *BasePtr = Add->getOperand(0).getNode(); 12541 for (SDNode::use_iterator UI = BasePtr->use_begin(), 12542 UE = BasePtr->use_end(); 12543 UI != UE; ++UI) { 12544 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 12545 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) { 12546 // We've found another LVSL/LVSR, and this address is an aligned 12547 // multiple of that one. The results will be the same, so use the 12548 // one we've just found instead. 12549 12550 return SDValue(*UI, 0); 12551 } 12552 } 12553 } 12554 12555 if (isa<ConstantSDNode>(Add->getOperand(1))) { 12556 SDNode *BasePtr = Add->getOperand(0).getNode(); 12557 for (SDNode::use_iterator UI = BasePtr->use_begin(), 12558 UE = BasePtr->use_end(); UI != UE; ++UI) { 12559 if (UI->getOpcode() == ISD::ADD && 12560 isa<ConstantSDNode>(UI->getOperand(1)) && 12561 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() - 12562 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) % 12563 (1ULL << Bits) == 0) { 12564 SDNode *OtherAdd = *UI; 12565 for (SDNode::use_iterator VI = OtherAdd->use_begin(), 12566 VE = OtherAdd->use_end(); VI != VE; ++VI) { 12567 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 12568 cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) { 12569 return SDValue(*VI, 0); 12570 } 12571 } 12572 } 12573 } 12574 } 12575 } 12576 } 12577 12578 break; 12579 case ISD::INTRINSIC_W_CHAIN: 12580 // For little endian, VSX loads require generating lxvd2x/xxswapd. 12581 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load. 12582 if (Subtarget.needsSwapsForVSXMemOps()) { 12583 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 12584 default: 12585 break; 12586 case Intrinsic::ppc_vsx_lxvw4x: 12587 case Intrinsic::ppc_vsx_lxvd2x: 12588 return expandVSXLoadForLE(N, DCI); 12589 } 12590 } 12591 break; 12592 case ISD::INTRINSIC_VOID: 12593 // For little endian, VSX stores require generating xxswapd/stxvd2x. 12594 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store. 12595 if (Subtarget.needsSwapsForVSXMemOps()) { 12596 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 12597 default: 12598 break; 12599 case Intrinsic::ppc_vsx_stxvw4x: 12600 case Intrinsic::ppc_vsx_stxvd2x: 12601 return expandVSXStoreForLE(N, DCI); 12602 } 12603 } 12604 break; 12605 case ISD::BSWAP: 12606 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 
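    // For example, for something like
    //   uint32_t V = __builtin_bswap32(*P);
    // the separate load and bswap become a single byte-reversed load
    // (lwbrx/lhbrx, or ldbrx for i64 when available), emitted below as a
    // PPCISD::LBRX memory intrinsic node.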
12607 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && 12608 N->getOperand(0).hasOneUse() && 12609 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 || 12610 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && 12611 N->getValueType(0) == MVT::i64))) { 12612 SDValue Load = N->getOperand(0); 12613 LoadSDNode *LD = cast<LoadSDNode>(Load); 12614 // Create the byte-swapping load. 12615 SDValue Ops[] = { 12616 LD->getChain(), // Chain 12617 LD->getBasePtr(), // Ptr 12618 DAG.getValueType(N->getValueType(0)) // VT 12619 }; 12620 SDValue BSLoad = 12621 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl, 12622 DAG.getVTList(N->getValueType(0) == MVT::i64 ? 12623 MVT::i64 : MVT::i32, MVT::Other), 12624 Ops, LD->getMemoryVT(), LD->getMemOperand()); 12625 12626 // If this is an i16 load, insert the truncate. 12627 SDValue ResVal = BSLoad; 12628 if (N->getValueType(0) == MVT::i16) 12629 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad); 12630 12631 // First, combine the bswap away. This makes the value produced by the 12632 // load dead. 12633 DCI.CombineTo(N, ResVal); 12634 12635 // Next, combine the load away, we give it a bogus result value but a real 12636 // chain result. The result value is dead because the bswap is dead. 12637 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); 12638 12639 // Return N so it doesn't get rechecked! 12640 return SDValue(N, 0); 12641 } 12642 break; 12643 case PPCISD::VCMP: 12644 // If a VCMPo node already exists with exactly the same operands as this 12645 // node, use its result instead of this node (VCMPo computes both a CR6 and 12646 // a normal output). 12647 // 12648 if (!N->getOperand(0).hasOneUse() && 12649 !N->getOperand(1).hasOneUse() && 12650 !N->getOperand(2).hasOneUse()) { 12651 12652 // Scan all of the users of the LHS, looking for VCMPo's that match. 12653 SDNode *VCMPoNode = nullptr; 12654 12655 SDNode *LHSN = N->getOperand(0).getNode(); 12656 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 12657 UI != E; ++UI) 12658 if (UI->getOpcode() == PPCISD::VCMPo && 12659 UI->getOperand(1) == N->getOperand(1) && 12660 UI->getOperand(2) == N->getOperand(2) && 12661 UI->getOperand(0) == N->getOperand(0)) { 12662 VCMPoNode = *UI; 12663 break; 12664 } 12665 12666 // If there is no VCMPo node, or if the flag value has a single use, don't 12667 // transform this. 12668 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 12669 break; 12670 12671 // Look at the (necessarily single) use of the flag value. If it has a 12672 // chain, this transformation is more complex. Note that multiple things 12673 // could use the value result, which we should ignore. 12674 SDNode *FlagUser = nullptr; 12675 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 12676 FlagUser == nullptr; ++UI) { 12677 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 12678 SDNode *User = *UI; 12679 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 12680 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) { 12681 FlagUser = User; 12682 break; 12683 } 12684 } 12685 } 12686 12687 // If the user is a MFOCRF instruction, we know this is safe. 12688 // Otherwise we give up for right now. 
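      // This situation typically arises when the source uses both the value
      // and the predicate form of the same comparison, e.g. vec_cmpeq(a, b)
      // together with vec_any_eq(a, b): the VCMPo emitted for the predicate
      // already produces the full vector result, so this VCMP can reuse it.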
12689 if (FlagUser->getOpcode() == PPCISD::MFOCRF) 12690 return SDValue(VCMPoNode, 0); 12691 } 12692 break; 12693 case ISD::BRCOND: { 12694 SDValue Cond = N->getOperand(1); 12695 SDValue Target = N->getOperand(2); 12696 12697 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN && 12698 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() == 12699 Intrinsic::ppc_is_decremented_ctr_nonzero) { 12700 12701 // We now need to make the intrinsic dead (it cannot be instruction 12702 // selected). 12703 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0)); 12704 assert(Cond.getNode()->hasOneUse() && 12705 "Counter decrement has more than one use"); 12706 12707 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other, 12708 N->getOperand(0), Target); 12709 } 12710 } 12711 break; 12712 case ISD::BR_CC: { 12713 // If this is a branch on an altivec predicate comparison, lower this so 12714 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This 12715 // lowering is done pre-legalize, because the legalizer lowers the predicate 12716 // compare down to code that is difficult to reassemble. 12717 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 12718 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 12719 12720 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero 12721 // value. If so, pass-through the AND to get to the intrinsic. 12722 if (LHS.getOpcode() == ISD::AND && 12723 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN && 12724 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() == 12725 Intrinsic::ppc_is_decremented_ctr_nonzero && 12726 isa<ConstantSDNode>(LHS.getOperand(1)) && 12727 !isNullConstant(LHS.getOperand(1))) 12728 LHS = LHS.getOperand(0); 12729 12730 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN && 12731 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 12732 Intrinsic::ppc_is_decremented_ctr_nonzero && 12733 isa<ConstantSDNode>(RHS)) { 12734 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && 12735 "Counter decrement comparison is not EQ or NE"); 12736 12737 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 12738 bool isBDNZ = (CC == ISD::SETEQ && Val) || 12739 (CC == ISD::SETNE && !Val); 12740 12741 // We now need to make the intrinsic dead (it cannot be instruction 12742 // selected). 12743 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0)); 12744 assert(LHS.getNode()->hasOneUse() && 12745 "Counter decrement has more than one use"); 12746 12747 return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other, 12748 N->getOperand(0), N->getOperand(4)); 12749 } 12750 12751 int CompareOpc; 12752 bool isDot; 12753 12754 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 12755 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 12756 getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) { 12757 assert(isDot && "Can't compare against a vector result!"); 12758 12759 // If this is a comparison against something other than 0/1, then we know 12760 // that the condition is never/always true. 12761 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 12762 if (Val != 0 && Val != 1) { 12763 if (CC == ISD::SETEQ) // Cond never true, remove branch. 12764 return N->getOperand(0); 12765 // Always !=, turn it into an unconditional branch. 
12766 return DAG.getNode(ISD::BR, dl, MVT::Other, 12767 N->getOperand(0), N->getOperand(4)); 12768 } 12769 12770 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 12771 12772 // Create the PPCISD altivec 'dot' comparison node. 12773 SDValue Ops[] = { 12774 LHS.getOperand(2), // LHS of compare 12775 LHS.getOperand(3), // RHS of compare 12776 DAG.getConstant(CompareOpc, dl, MVT::i32) 12777 }; 12778 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue }; 12779 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 12780 12781 // Unpack the result based on how the target uses it. 12782 PPC::Predicate CompOpc; 12783 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 12784 default: // Can't happen, don't crash on invalid number though. 12785 case 0: // Branch on the value of the EQ bit of CR6. 12786 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 12787 break; 12788 case 1: // Branch on the inverted value of the EQ bit of CR6. 12789 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 12790 break; 12791 case 2: // Branch on the value of the LT bit of CR6. 12792 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 12793 break; 12794 case 3: // Branch on the inverted value of the LT bit of CR6. 12795 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT; 12796 break; 12797 } 12798 12799 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 12800 DAG.getConstant(CompOpc, dl, MVT::i32), 12801 DAG.getRegister(PPC::CR6, MVT::i32), 12802 N->getOperand(4), CompNode.getValue(1)); 12803 } 12804 break; 12805 } 12806 case ISD::BUILD_VECTOR: 12807 return DAGCombineBuildVector(N, DCI); 12808 } 12809 12810 return SDValue(); 12811 } 12812 12813 SDValue 12814 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 12815 SelectionDAG &DAG, 12816 std::vector<SDNode *> *Created) const { 12817 // fold (sdiv X, pow2) 12818 EVT VT = N->getValueType(0); 12819 if (VT == MVT::i64 && !Subtarget.isPPC64()) 12820 return SDValue(); 12821 if ((VT != MVT::i32 && VT != MVT::i64) || 12822 !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2())) 12823 return SDValue(); 12824 12825 SDLoc DL(N); 12826 SDValue N0 = N->getOperand(0); 12827 12828 bool IsNegPow2 = (-Divisor).isPowerOf2(); 12829 unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros(); 12830 SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT); 12831 12832 SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt); 12833 if (Created) 12834 Created->push_back(Op.getNode()); 12835 12836 if (IsNegPow2) { 12837 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op); 12838 if (Created) 12839 Created->push_back(Op.getNode()); 12840 } 12841 12842 return Op; 12843 } 12844 12845 //===----------------------------------------------------------------------===// 12846 // Inline Assembly Support 12847 //===----------------------------------------------------------------------===// 12848 12849 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 12850 KnownBits &Known, 12851 const APInt &DemandedElts, 12852 const SelectionDAG &DAG, 12853 unsigned Depth) const { 12854 Known.resetAll(); 12855 switch (Op.getOpcode()) { 12856 default: break; 12857 case PPCISD::LBRX: { 12858 // lhbrx is known to have the top bits cleared out. 
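    // (Operand 2 is the VT node recording the memory type; an i16
    // byte-reversed load is zero-extended, so bits 16-31 are known zero.)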
12859 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16) 12860 Known.Zero = 0xFFFF0000; 12861 break; 12862 } 12863 case ISD::INTRINSIC_WO_CHAIN: { 12864 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) { 12865 default: break; 12866 case Intrinsic::ppc_altivec_vcmpbfp_p: 12867 case Intrinsic::ppc_altivec_vcmpeqfp_p: 12868 case Intrinsic::ppc_altivec_vcmpequb_p: 12869 case Intrinsic::ppc_altivec_vcmpequh_p: 12870 case Intrinsic::ppc_altivec_vcmpequw_p: 12871 case Intrinsic::ppc_altivec_vcmpequd_p: 12872 case Intrinsic::ppc_altivec_vcmpgefp_p: 12873 case Intrinsic::ppc_altivec_vcmpgtfp_p: 12874 case Intrinsic::ppc_altivec_vcmpgtsb_p: 12875 case Intrinsic::ppc_altivec_vcmpgtsh_p: 12876 case Intrinsic::ppc_altivec_vcmpgtsw_p: 12877 case Intrinsic::ppc_altivec_vcmpgtsd_p: 12878 case Intrinsic::ppc_altivec_vcmpgtub_p: 12879 case Intrinsic::ppc_altivec_vcmpgtuh_p: 12880 case Intrinsic::ppc_altivec_vcmpgtuw_p: 12881 case Intrinsic::ppc_altivec_vcmpgtud_p: 12882 Known.Zero = ~1U; // All bits but the low one are known to be zero. 12883 break; 12884 } 12885 } 12886 } 12887 } 12888 12889 unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { 12890 switch (Subtarget.getDarwinDirective()) { 12891 default: break; 12892 case PPC::DIR_970: 12893 case PPC::DIR_PWR4: 12894 case PPC::DIR_PWR5: 12895 case PPC::DIR_PWR5X: 12896 case PPC::DIR_PWR6: 12897 case PPC::DIR_PWR6X: 12898 case PPC::DIR_PWR7: 12899 case PPC::DIR_PWR8: 12900 case PPC::DIR_PWR9: { 12901 if (!ML) 12902 break; 12903 12904 const PPCInstrInfo *TII = Subtarget.getInstrInfo(); 12905 12906 // For small loops (between 5 and 8 instructions), align to a 32-byte 12907 // boundary so that the entire loop fits in one instruction-cache line. 12908 uint64_t LoopSize = 0; 12909 for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I) 12910 for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) { 12911 LoopSize += TII->getInstSizeInBytes(*J); 12912 if (LoopSize > 32) 12913 break; 12914 } 12915 12916 if (LoopSize > 16 && LoopSize <= 32) 12917 return 5; 12918 12919 break; 12920 } 12921 } 12922 12923 return TargetLowering::getPrefLoopAlignment(ML); 12924 } 12925 12926 /// getConstraintType - Given a constraint, return the type of 12927 /// constraint it is for this target. 12928 PPCTargetLowering::ConstraintType 12929 PPCTargetLowering::getConstraintType(StringRef Constraint) const { 12930 if (Constraint.size() == 1) { 12931 switch (Constraint[0]) { 12932 default: break; 12933 case 'b': 12934 case 'r': 12935 case 'f': 12936 case 'd': 12937 case 'v': 12938 case 'y': 12939 return C_RegisterClass; 12940 case 'Z': 12941 // FIXME: While Z does indicate a memory constraint, it specifically 12942 // indicates an r+r address (used in conjunction with the 'y' modifier 12943 // in the replacement string). Currently, we're forcing the base 12944 // register to be r0 in the asm printer (which is interpreted as zero) 12945 // and forming the complete address in the second register. This is 12946 // suboptimal. 12947 return C_Memory; 12948 } 12949 } else if (Constraint == "wc") { // individual CR bits. 12950 return C_RegisterClass; 12951 } else if (Constraint == "wa" || Constraint == "wd" || 12952 Constraint == "wf" || Constraint == "ws") { 12953 return C_RegisterClass; // VSX registers. 12954 } 12955 return TargetLowering::getConstraintType(Constraint); 12956 } 12957 12958 /// Examine constraint type and operand type and determine a weight value. 
12959 /// This object must already have been set up with the operand type 12960 /// and the current alternative constraint selected. 12961 TargetLowering::ConstraintWeight 12962 PPCTargetLowering::getSingleConstraintMatchWeight( 12963 AsmOperandInfo &info, const char *constraint) const { 12964 ConstraintWeight weight = CW_Invalid; 12965 Value *CallOperandVal = info.CallOperandVal; 12966 // If we don't have a value, we can't do a match, 12967 // but allow it at the lowest weight. 12968 if (!CallOperandVal) 12969 return CW_Default; 12970 Type *type = CallOperandVal->getType(); 12971 12972 // Look at the constraint type. 12973 if (StringRef(constraint) == "wc" && type->isIntegerTy(1)) 12974 return CW_Register; // an individual CR bit. 12975 else if ((StringRef(constraint) == "wa" || 12976 StringRef(constraint) == "wd" || 12977 StringRef(constraint) == "wf") && 12978 type->isVectorTy()) 12979 return CW_Register; 12980 else if (StringRef(constraint) == "ws" && type->isDoubleTy()) 12981 return CW_Register; 12982 12983 switch (*constraint) { 12984 default: 12985 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 12986 break; 12987 case 'b': 12988 if (type->isIntegerTy()) 12989 weight = CW_Register; 12990 break; 12991 case 'f': 12992 if (type->isFloatTy()) 12993 weight = CW_Register; 12994 break; 12995 case 'd': 12996 if (type->isDoubleTy()) 12997 weight = CW_Register; 12998 break; 12999 case 'v': 13000 if (type->isVectorTy()) 13001 weight = CW_Register; 13002 break; 13003 case 'y': 13004 weight = CW_Register; 13005 break; 13006 case 'Z': 13007 weight = CW_Memory; 13008 break; 13009 } 13010 return weight; 13011 } 13012 13013 std::pair<unsigned, const TargetRegisterClass *> 13014 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 13015 StringRef Constraint, 13016 MVT VT) const { 13017 if (Constraint.size() == 1) { 13018 // GCC RS6000 Constraint Letters 13019 switch (Constraint[0]) { 13020 case 'b': // R1-R31 13021 if (VT == MVT::i64 && Subtarget.isPPC64()) 13022 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass); 13023 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass); 13024 case 'r': // R0-R31 13025 if (VT == MVT::i64 && Subtarget.isPPC64()) 13026 return std::make_pair(0U, &PPC::G8RCRegClass); 13027 return std::make_pair(0U, &PPC::GPRCRegClass); 13028 // 'd' and 'f' constraints are both defined to be "the floating point 13029 // registers", where one is for 32-bit and the other for 64-bit. We don't 13030 // really care overly much here so just give them all the same reg classes. 13031 case 'd': 13032 case 'f': 13033 if (VT == MVT::f32 || VT == MVT::i32) 13034 return std::make_pair(0U, &PPC::F4RCRegClass); 13035 if (VT == MVT::f64 || VT == MVT::i64) 13036 return std::make_pair(0U, &PPC::F8RCRegClass); 13037 if (VT == MVT::v4f64 && Subtarget.hasQPX()) 13038 return std::make_pair(0U, &PPC::QFRCRegClass); 13039 if (VT == MVT::v4f32 && Subtarget.hasQPX()) 13040 return std::make_pair(0U, &PPC::QSRCRegClass); 13041 break; 13042 case 'v': 13043 if (VT == MVT::v4f64 && Subtarget.hasQPX()) 13044 return std::make_pair(0U, &PPC::QFRCRegClass); 13045 if (VT == MVT::v4f32 && Subtarget.hasQPX()) 13046 return std::make_pair(0U, &PPC::QSRCRegClass); 13047 if (Subtarget.hasAltivec()) 13048 return std::make_pair(0U, &PPC::VRRCRegClass); 13049 case 'y': // crrc 13050 return std::make_pair(0U, &PPC::CRRCRegClass); 13051 } 13052 } else if (Constraint == "wc" && Subtarget.useCRBits()) { 13053 // An individual CR bit. 
13054 return std::make_pair(0U, &PPC::CRBITRCRegClass); 13055 } else if ((Constraint == "wa" || Constraint == "wd" || 13056 Constraint == "wf") && Subtarget.hasVSX()) { 13057 return std::make_pair(0U, &PPC::VSRCRegClass); 13058 } else if (Constraint == "ws" && Subtarget.hasVSX()) { 13059 if (VT == MVT::f32 && Subtarget.hasP8Vector()) 13060 return std::make_pair(0U, &PPC::VSSRCRegClass); 13061 else 13062 return std::make_pair(0U, &PPC::VSFRCRegClass); 13063 } 13064 13065 std::pair<unsigned, const TargetRegisterClass *> R = 13066 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 13067 13068 // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers 13069 // (which we call X[0-9]+). If a 64-bit value has been requested, and a 13070 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent 13071 // register. 13072 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use 13073 // the AsmName field from *RegisterInfo.td, then this would not be necessary. 13074 if (R.first && VT == MVT::i64 && Subtarget.isPPC64() && 13075 PPC::GPRCRegClass.contains(R.first)) 13076 return std::make_pair(TRI->getMatchingSuperReg(R.first, 13077 PPC::sub_32, &PPC::G8RCRegClass), 13078 &PPC::G8RCRegClass); 13079 13080 // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same. 13081 if (!R.second && StringRef("{cc}").equals_lower(Constraint)) { 13082 R.first = PPC::CR0; 13083 R.second = &PPC::CRRCRegClass; 13084 } 13085 13086 return R; 13087 } 13088 13089 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 13090 /// vector. If it is invalid, don't add anything to Ops. 13091 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 13092 std::string &Constraint, 13093 std::vector<SDValue>&Ops, 13094 SelectionDAG &DAG) const { 13095 SDValue Result; 13096 13097 // Only support length 1 constraints. 13098 if (Constraint.length() > 1) return; 13099 13100 char Letter = Constraint[0]; 13101 switch (Letter) { 13102 default: break; 13103 case 'I': 13104 case 'J': 13105 case 'K': 13106 case 'L': 13107 case 'M': 13108 case 'N': 13109 case 'O': 13110 case 'P': { 13111 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op); 13112 if (!CST) return; // Must be an immediate to match. 13113 SDLoc dl(Op); 13114 int64_t Value = CST->getSExtValue(); 13115 EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative 13116 // numbers are printed as such. 13117 switch (Letter) { 13118 default: llvm_unreachable("Unknown constraint letter!"); 13119 case 'I': // "I" is a signed 16-bit constant. 13120 if (isInt<16>(Value)) 13121 Result = DAG.getTargetConstant(Value, dl, TCVT); 13122 break; 13123 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 13124 if (isShiftedUInt<16, 16>(Value)) 13125 Result = DAG.getTargetConstant(Value, dl, TCVT); 13126 break; 13127 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. 13128 if (isShiftedInt<16, 16>(Value)) 13129 Result = DAG.getTargetConstant(Value, dl, TCVT); 13130 break; 13131 case 'K': // "K" is a constant with only the low-order 16 bits nonzero. 13132 if (isUInt<16>(Value)) 13133 Result = DAG.getTargetConstant(Value, dl, TCVT); 13134 break; 13135 case 'M': // "M" is a constant that is greater than 31. 13136 if (Value > 31) 13137 Result = DAG.getTargetConstant(Value, dl, TCVT); 13138 break; 13139 case 'N': // "N" is a positive constant that is an exact power of two. 
13140 if (Value > 0 && isPowerOf2_64(Value)) 13141 Result = DAG.getTargetConstant(Value, dl, TCVT); 13142 break; 13143 case 'O': // "O" is the constant zero. 13144 if (Value == 0) 13145 Result = DAG.getTargetConstant(Value, dl, TCVT); 13146 break; 13147 case 'P': // "P" is a constant whose negation is a signed 16-bit constant. 13148 if (isInt<16>(-Value)) 13149 Result = DAG.getTargetConstant(Value, dl, TCVT); 13150 break; 13151 } 13152 break; 13153 } 13154 } 13155 13156 if (Result.getNode()) { 13157 Ops.push_back(Result); 13158 return; 13159 } 13160 13161 // Handle standard constraint letters. 13162 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 13163 } 13164 13165 // isLegalAddressingMode - Return true if the addressing mode represented 13166 // by AM is legal for this target, for a load/store of the specified type. 13167 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL, 13168 const AddrMode &AM, Type *Ty, 13169 unsigned AS, Instruction *I) const { 13170 // PPC does not allow r+i addressing modes for vectors! 13171 if (Ty->isVectorTy() && AM.BaseOffs != 0) 13172 return false; 13173 13174 // PPC allows a sign-extended 16-bit immediate field. 13175 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1) 13176 return false; 13177 13178 // No global is ever allowed as a base. 13179 if (AM.BaseGV) 13180 return false; 13181 13182 // PPC only support r+r, 13183 switch (AM.Scale) { 13184 case 0: // "r+i" or just "i", depending on HasBaseReg. 13185 break; 13186 case 1: 13187 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed. 13188 return false; 13189 // Otherwise we have r+r or r+i. 13190 break; 13191 case 2: 13192 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed. 13193 return false; 13194 // Allow 2*r as r+r. 13195 break; 13196 default: 13197 // No other scales are supported. 13198 return false; 13199 } 13200 13201 return true; 13202 } 13203 13204 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op, 13205 SelectionDAG &DAG) const { 13206 MachineFunction &MF = DAG.getMachineFunction(); 13207 MachineFrameInfo &MFI = MF.getFrameInfo(); 13208 MFI.setReturnAddressIsTaken(true); 13209 13210 if (verifyReturnAddressArgumentIsConstant(Op, DAG)) 13211 return SDValue(); 13212 13213 SDLoc dl(Op); 13214 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 13215 13216 // Make sure the function does not optimize away the store of the RA to 13217 // the stack. 13218 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 13219 FuncInfo->setLRStoreRequired(); 13220 bool isPPC64 = Subtarget.isPPC64(); 13221 auto PtrVT = getPointerTy(MF.getDataLayout()); 13222 13223 if (Depth > 0) { 13224 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 13225 SDValue Offset = 13226 DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl, 13227 isPPC64 ? MVT::i64 : MVT::i32); 13228 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 13229 DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset), 13230 MachinePointerInfo()); 13231 } 13232 13233 // Just load the return address off the stack. 
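  // (setLRStoreRequired above guarantees the link register is spilled to its
  // frame slot, so the frame-index load below observes the saved value.)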
13234 SDValue RetAddrFI = getReturnAddrFrameIndex(DAG); 13235 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI, 13236 MachinePointerInfo()); 13237 } 13238 13239 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, 13240 SelectionDAG &DAG) const { 13241 SDLoc dl(Op); 13242 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 13243 13244 MachineFunction &MF = DAG.getMachineFunction(); 13245 MachineFrameInfo &MFI = MF.getFrameInfo(); 13246 MFI.setFrameAddressIsTaken(true); 13247 13248 EVT PtrVT = getPointerTy(MF.getDataLayout()); 13249 bool isPPC64 = PtrVT == MVT::i64; 13250 13251 // Naked functions never have a frame pointer, and so we use r1. For all 13252 // other functions, this decision must be delayed until during PEI. 13253 unsigned FrameReg; 13254 if (MF.getFunction()->hasFnAttribute(Attribute::Naked)) 13255 FrameReg = isPPC64 ? PPC::X1 : PPC::R1; 13256 else 13257 FrameReg = isPPC64 ? PPC::FP8 : PPC::FP; 13258 13259 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, 13260 PtrVT); 13261 while (Depth--) 13262 FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(), 13263 FrameAddr, MachinePointerInfo()); 13264 return FrameAddr; 13265 } 13266 13267 // FIXME? Maybe this could be a TableGen attribute on some registers and 13268 // this table could be generated automatically from RegInfo. 13269 unsigned PPCTargetLowering::getRegisterByName(const char* RegName, EVT VT, 13270 SelectionDAG &DAG) const { 13271 bool isPPC64 = Subtarget.isPPC64(); 13272 bool isDarwinABI = Subtarget.isDarwinABI(); 13273 13274 if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) || 13275 (!isPPC64 && VT != MVT::i32)) 13276 report_fatal_error("Invalid register global variable type"); 13277 13278 bool is64Bit = isPPC64 && VT == MVT::i64; 13279 unsigned Reg = StringSwitch<unsigned>(RegName) 13280 .Case("r1", is64Bit ? PPC::X1 : PPC::R1) 13281 .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2) 13282 .Case("r13", (!isPPC64 && isDarwinABI) ? 0 : 13283 (is64Bit ? PPC::X13 : PPC::R13)) 13284 .Default(0); 13285 13286 if (Reg) 13287 return Reg; 13288 report_fatal_error("Invalid register name global variable"); 13289 } 13290 13291 bool 13292 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 13293 // The PowerPC target isn't yet aware of offsets. 
  return false;
}

bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::ppc_qpx_qvlfd:
  case Intrinsic::ppc_qpx_qvlfs:
  case Intrinsic::ppc_qpx_qvlfcd:
  case Intrinsic::ppc_qpx_qvlfcs:
  case Intrinsic::ppc_qpx_qvlfiwa:
  case Intrinsic::ppc_qpx_qvlfiwz:
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = 1;
    Info.vol = false;
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::ppc_qpx_qvlfda:
  case Intrinsic::ppc_qpx_qvlfsa:
  case Intrinsic::ppc_qpx_qvlfcda:
  case Intrinsic::ppc_qpx_qvlfcsa:
  case Intrinsic::ppc_qpx_qvlfiwaa:
  case Intrinsic::ppc_qpx_qvlfiwza: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvlfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = 1;
    Info.vol = false;
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfd:
  case Intrinsic::ppc_qpx_qvstfs:
  case Intrinsic::ppc_qpx_qvstfcd:
  case Intrinsic::ppc_qpx_qvstfcs:
  case Intrinsic::ppc_qpx_qvstfiw:
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = 1;
    Info.vol = false;
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfda:
  case Intrinsic::ppc_qpx_qvstfsa:
  case Intrinsic::ppc_qpx_qvstfcda:
  case Intrinsic::ppc_qpx_qvstfcsa:
  case Intrinsic::ppc_qpx_qvstfiwa: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = 1;
    Info.vol = false;
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  default:
    break;
  }

  return false;
}

/// getOptimalMemOpType - Returns the target-specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero, that means it is safe to assume the
/// destination alignment can satisfy any constraint. Similarly, if SrcAlign
/// is zero, there is no need to check it against an alignment requirement,
/// probably because the source does not need to be loaded. If 'IsMemset' is
/// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
/// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
/// source is constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
                                           unsigned DstAlign, unsigned SrcAlign,
                                           bool IsMemset, bool ZeroMemset,
                                           bool MemcpyStrSrc,
                                           MachineFunction &MF) const {
  if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
    const Function *F = MF.getFunction();
    // When expanding a memset, require at least two QPX instructions to cover
    // the cost of loading the value to be stored from the constant pool.
    if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
        (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
        !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
      return MVT::v4f64;
    }

    // We should use Altivec/VSX loads and stores when available. For unaligned
    // addresses, unaligned VSX loads are only fast starting with the P8.
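    // In other words (illustrative restatement): use a 16-byte vector type
    // when both sides are known or assumed 16-byte aligned, or when the
    // subtarget handles unaligned vector accesses well (P8 vector, or VSX
    // for the memset case, which performs no source loads).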
    if (Subtarget.hasAltivec() && Size >= 16 &&
        (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
         ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
      return MVT::v4i32;
  }

  if (Subtarget.isPPC64()) {
    return MVT::i64;
  }

  return MVT::i32;
}

/// \brief Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  return !(BitSize == 0 || BitSize > 64);
}

bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Generally speaking, zexts are not free, but they are free when they can be
  // folded with other operations.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  // FIXME: Add other cases...
  //  - 32-bit shifts with a zext to i64
  //  - zext after ctlz, bswap, etc.
  //  - zext after and by a constant mask

  return TargetLowering::isZExtFree(Val, VT2);
}

bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
  assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
         "invalid fpext types");
  return true;
}

bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                       unsigned,
                                                       unsigned,
                                                       bool *Fast) const {
  if (DisablePPCUnaligned)
    return false;

  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual expansion,
  // and generally only traps for software emulation when crossing page
  // boundaries.
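  // Vector types are only accepted below when VSX is available and the type
  // is one of v2f64/v2i64/v4f32/v4i32; ppcf128 is always rejected.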

  if (!VT.isSimple())
    return false;

  if (VT.getSimpleVT().isVector()) {
    if (Subtarget.hasVSX()) {
      if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
          VT != MVT::v4f32 && VT != MVT::v4i32)
        return false;
    } else {
      return false;
    }
  }

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  default:
    break;
  }

  return false;
}

const MCPhysReg *
PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
  // LR is a callee-save register, but we must treat it as clobbered by any call
  // site. Hence we include LR in the scratch registers, which are in turn added
  // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
  // to CTR, which is used by any indirect call.
  static const MCPhysReg ScratchRegs[] = {
    PPC::X12, PPC::LR8, PPC::CTR8, 0
  };

  return ScratchRegs;
}

unsigned PPCTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
}

unsigned PPCTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
}

bool
PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
                        EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves

  if (Subtarget.hasVSX() || Subtarget.hasQPX())
    return true;

  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref || Subtarget.enableMachineScheduler())
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}

// Create a fast isel object.
FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) const {
  return PPC::createFastISel(FuncInfo, LibInfo);
}

void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  if (Subtarget.isDarwinABI()) return;
  if (!Subtarget.isPPC64()) return;

  // Update IsSplitCSR in PPCFunctionInfo
  PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>();
  PFI->setIsSplitCSR(true);
}

void PPCTargetLowering::insertCopiesSplitCSR(
  MachineBasicBlock *Entry,
  const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (PPC::G8RCRegClass.contains(*I))
      RC = &PPC::G8RCRegClass;
    else if (PPC::F8RCRegClass.contains(*I))
      RC = &PPC::F8RCRegClass;
    else if (PPC::CRRCRegClass.contains(*I))
      RC = &PPC::CRRCRegClass;
    else if (PPC::VRRCRegClass.contains(*I))
      RC = &PPC::VRRCRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    unsigned NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions, it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction()->hasFnAttribute(
               Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool PPCTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}

bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  if (!VT.isSimple() || !Subtarget.hasVSX())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  default:
    // For FP types that are currently not supported by PPC backend, return
    // false. Examples: f16, f80.
    return false;
  case MVT::f32:
  case MVT::f64:
  case MVT::ppcf128:
    return Imm.isPosZero();
  }
}

// For vector shift operation op, fold
// (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
                                  SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarSizeInBits();
  unsigned Opcode = N->getOpcode();
  unsigned TargetOpcode;

  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected shift operation");
  case ISD::SHL:
    TargetOpcode = PPCISD::SHL;
    break;
  case ISD::SRL:
    TargetOpcode = PPCISD::SRL;
    break;
  case ISD::SRA:
    TargetOpcode = PPCISD::SRA;
    break;
  }

  if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
      N1->getOpcode() == ISD::AND)
    if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
      if (Mask->getZExtValue() == OpSizeInBits - 1)
        return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));

  return SDValue();
}

SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Only duplicate to increase tail-calls for the 64-bit SysV ABIs.
  if (!Subtarget.isSVR4ABI() || !Subtarget.isPPC64())
    return false;

  // If not a tail call then no need to proceed.
  if (!CI->isTailCall())
    return false;

  // If tail calls are disabled for the caller then we are done.
  const Function *Caller = CI->getParent()->getParent();
  auto Attr = Caller->getFnAttribute("disable-tail-calls");
  if (Attr.getValueAsString() == "true")
    return false;

  // If sibling calls have been disabled and tail-calls aren't guaranteed
  // there is no reason to duplicate.
  auto &TM = getTargetMachine();
  if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
    return false;

  // Can't tail call a function called indirectly, or if it has variadic args.
  const Function *Callee = CI->getCalledFunction();
  if (!Callee || Callee->isVarArg())
    return false;

  // Make sure the callee and caller calling conventions are eligible for tco.
  if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
                                           CI->getCallingConv()))
    return false;

  // If the function is local then we have a good chance at tail-calling it.
  return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
}
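// Illustrative note for mayBeEmittedAsTailCall above: a site roughly like
//   %r = tail call i64 @known_callee(i64 %x)
// in a 64-bit SVR4 function, with tail calls not disabled, a non-variadic,
// DSO-local callee, and eligible calling conventions on both sides, is the
// kind of call the checks above allow return duplication for.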