//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCallingConv.h"
#include "PPCCCState.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCISelLowering.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"
static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
    cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
    cl::desc("disable setting the node scheduling preference to ILP on PPC"),
    cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
    cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
    cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!useSoftFloat()) {
    addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
    addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
  }

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // This is used in the ppcf128->int sequence. Note it has different semantics
  // from FP_ROUND: that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Legal);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::CTTZ, MVT::i32, Legal);
    setOperationAction(ISD::CTTZ, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i32, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have Select.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP is NOT intended to support SjLj exception
  // handling; it is a lightweight setjmp/longjmp replacement to support
  // continuations, user-level threading, and so on. As a result, no other
  // SjLj exception interfaces are implemented; please don't build your own
  // exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  if (Subtarget.isSVR4ABI() && !isPPC64)
    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
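  // (Counter-based hardware loops use a chained intrinsic that produces an i1
  // condition, which is why INTRINSIC_W_CHAIN gets a custom action for i1.)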
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // Vector instructions introduced in P8.
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9.
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType(ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FPOWI, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);
      for (MVT InnerVT : MVT::vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
      setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128-bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However, due to direct-move costs, it's not
        // worth doing.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Vector operation legalization checks the result type of
      // SIGN_EXTEND_INREG, overall legalization checks the inner type.
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128-bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f64, Custom);
    setOperationAction(ISD::STORE, MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_ROUND_INREG, MVT::v4f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG, MVT::v4f64, Legal);
    setOperationAction(ISD::FABS, MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f32, Custom);
    setOperationAction(ISD::STORE, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOWI, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND, MVT::v4i1, Legal);
    setOperationAction(ISD::OR, MVT::v4i1, Legal);
    setOperationAction(ISD::XOR, MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
    setOperationAction(ISD::STORE, MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(2);
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(4);

  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
    setPrefFunctionAlignment(4);
    setPrefLoopAlignment(4);
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}
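// For example, on an Altivec (non-QPX) target, a struct containing a v4i32
// member hits the 128-bit vector case above and yields MaxAlign = 16, the
// MaxMaxAlign that getByValTypeAlignment passes in below.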
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary.
  // The rest is 8 on PPC64 and 4 on PPC32 boundary.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
  return Align;
}

bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER: break;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
  case PPCISD::FRE: return "PPCISD::FRE";
  case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::XXSPLT: return "PPCISD::XXSPLT";
  case PPCISD::XXINSERT: return "PPCISD::XXINSERT";
  case PPCISD::VECSHL: return "PPCISD::VECSHL";
  case PPCISD::CMPB: return "PPCISD::CMPB";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
  case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
  case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL: return "PPCISD::SRL";
  case PPCISD::SRA: return "PPCISD::SRA";
  case PPCISD::SHL: return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL: return "PPCISD::CALL";
  case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR: return "PPCISD::MTCTR";
  case PPCISD::BCTRL: return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
  case PPCISD::MFVSR: return "PPCISD::MFVSR";
  case PPCISD::MTVSRA: return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ";
  case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP";
  case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP";
  case PPCISD::ANDIo_1_EQ_BIT: return "PPCISD::ANDIo_1_EQ_BIT";
  case PPCISD::ANDIo_1_GT_BIT: return "PPCISD::ANDIo_1_GT_BIT";
  case PPCISD::VCMP: return "PPCISD::VCMP";
  case PPCISD::VCMPo: return "PPCISD::VCMPo";
  case PPCISD::LBRX: return "PPCISD::LBRX";
  case PPCISD::STBRX: return "PPCISD::STBRX";
  case PPCISD::LFIWAX: return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX: return "PPCISD::LFIWZX";
  case PPCISD::LXSIZX: return "PPCISD::LXSIZX";
  case PPCISD::STXSIX: return "PPCISD::STXSIX";
  case PPCISD::VEXTS: return "PPCISD::VEXTS";
  case PPCISD::LXVD2X: return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X: return "PPCISD::STXVD2X";
  case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ: return "PPCISD::BDNZ";
  case PPCISD::BDZ: return "PPCISD::BDZ";
  case PPCISD::MFFS: return "PPCISD::MFFS";
  case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET: return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
  case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT";
  case PPCISD::SC: return "PPCISD::SC";
  case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB";
  case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE";
  case PPCISD::RFEBB: return "PPCISD::RFEBB";
  case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD";
  case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN";
  case PPCISD::QVFPERM: return "PPCISD::QVFPERM";
  case PPCISD::QVGPCI: return "PPCISD::QVGPCI";
  case PPCISD::QVALIGNI: return "PPCISD::QVALIGNI";
  case PPCISD::QVESPLATI: return "PPCISD::QVESPLATI";
  case PPCISD::QBFLT: return "PPCISD::QBFLT";
  case PPCISD::QVLFSb: return "PPCISD::QVLFSb";
  }
  return nullptr;
}

EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
                                          EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

  if (Subtarget.hasQPX())
    return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());

  return VT.changeVectorElementTypeToInteger();
}

bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
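/// For example, with ShuffleKind 0 on a big-endian target, the expected mask
/// is <1,3,5,...,31>: the low-order byte of each halfword across both inputs.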
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
        return false;
  }
  return true;
}

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
/// current subtarget.
///
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
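/// For example, with ShuffleKind 0 on a big-endian target, the expected mask
/// is <4,5,6,7, 12,13,14,15, 20,21,22,23, 28,29,30,31>: the low-order word of
/// each doubleword across both inputs.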
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  const PPCSubtarget& Subtarget =
      static_cast<const PPCSubtarget&>(DAG.getSubtarget());
  if (!Subtarget.hasP8Vector())
    return false;

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
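/// For example, a big-endian vmrglw (UnitSize 4) with two different inputs
/// (ShuffleKind 0) expects the mask
/// <8,9,10,11, 24,25,26,27, 12,13,14,15, 28,29,30,31>.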
1379 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1380                              unsigned ShuffleKind, SelectionDAG &DAG) {
1381   if (DAG.getDataLayout().isLittleEndian()) {
1382     if (ShuffleKind == 1) // unary
1383       return isVMerge(N, UnitSize, 0, 0);
1384     else if (ShuffleKind == 2) // swapped
1385       return isVMerge(N, UnitSize, 0, 16);
1386     else
1387       return false;
1388   } else {
1389     if (ShuffleKind == 1) // unary
1390       return isVMerge(N, UnitSize, 8, 8);
1391     else if (ShuffleKind == 0) // normal
1392       return isVMerge(N, UnitSize, 8, 24);
1393     else
1394       return false;
1395   }
1396 }
1397
1398 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
1399 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
1400 /// The ShuffleKind distinguishes between big-endian merges with two
1401 /// different inputs (0), either-endian merges with two identical inputs (1),
1402 /// and little-endian merges with two different inputs (2). For the latter,
1403 /// the input operands are swapped (see PPCInstrAltivec.td).
1404 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1405                              unsigned ShuffleKind, SelectionDAG &DAG) {
1406   if (DAG.getDataLayout().isLittleEndian()) {
1407     if (ShuffleKind == 1) // unary
1408       return isVMerge(N, UnitSize, 8, 8);
1409     else if (ShuffleKind == 2) // swapped
1410       return isVMerge(N, UnitSize, 8, 24);
1411     else
1412       return false;
1413   } else {
1414     if (ShuffleKind == 1) // unary
1415       return isVMerge(N, UnitSize, 0, 0);
1416     else if (ShuffleKind == 0) // normal
1417       return isVMerge(N, UnitSize, 0, 16);
1418     else
1419       return false;
1420   }
1421 }
1422
1423 /**
1424  * \brief Common function used to match vmrgew and vmrgow shuffles
1425  *
1426  * The indexOffset determines whether to look for even or odd words in
1427  * the shuffle mask. This is based on the endianness of the target
1428  * machine.
1429  *   - Little Endian:
1430  *     - Use offset of 0 to check for odd elements
1431  *     - Use offset of 4 to check for even elements
1432  *   - Big Endian:
1433  *     - Use offset of 0 to check for even elements
1434  *     - Use offset of 4 to check for odd elements
1435  * A detailed description of the vector element ordering for little endian and
1436  * big endian can be found at
1437  * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
1438  * Targeting your applications - what little endian and big endian IBM XL C/C++
1439  * compiler differences mean to you
1440  *
1441  * The mask to the shuffle vector instruction specifies the indices of the
1442  * elements from the two input vectors to place in the result. The elements are
1443  * numbered in array-access order, starting with the first vector. These vectors
1444  * are always of type v16i8, thus each vector will contain 16 elements of 8 bits
1445  * each. More info on the shuffle vector can be found in the
1446  * http://llvm.org/docs/LangRef.html#shufflevector-instruction
1447  * Language Reference.
1448  *
1449  * The RHSStartValue indicates whether the same input vectors are used (unary)
1450  * or two different input vectors are used, based on the following:
1451  *   - If the instruction uses the same vector for both inputs, the range of the
1452  *     indices will be 0 to 15. In this case, the RHSStart value passed should
1453  *     be 0.
1454  *   - If the instruction has two different vectors then the range of the
1455  *     indices will be 0 to 31. In this case, the RHSStart value passed should
1456  *     be 16 (indices 0-15 specify elements in the first vector while indices 16
1457  *     to 31 specify elements in the second vector).
1458  *
1459  * \param[in] N The shuffle vector SD Node to analyze
1460  * \param[in] IndexOffset Specifies whether to look for even or odd elements
1461  * \param[in] RHSStartValue Specifies the starting index for the right-hand
1462  *            input vector to the shuffle_vector instruction
1463  * \return true iff this shuffle vector represents an even or odd word merge
1464  */
1465 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
1466                      unsigned RHSStartValue) {
1467   if (N->getValueType(0) != MVT::v16i8)
1468     return false;
1469
1470   for (unsigned i = 0; i < 2; ++i)
1471     for (unsigned j = 0; j < 4; ++j)
1472       if (!isConstantOrUndef(N->getMaskElt(i*4+j),
1473                              i*RHSStartValue+j+IndexOffset) ||
1474           !isConstantOrUndef(N->getMaskElt(i*4+j+8),
1475                              i*RHSStartValue+j+IndexOffset+8))
1476         return false;
1477   return true;
1478 }
1479
1480 /**
1481  * \brief Determine if the specified shuffle mask is suitable for the vmrgew or
1482  * vmrgow instructions.
1483  *
1484  * \param[in] N The shuffle vector SD Node to analyze
1485  * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
1486  * \param[in] ShuffleKind Identify the type of merge:
1487  *   - 0 = big-endian merge with two different inputs;
1488  *   - 1 = either-endian merge with two identical inputs;
1489  *   - 2 = little-endian merge with two different inputs (inputs are swapped for
1490  *     little-endian merges).
1491  * \param[in] DAG The current SelectionDAG
1492  * \return true iff this shuffle mask represents an even or odd word merge
1493  */
1494 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
1495                               unsigned ShuffleKind, SelectionDAG &DAG) {
1496   if (DAG.getDataLayout().isLittleEndian()) {
1497     unsigned indexOffset = CheckEven ? 4 : 0;
1498     if (ShuffleKind == 1) // Unary
1499       return isVMerge(N, indexOffset, 0);
1500     else if (ShuffleKind == 2) // swapped
1501       return isVMerge(N, indexOffset, 16);
1502     else
1503       return false;
1504   }
1505   else {
1506     unsigned indexOffset = CheckEven ? 0 : 4;
1507     if (ShuffleKind == 1) // Unary
1508       return isVMerge(N, indexOffset, 0);
1509     else if (ShuffleKind == 0) // Normal
1510       return isVMerge(N, indexOffset, 16);
1511     else
1512       return false;
1513   }
1514   return false;
1515 }
1516
1517 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
1518 /// amount, otherwise return -1.
1519 /// The ShuffleKind distinguishes between big-endian operations with two
1520 /// different inputs (0), either-endian operations with two identical inputs
1521 /// (1), and little-endian operations with two different inputs (2). For the
1522 /// latter, the input operands are swapped (see PPCInstrAltivec.td).
1523 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
1524                              SelectionDAG &DAG) {
1525   if (N->getValueType(0) != MVT::v16i8)
1526     return -1;
1527
1528   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1529
1530   // Find the first non-undef value in the shuffle mask.
1531   unsigned i;
1532   for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
1533     /*search*/;
1534
1535   if (i == 16) return -1; // all undef.
1536
1537   // Otherwise, check to see if the rest of the elements are consecutively
1538   // numbered from this value.
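  // For example (illustrative), with ShuffleKind 0 on a big-endian target the
  // mask <3,4,5,...,18> starts at element 3 and is consecutive, so the
  // returned shift amount is 3.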
1539 unsigned ShiftAmt = SVOp->getMaskElt(i); 1540 if (ShiftAmt < i) return -1; 1541 1542 ShiftAmt -= i; 1543 bool isLE = DAG.getDataLayout().isLittleEndian(); 1544 1545 if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) { 1546 // Check the rest of the elements to see if they are consecutive. 1547 for (++i; i != 16; ++i) 1548 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 1549 return -1; 1550 } else if (ShuffleKind == 1) { 1551 // Check the rest of the elements to see if they are consecutive. 1552 for (++i; i != 16; ++i) 1553 if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15)) 1554 return -1; 1555 } else 1556 return -1; 1557 1558 if (isLE) 1559 ShiftAmt = 16 - ShiftAmt; 1560 1561 return ShiftAmt; 1562 } 1563 1564 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand 1565 /// specifies a splat of a single element that is suitable for input to 1566 /// VSPLTB/VSPLTH/VSPLTW. 1567 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) { 1568 assert(N->getValueType(0) == MVT::v16i8 && 1569 (EltSize == 1 || EltSize == 2 || EltSize == 4)); 1570 1571 // The consecutive indices need to specify an element, not part of two 1572 // different elements. So abandon ship early if this isn't the case. 1573 if (N->getMaskElt(0) % EltSize != 0) 1574 return false; 1575 1576 // This is a splat operation if each element of the permute is the same, and 1577 // if the value doesn't reference the second vector. 1578 unsigned ElementBase = N->getMaskElt(0); 1579 1580 // FIXME: Handle UNDEF elements too! 1581 if (ElementBase >= 16) 1582 return false; 1583 1584 // Check that the indices are consecutive, in the case of a multi-byte element 1585 // splatted with a v16i8 mask. 1586 for (unsigned i = 1; i != EltSize; ++i) 1587 if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase)) 1588 return false; 1589 1590 for (unsigned i = EltSize, e = 16; i != e; i += EltSize) { 1591 if (N->getMaskElt(i) < 0) continue; 1592 for (unsigned j = 0; j != EltSize; ++j) 1593 if (N->getMaskElt(i+j) != N->getMaskElt(j)) 1594 return false; 1595 } 1596 return true; 1597 } 1598 1599 // Check that the mask is shuffling words 1600 static bool isWordShuffleMask(ShuffleVectorSDNode *N) { 1601 for (unsigned i = 0; i < 4; ++i) { 1602 unsigned B0 = N->getMaskElt(i*4); 1603 unsigned B1 = N->getMaskElt(i*4+1); 1604 unsigned B2 = N->getMaskElt(i*4+2); 1605 unsigned B3 = N->getMaskElt(i*4+3); 1606 if (B0 % 4) 1607 return false; 1608 if (B1 != B0+1 || B2 != B1+1 || B3 != B2+1) 1609 return false; 1610 } 1611 1612 return true; 1613 } 1614 1615 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, 1616 unsigned &InsertAtByte, bool &Swap, bool IsLE) { 1617 if (!isWordShuffleMask(N)) 1618 return false; 1619 1620 // Now we look at mask elements 0,4,8,12 1621 unsigned M0 = N->getMaskElt(0) / 4; 1622 unsigned M1 = N->getMaskElt(4) / 4; 1623 unsigned M2 = N->getMaskElt(8) / 4; 1624 unsigned M3 = N->getMaskElt(12) / 4; 1625 unsigned LittleEndianShifts[] = { 2, 1, 0, 3 }; 1626 unsigned BigEndianShifts[] = { 3, 0, 1, 2 }; 1627 1628 // Below, let H and L be arbitrary elements of the shuffle mask 1629 // where H is in the range [4,7] and L is in the range [0,3]. 1630 // H, 1, 2, 3 or L, 5, 6, 7 1631 if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) || 1632 (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) { 1633 ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3]; 1634 InsertAtByte = IsLE ? 
12 : 0; 1635 Swap = M0 < 4; 1636 return true; 1637 } 1638 // 0, H, 2, 3 or 4, L, 6, 7 1639 if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) || 1640 (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) { 1641 ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3]; 1642 InsertAtByte = IsLE ? 8 : 4; 1643 Swap = M1 < 4; 1644 return true; 1645 } 1646 // 0, 1, H, 3 or 4, 5, L, 7 1647 if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) || 1648 (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) { 1649 ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3]; 1650 InsertAtByte = IsLE ? 4 : 8; 1651 Swap = M2 < 4; 1652 return true; 1653 } 1654 // 0, 1, 2, H or 4, 5, 6, L 1655 if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) || 1656 (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) { 1657 ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3]; 1658 InsertAtByte = IsLE ? 0 : 12; 1659 Swap = M3 < 4; 1660 return true; 1661 } 1662 1663 // If both vector operands for the shuffle are the same vector, the mask will 1664 // contain only elements from the first one and the second one will be undef. 1665 if (N->getOperand(1).isUndef()) { 1666 ShiftElts = 0; 1667 Swap = true; 1668 unsigned XXINSERTWSrcElem = IsLE ? 2 : 1; 1669 if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) { 1670 InsertAtByte = IsLE ? 12 : 0; 1671 return true; 1672 } 1673 if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) { 1674 InsertAtByte = IsLE ? 8 : 4; 1675 return true; 1676 } 1677 if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) { 1678 InsertAtByte = IsLE ? 4 : 8; 1679 return true; 1680 } 1681 if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) { 1682 InsertAtByte = IsLE ? 0 : 12; 1683 return true; 1684 } 1685 } 1686 1687 return false; 1688 } 1689 1690 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, 1691 bool &Swap, bool IsLE) { 1692 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8"); 1693 // Ensure each byte index of the word is consecutive. 1694 if (!isWordShuffleMask(N)) 1695 return false; 1696 1697 // Now we look at mask elements 0,4,8,12, which are the beginning of words. 1698 unsigned M0 = N->getMaskElt(0) / 4; 1699 unsigned M1 = N->getMaskElt(4) / 4; 1700 unsigned M2 = N->getMaskElt(8) / 4; 1701 unsigned M3 = N->getMaskElt(12) / 4; 1702 1703 // If both vector operands for the shuffle are the same vector, the mask will 1704 // contain only elements from the first one and the second one will be undef. 1705 if (N->getOperand(1).isUndef()) { 1706 assert(M0 < 4 && "Indexing into an undef vector?"); 1707 if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4) 1708 return false; 1709 1710 ShiftElts = IsLE ? (4 - M0) % 4 : M0; 1711 Swap = false; 1712 return true; 1713 } 1714 1715 // Ensure each word index of the ShuffleVector Mask is consecutive. 1716 if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8) 1717 return false; 1718 1719 if (IsLE) { 1720 if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) { 1721 // Input vectors don't need to be swapped if the leading element 1722 // of the result is one of the 3 left elements of the second vector 1723 // (or if there is no shift to be done at all). 
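      // For example (illustrative), M0 == 6 selects word 6 of the concatenated
      // pair, giving ShiftElts == (8 - 6) % 8 == 2 with no swap.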
1724       Swap = false;
1725       ShiftElts = (8 - M0) % 8;
1726     } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
1727       // Input vectors need to be swapped if the leading element
1728       // of the result is one of the 3 left elements of the first vector
1729       // (or if we're shifting by 4 - thereby simply swapping the vectors).
1730       Swap = true;
1731       ShiftElts = (4 - M0) % 4;
1732     }
1733
1734     return true;
1735   } else { // BE
1736     if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
1737       // Input vectors don't need to be swapped if the leading element
1738       // of the result is one of the 4 elements of the first vector.
1739       Swap = false;
1740       ShiftElts = M0;
1741     } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
1742       // Input vectors need to be swapped if the leading element
1743       // of the result is one of the 4 elements of the right vector.
1744       Swap = true;
1745       ShiftElts = M0 - 4;
1746     }
1747
1748     return true;
1749   }
1750 }
1751
1752
1753 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
1754 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
1755 unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
1756                                 SelectionDAG &DAG) {
1757   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1758   assert(isSplatShuffleMask(SVOp, EltSize));
1759   if (DAG.getDataLayout().isLittleEndian())
1760     return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
1761   else
1762     return SVOp->getMaskElt(0) / EltSize;
1763 }
1764
1765 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
1766 /// by using a vspltis[bhw] instruction of the specified element size, return
1767 /// the constant being splatted. The ByteSize field indicates the number of
1768 /// bytes of each element [124] -> [bhw].
1769 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
1770   SDValue OpVal(nullptr, 0);
1771
1772   // If ByteSize of the splat is bigger than the element size of the
1773   // build_vector, then we have a case where we are checking for a splat where
1774   // multiple elements of the buildvector are folded together into a single
1775   // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
1776   unsigned EltSize = 16/N->getNumOperands();
1777   if (EltSize < ByteSize) {
1778     unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
1779     SDValue UniquedVals[4];
1780     assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
1781
1782     // See if all of the elements in the buildvector agree across the chunks.
1783     for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
1784       if (N->getOperand(i).isUndef()) continue;
1785       // If the element isn't a constant, bail fully out.
1786       if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
1787
1788       if (!UniquedVals[i&(Multiple-1)].getNode())
1789         UniquedVals[i&(Multiple-1)] = N->getOperand(i);
1790       else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
1791         return SDValue();  // no match.
1792     }
1793
1794     // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
1795     // either constant or undef values that are identical for each chunk. See
1796     // if these chunks can form into a larger vspltis*.
1797
1798     // Check to see if all of the leading entries are either 0 or -1. If
1799     // neither, then this won't fit into the immediate field.
1800     bool LeadingZero = true;
1801     bool LeadingOnes = true;
1802     for (unsigned i = 0; i != Multiple-1; ++i) {
1803       if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.
1804
1805       LeadingZero &= isNullConstant(UniquedVals[i]);
1806       LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
1807     }
1808     // Finally, check the least significant entry.
1809     if (LeadingZero) {
1810       if (!UniquedVals[Multiple-1].getNode())
1811         return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
1812       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
1813       if (Val < 16)                                  // 0,0,0,4 -> vspltisw(4)
1814         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
1815     }
1816     if (LeadingOnes) {
1817       if (!UniquedVals[Multiple-1].getNode())
1818         return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
1819       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
1820       if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
1821         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
1822     }
1823
1824     return SDValue();
1825   }
1826
1827   // Check to see if this buildvec has a single non-undef value in its elements.
1828   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
1829     if (N->getOperand(i).isUndef()) continue;
1830     if (!OpVal.getNode())
1831       OpVal = N->getOperand(i);
1832     else if (OpVal != N->getOperand(i))
1833       return SDValue();
1834   }
1835
1836   if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.
1837
1838   unsigned ValSizeInBytes = EltSize;
1839   uint64_t Value = 0;
1840   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
1841     Value = CN->getZExtValue();
1842   } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
1843     assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
1844     Value = FloatToBits(CN->getValueAPF().convertToFloat());
1845   }
1846
1847   // If the splat value is larger than the element value, then we can never do
1848   // this splat.  The only case that we could fit the replicated bits into our
1849   // immediate field for would be zero, and we prefer to use vxor for it.
1850   if (ValSizeInBytes < ByteSize) return SDValue();
1851
1852   // If the element value is larger than the splat value, check if it consists
1853   // of a repeated bit pattern of size ByteSize.
1854   if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
1855     return SDValue();
1856
1857   // Properly sign extend the value.
1858   int MaskVal = SignExtend32(Value, ByteSize * 8);
1859
1860   // If this is zero, don't match; zero matches ISD::isBuildVectorAllZeros.
1861   if (MaskVal == 0) return SDValue();
1862
1863   // Finally, if this value fits in a 5-bit sext field, return it.
1864   if (SignExtend32<5>(MaskVal) == MaskVal)
1865     return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
1866   return SDValue();
1867 }
1868
1869 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
1870 /// amount, otherwise return -1.
1871 int PPC::isQVALIGNIShuffleMask(SDNode *N) {
1872   EVT VT = N->getValueType(0);
1873   if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
1874     return -1;
1875
1876   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1877
1878   // Find the first non-undef value in the shuffle mask.
1879   unsigned i;
1880   for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
1881     /*search*/;
1882
1883   if (i == 4) return -1;  // all undef.
1884
1885   // Otherwise, check to see if the rest of the elements are consecutively
1886   // numbered from this value.
1887   unsigned ShiftAmt = SVOp->getMaskElt(i);
1888   if (ShiftAmt < i) return -1;
1889   ShiftAmt -= i;
1890
1891   // Check the rest of the elements to see if they are consecutive.
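  // For example (illustrative), the mask <1,2,3,4> is consecutive starting at
  // element 1, so the shift amount returned is 1.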
1892   for (++i; i != 4; ++i)
1893     if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
1894       return -1;
1895
1896   return ShiftAmt;
1897 }
1898
1899 //===----------------------------------------------------------------------===//
1900 // Addressing Mode Selection
1901 //===----------------------------------------------------------------------===//
1902
1903 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit
1904 /// or 64-bit immediate, and if the value can be accurately represented as a
1905 /// sign extension from a 16-bit value. If so, this returns true and the
1906 /// immediate.
1907 static bool isIntS16Immediate(SDNode *N, short &Imm) {
1908   if (!isa<ConstantSDNode>(N))
1909     return false;
1910
1911   Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
1912   if (N->getValueType(0) == MVT::i32)
1913     return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
1914   else
1915     return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
1916 }
1917 static bool isIntS16Immediate(SDValue Op, short &Imm) {
1918   return isIntS16Immediate(Op.getNode(), Imm);
1919 }
1920
1921 /// SelectAddressRegReg - Given the specified address, check to see if it
1922 /// can be represented as an indexed [r+r] operation. Returns false if it
1923 /// can be more efficiently represented with [r+imm].
1924 bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
1925                                             SDValue &Index,
1926                                             SelectionDAG &DAG) const {
1927   short imm = 0;
1928   if (N.getOpcode() == ISD::ADD) {
1929     if (isIntS16Immediate(N.getOperand(1), imm))
1930       return false;    // r+i
1931     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
1932       return false;    // r+i
1933
1934     Base = N.getOperand(0);
1935     Index = N.getOperand(1);
1936     return true;
1937   } else if (N.getOpcode() == ISD::OR) {
1938     if (isIntS16Immediate(N.getOperand(1), imm))
1939       return false;    // r+i; let [r+imm] fold it if it can.
1940
1941     // If this is an or of disjoint bitfields, we can codegen this as an add
1942     // (for better address arithmetic) if the LHS and RHS of the OR are
1943     // provably disjoint.
1944     KnownBits LHSKnown, RHSKnown;
1945     DAG.computeKnownBits(N.getOperand(0), LHSKnown);
1946
1947     if (LHSKnown.Zero.getBoolValue()) {
1948       DAG.computeKnownBits(N.getOperand(1), RHSKnown);
1949       // If all of the bits are known zero on the LHS or RHS, the add won't
1950       // carry.
1951       if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
1952         Base = N.getOperand(0);
1953         Index = N.getOperand(1);
1954         return true;
1955       }
1956     }
1957   }
1958
1959   return false;
1960 }
1961
1962 // If we happen to be doing an i64 load or store into a stack slot that has
1963 // less than a 4-byte alignment, then the frame-index elimination may need to
1964 // use an indexed load or store instruction (because the offset may not be a
1965 // multiple of 4). The extra register needed to hold the offset comes from the
1966 // register scavenger, and it is possible that the scavenger will need to use
1967 // an emergency spill slot. As a result, we need to make sure that a spill slot
1968 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
1969 // stack slot.
1970 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
1971   // FIXME: This does not handle the LWA case.
1972   if (VT != MVT::i64)
1973     return;
1974
1975   // NOTE: We'll exclude negative FIs here, which come from argument
1976   // lowering, because there are no known test cases triggering this problem
1977   // using packed structures (or similar). We can remove this exclusion if
1978   // we find such a test case. The reason why this is so test-case driven is
1979   // because this entire 'fixup' is only to prevent crashes (from the
1980   // register scavenger) on not-really-valid inputs. For example, if we have:
1981   //   %a = alloca i1
1982   //   %b = bitcast i1* %a to i64*
1983   //   store i64 0, i64* %b
1984   // then the store should really be marked as 'align 1', but is not. If it
1985   // were marked as 'align 1' then the indexed form would have been
1986   // instruction-selected initially, and the problem this 'fixup' is preventing
1987   // won't happen regardless.
1988   if (FrameIdx < 0)
1989     return;
1990
1991   MachineFunction &MF = DAG.getMachineFunction();
1992   MachineFrameInfo &MFI = MF.getFrameInfo();
1993
1994   unsigned Align = MFI.getObjectAlignment(FrameIdx);
1995   if (Align >= 4)
1996     return;
1997
1998   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
1999   FuncInfo->setHasNonRISpills();
2000 }
2001
2002 /// Returns true if the address N can be represented by a base register plus
2003 /// a signed 16-bit displacement [r+imm], and if it is not better
2004 /// represented as reg+reg. If Aligned is true, only accept displacements
2005 /// suitable for STD and friends, i.e. multiples of 4.
2006 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
2007                                             SDValue &Base,
2008                                             SelectionDAG &DAG,
2009                                             bool Aligned) const {
2010   // FIXME dl should come from parent load or store, not from address
2011   SDLoc dl(N);
2012   // If this can be more profitably realized as r+r, fail.
2013   if (SelectAddressRegReg(N, Disp, Base, DAG))
2014     return false;
2015
2016   if (N.getOpcode() == ISD::ADD) {
2017     short imm = 0;
2018     if (isIntS16Immediate(N.getOperand(1), imm) &&
2019         (!Aligned || (imm & 3) == 0)) {
2020       Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2021       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2022         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2023         fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2024       } else {
2025         Base = N.getOperand(0);
2026       }
2027       return true; // [r+i]
2028     } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
2029       // Match LOAD (ADD (X, Lo(G))).
2030       assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
2031              && "Cannot handle constant offsets yet!");
2032       Disp = N.getOperand(1).getOperand(0);  // The global address.
2033       assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2034              Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2035              Disp.getOpcode() == ISD::TargetConstantPool ||
2036              Disp.getOpcode() == ISD::TargetJumpTable);
2037       Base = N.getOperand(0);
2038       return true; // [&g+r]
2039     }
2040   } else if (N.getOpcode() == ISD::OR) {
2041     short imm = 0;
2042     if (isIntS16Immediate(N.getOperand(1), imm) &&
2043         (!Aligned || (imm & 3) == 0)) {
2044       // If this is an or of disjoint bitfields, we can codegen this as an add
2045       // (for better address arithmetic) if the LHS and RHS of the OR are
2046       // provably disjoint.
2047       KnownBits LHSKnown;
2048       DAG.computeKnownBits(N.getOperand(0), LHSKnown);
2049
2050       if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
2051         // If all of the bits are known zero on the LHS or RHS, the add won't
2052         // carry.
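        // For example (illustrative), in (or (shl %x, 16), 12) the low 16 bits
        // of the LHS are known zero, so the OR behaves like an ADD and the
        // displacement 12 can be used directly.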
2053         if (FrameIndexSDNode *FI =
2054               dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2055           Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2056           fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2057         } else {
2058           Base = N.getOperand(0);
2059         }
2060         Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2061         return true;
2062       }
2063     }
2064   } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2065     // Loading from a constant address.
2066
2067     // If this address fits entirely in a 16-bit sext immediate field, codegen
2068     // this as "d, 0".
2069     short Imm;
2070     if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) {
2071       Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
2072       Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2073                              CN->getValueType(0));
2074       return true;
2075     }
2076
2077     // Handle 32-bit sext immediates with LIS + addr mode.
2078     if ((CN->getValueType(0) == MVT::i32 ||
2079          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
2080         (!Aligned || (CN->getZExtValue() & 3) == 0)) {
2081       int Addr = (int)CN->getZExtValue();
2082
2083       // Otherwise, break this down into an LIS + disp.
2084       Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
2085
2086       Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
2087                                    MVT::i32);
2088       unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
2089       Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
2090       return true;
2091     }
2092   }
2093
2094   Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2095   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2096     Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2097     fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2098   } else
2099     Base = N;
2100   return true; // [r+0]
2101 }
2102
2103 /// SelectAddressRegRegOnly - Given the specified address, force it to be
2104 /// represented as an indexed [r+r] operation.
2105 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
2106                                                 SDValue &Index,
2107                                                 SelectionDAG &DAG) const {
2108   // Check to see if we can easily represent this as an [r+r] address. This
2109   // will fail if it thinks that the address is more profitably represented as
2110   // reg+imm, e.g. where imm = 0.
2111   if (SelectAddressRegReg(N, Base, Index, DAG))
2112     return true;
2113
2114   // If the operand is an addition, always emit this as [r+r], since this is
2115   // better (for code size, and execution, as the memop does the add for free)
2116   // than emitting an explicit add.
2117   if (N.getOpcode() == ISD::ADD) {
2118     Base = N.getOperand(0);
2119     Index = N.getOperand(1);
2120     return true;
2121   }
2122
2123   // Otherwise, do it the hard way, using R0 as the base register.
2124   Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2125                          N.getValueType());
2126   Index = N;
2127   return true;
2128 }
2129
2130 /// getPreIndexedAddressParts - Returns true (by value), and sets the base
2131 /// pointer, offset pointer, and addressing mode (by reference) if the node's
2132 /// address can be legally represented as a pre-indexed load / store address.
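/// For example (illustrative), a store to (add %r, 16) can become a
/// pre-incremented store such as "stwu r5, 16(r3)", which performs the store
/// and leaves the base register advanced by 16.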
2133 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 2134 SDValue &Offset, 2135 ISD::MemIndexedMode &AM, 2136 SelectionDAG &DAG) const { 2137 if (DisablePPCPreinc) return false; 2138 2139 bool isLoad = true; 2140 SDValue Ptr; 2141 EVT VT; 2142 unsigned Alignment; 2143 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 2144 Ptr = LD->getBasePtr(); 2145 VT = LD->getMemoryVT(); 2146 Alignment = LD->getAlignment(); 2147 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 2148 Ptr = ST->getBasePtr(); 2149 VT = ST->getMemoryVT(); 2150 Alignment = ST->getAlignment(); 2151 isLoad = false; 2152 } else 2153 return false; 2154 2155 // PowerPC doesn't have preinc load/store instructions for vectors (except 2156 // for QPX, which does have preinc r+r forms). 2157 if (VT.isVector()) { 2158 if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) { 2159 return false; 2160 } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) { 2161 AM = ISD::PRE_INC; 2162 return true; 2163 } 2164 } 2165 2166 if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) { 2167 // Common code will reject creating a pre-inc form if the base pointer 2168 // is a frame index, or if N is a store and the base pointer is either 2169 // the same as or a predecessor of the value being stored. Check for 2170 // those situations here, and try with swapped Base/Offset instead. 2171 bool Swap = false; 2172 2173 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base)) 2174 Swap = true; 2175 else if (!isLoad) { 2176 SDValue Val = cast<StoreSDNode>(N)->getValue(); 2177 if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode())) 2178 Swap = true; 2179 } 2180 2181 if (Swap) 2182 std::swap(Base, Offset); 2183 2184 AM = ISD::PRE_INC; 2185 return true; 2186 } 2187 2188 // LDU/STU can only handle immediates that are a multiple of 4. 2189 if (VT != MVT::i64) { 2190 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, false)) 2191 return false; 2192 } else { 2193 // LDU/STU need an address with at least 4-byte alignment. 2194 if (Alignment < 4) 2195 return false; 2196 2197 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, true)) 2198 return false; 2199 } 2200 2201 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 2202 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 2203 // sext i32 to i64 when addr mode is r+i. 2204 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 && 2205 LD->getExtensionType() == ISD::SEXTLOAD && 2206 isa<ConstantSDNode>(Offset)) 2207 return false; 2208 } 2209 2210 AM = ISD::PRE_INC; 2211 return true; 2212 } 2213 2214 //===----------------------------------------------------------------------===// 2215 // LowerOperation implementation 2216 //===----------------------------------------------------------------------===// 2217 2218 /// Return true if we should reference labels using a PICBase, set the HiOpFlags 2219 /// and LoOpFlags to the target MO flags. 2220 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget, 2221 unsigned &HiOpFlags, unsigned &LoOpFlags, 2222 const GlobalValue *GV = nullptr) { 2223 HiOpFlags = PPCII::MO_HA; 2224 LoOpFlags = PPCII::MO_LO; 2225 2226 // Don't use the pic base if not in PIC relocation model. 2227 if (IsPIC) { 2228 HiOpFlags |= PPCII::MO_PIC_FLAG; 2229 LoOpFlags |= PPCII::MO_PIC_FLAG; 2230 } 2231 2232 // If this is a reference to a global value that requires a non-lazy-ptr, make 2233 // sure that instruction lowering adds it. 
2234 if (GV && Subtarget.hasLazyResolverStub(GV)) { 2235 HiOpFlags |= PPCII::MO_NLP_FLAG; 2236 LoOpFlags |= PPCII::MO_NLP_FLAG; 2237 2238 if (GV->hasHiddenVisibility()) { 2239 HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 2240 LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 2241 } 2242 } 2243 } 2244 2245 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, 2246 SelectionDAG &DAG) { 2247 SDLoc DL(HiPart); 2248 EVT PtrVT = HiPart.getValueType(); 2249 SDValue Zero = DAG.getConstant(0, DL, PtrVT); 2250 2251 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); 2252 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); 2253 2254 // With PIC, the first instruction is actually "GR+hi(&G)". 2255 if (isPIC) 2256 Hi = DAG.getNode(ISD::ADD, DL, PtrVT, 2257 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); 2258 2259 // Generate non-pic code that has direct accesses to the constant pool. 2260 // The address of the global is just (hi(&g)+lo(&g)). 2261 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); 2262 } 2263 2264 static void setUsesTOCBasePtr(MachineFunction &MF) { 2265 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2266 FuncInfo->setUsesTOCBasePtr(); 2267 } 2268 2269 static void setUsesTOCBasePtr(SelectionDAG &DAG) { 2270 setUsesTOCBasePtr(DAG.getMachineFunction()); 2271 } 2272 2273 static SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, bool Is64Bit, 2274 SDValue GA) { 2275 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 2276 SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) : 2277 DAG.getNode(PPCISD::GlobalBaseReg, dl, VT); 2278 2279 SDValue Ops[] = { GA, Reg }; 2280 return DAG.getMemIntrinsicNode( 2281 PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT, 2282 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0, false, true, 2283 false, 0); 2284 } 2285 2286 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 2287 SelectionDAG &DAG) const { 2288 EVT PtrVT = Op.getValueType(); 2289 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 2290 const Constant *C = CP->getConstVal(); 2291 2292 // 64-bit SVR4 ABI code is always position-independent. 2293 // The actual address of the GlobalValue is stored in the TOC. 2294 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2295 setUsesTOCBasePtr(DAG); 2296 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0); 2297 return getTOCEntry(DAG, SDLoc(CP), true, GA); 2298 } 2299 2300 unsigned MOHiFlag, MOLoFlag; 2301 bool IsPIC = isPositionIndependent(); 2302 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2303 2304 if (IsPIC && Subtarget.isSVR4ABI()) { 2305 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 2306 PPCII::MO_PIC_FLAG); 2307 return getTOCEntry(DAG, SDLoc(CP), false, GA); 2308 } 2309 2310 SDValue CPIHi = 2311 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag); 2312 SDValue CPILo = 2313 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag); 2314 return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG); 2315 } 2316 2317 // For 64-bit PowerPC, prefer the more compact relative encodings. 2318 // This trades 32 bits per jump table entry for one or two instructions 2319 // on the jump site. 
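// With EK_LabelDifference32, each entry is a 32-bit difference such as
// ".long .LBB0_3-.LJTI0_0" (illustrative) instead of a full 64-bit pointer.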
2320 unsigned PPCTargetLowering::getJumpTableEncoding() const { 2321 if (isJumpTableRelative()) 2322 return MachineJumpTableInfo::EK_LabelDifference32; 2323 2324 return TargetLowering::getJumpTableEncoding(); 2325 } 2326 2327 bool PPCTargetLowering::isJumpTableRelative() const { 2328 if (Subtarget.isPPC64()) 2329 return true; 2330 return TargetLowering::isJumpTableRelative(); 2331 } 2332 2333 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table, 2334 SelectionDAG &DAG) const { 2335 if (!Subtarget.isPPC64()) 2336 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 2337 2338 switch (getTargetMachine().getCodeModel()) { 2339 case CodeModel::Default: 2340 case CodeModel::Small: 2341 case CodeModel::Medium: 2342 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 2343 default: 2344 return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(), 2345 getPointerTy(DAG.getDataLayout())); 2346 } 2347 } 2348 2349 const MCExpr * 2350 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF, 2351 unsigned JTI, 2352 MCContext &Ctx) const { 2353 if (!Subtarget.isPPC64()) 2354 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 2355 2356 switch (getTargetMachine().getCodeModel()) { 2357 case CodeModel::Default: 2358 case CodeModel::Small: 2359 case CodeModel::Medium: 2360 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 2361 default: 2362 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx); 2363 } 2364 } 2365 2366 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 2367 EVT PtrVT = Op.getValueType(); 2368 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 2369 2370 // 64-bit SVR4 ABI code is always position-independent. 2371 // The actual address of the GlobalValue is stored in the TOC. 2372 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2373 setUsesTOCBasePtr(DAG); 2374 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 2375 return getTOCEntry(DAG, SDLoc(JT), true, GA); 2376 } 2377 2378 unsigned MOHiFlag, MOLoFlag; 2379 bool IsPIC = isPositionIndependent(); 2380 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2381 2382 if (IsPIC && Subtarget.isSVR4ABI()) { 2383 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 2384 PPCII::MO_PIC_FLAG); 2385 return getTOCEntry(DAG, SDLoc(GA), false, GA); 2386 } 2387 2388 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 2389 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 2390 return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG); 2391 } 2392 2393 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 2394 SelectionDAG &DAG) const { 2395 EVT PtrVT = Op.getValueType(); 2396 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); 2397 const BlockAddress *BA = BASDN->getBlockAddress(); 2398 2399 // 64-bit SVR4 ABI code is always position-independent. 2400 // The actual BlockAddress is stored in the TOC. 
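  // (Illustrative) getTOCEntry below materializes the address with a load
  // relative to the TOC pointer in X2, e.g. an addis/ld pair under the medium
  // code model.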
2401 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2402 setUsesTOCBasePtr(DAG); 2403 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); 2404 return getTOCEntry(DAG, SDLoc(BASDN), true, GA); 2405 } 2406 2407 unsigned MOHiFlag, MOLoFlag; 2408 bool IsPIC = isPositionIndependent(); 2409 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2410 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 2411 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 2412 return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG); 2413 } 2414 2415 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 2416 SelectionDAG &DAG) const { 2417 // FIXME: TLS addresses currently use medium model code sequences, 2418 // which is the most useful form. Eventually support for small and 2419 // large models could be added if users need it, at the cost of 2420 // additional complexity. 2421 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2422 if (DAG.getTarget().Options.EmulatedTLS) 2423 return LowerToTLSEmulatedModel(GA, DAG); 2424 2425 SDLoc dl(GA); 2426 const GlobalValue *GV = GA->getGlobal(); 2427 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2428 bool is64bit = Subtarget.isPPC64(); 2429 const Module *M = DAG.getMachineFunction().getFunction()->getParent(); 2430 PICLevel::Level picLevel = M->getPICLevel(); 2431 2432 TLSModel::Model Model = getTargetMachine().getTLSModel(GV); 2433 2434 if (Model == TLSModel::LocalExec) { 2435 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2436 PPCII::MO_TPREL_HA); 2437 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2438 PPCII::MO_TPREL_LO); 2439 SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2, 2440 is64bit ? MVT::i64 : MVT::i32); 2441 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 2442 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 2443 } 2444 2445 if (Model == TLSModel::InitialExec) { 2446 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2447 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2448 PPCII::MO_TLS); 2449 SDValue GOTPtr; 2450 if (is64bit) { 2451 setUsesTOCBasePtr(DAG); 2452 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2453 GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, 2454 PtrVT, GOTReg, TGA); 2455 } else 2456 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); 2457 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, 2458 PtrVT, TGA, GOTPtr); 2459 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 2460 } 2461 2462 if (Model == TLSModel::GeneralDynamic) { 2463 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2464 SDValue GOTPtr; 2465 if (is64bit) { 2466 setUsesTOCBasePtr(DAG); 2467 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2468 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 2469 GOTReg, TGA); 2470 } else { 2471 if (picLevel == PICLevel::SmallPIC) 2472 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2473 else 2474 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2475 } 2476 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT, 2477 GOTPtr, TGA, TGA); 2478 } 2479 2480 if (Model == TLSModel::LocalDynamic) { 2481 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2482 SDValue GOTPtr; 2483 if (is64bit) { 2484 setUsesTOCBasePtr(DAG); 2485 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2486 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 2487 GOTReg, TGA); 2488 } else { 2489 if (picLevel == 
PICLevel::SmallPIC) 2490 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2491 else 2492 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2493 } 2494 SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl, 2495 PtrVT, GOTPtr, TGA, TGA); 2496 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, 2497 PtrVT, TLSAddr, TGA); 2498 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA); 2499 } 2500 2501 llvm_unreachable("Unknown TLS model!"); 2502 } 2503 2504 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 2505 SelectionDAG &DAG) const { 2506 EVT PtrVT = Op.getValueType(); 2507 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 2508 SDLoc DL(GSDN); 2509 const GlobalValue *GV = GSDN->getGlobal(); 2510 2511 // 64-bit SVR4 ABI code is always position-independent. 2512 // The actual address of the GlobalValue is stored in the TOC. 2513 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2514 setUsesTOCBasePtr(DAG); 2515 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); 2516 return getTOCEntry(DAG, DL, true, GA); 2517 } 2518 2519 unsigned MOHiFlag, MOLoFlag; 2520 bool IsPIC = isPositionIndependent(); 2521 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV); 2522 2523 if (IsPIC && Subtarget.isSVR4ABI()) { 2524 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 2525 GSDN->getOffset(), 2526 PPCII::MO_PIC_FLAG); 2527 return getTOCEntry(DAG, DL, false, GA); 2528 } 2529 2530 SDValue GAHi = 2531 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); 2532 SDValue GALo = 2533 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); 2534 2535 SDValue Ptr = LowerLabelRef(GAHi, GALo, IsPIC, DAG); 2536 2537 // If the global reference is actually to a non-lazy-pointer, we have to do an 2538 // extra load to get the address of the global. 2539 if (MOHiFlag & PPCII::MO_NLP_FLAG) 2540 Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo()); 2541 return Ptr; 2542 } 2543 2544 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 2545 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2546 SDLoc dl(Op); 2547 2548 if (Op.getValueType() == MVT::v2i64) { 2549 // When the operands themselves are v2i64 values, we need to do something 2550 // special because VSX has no underlying comparison operations for these. 2551 if (Op.getOperand(0).getValueType() == MVT::v2i64) { 2552 // Equality can be handled by casting to the legal type for Altivec 2553 // comparisons, everything else needs to be expanded. 2554 if (CC == ISD::SETEQ || CC == ISD::SETNE) { 2555 return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 2556 DAG.getSetCC(dl, MVT::v4i32, 2557 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)), 2558 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)), 2559 CC)); 2560 } 2561 2562 return SDValue(); 2563 } 2564 2565 // We handle most of these in the usual way. 2566 return Op; 2567 } 2568 2569 // If we're comparing for equality to zero, expose the fact that this is 2570 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can 2571 // fold the new nodes. 2572 if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG)) 2573 return V; 2574 2575 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2576 // Leave comparisons against 0 and -1 alone for now, since they're usually 2577 // optimized. FIXME: revisit this when we can custom lower all setcc 2578 // optimizations. 
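    // For example (illustrative), the integer seteq path below rewrites
    // (seteq %x, %y) as (seteq (xor %x, %y), 0) so later combines can fold
    // the xor into neighboring logic.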
2579     if (C->isAllOnesValue() || C->isNullValue())
2580       return SDValue();
2581   }
2582
2583   // If we have an integer seteq/setne, turn it into a compare against zero
2584   // by xor'ing the rhs with the lhs, which is faster than setting a
2585   // condition register, reading it back out, and masking the correct bit. The
2586   // normal approach here uses sub to do this instead of xor. Using xor exposes
2587   // the result to other bit-twiddling opportunities.
2588   EVT LHSVT = Op.getOperand(0).getValueType();
2589   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
2590     EVT VT = Op.getValueType();
2591     SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
2592                               Op.getOperand(1));
2593     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
2594   }
2595   return SDValue();
2596 }
2597
2598 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
2599   SDNode *Node = Op.getNode();
2600   EVT VT = Node->getValueType(0);
2601   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2602   SDValue InChain = Node->getOperand(0);
2603   SDValue VAListPtr = Node->getOperand(1);
2604   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2605   SDLoc dl(Node);
2606
2607   assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
2608
2609   // gpr_index
2610   SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
2611                                     VAListPtr, MachinePointerInfo(SV), MVT::i8);
2612   InChain = GprIndex.getValue(1);
2613
2614   if (VT == MVT::i64) {
2615     // Check if GprIndex is even
2616     SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
2617                                  DAG.getConstant(1, dl, MVT::i32));
2618     SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
2619                                 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
2620     SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
2621                                           DAG.getConstant(1, dl, MVT::i32));
2622     // Align GprIndex to be even if it isn't
2623     GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
2624                            GprIndex);
2625   }
2626
2627   // fpr index is 1 byte after gpr
2628   SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
2629                                DAG.getConstant(1, dl, MVT::i32));
2630
2631   // fpr
2632   SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
2633                                     FprPtr, MachinePointerInfo(SV), MVT::i8);
2634   InChain = FprIndex.getValue(1);
2635
2636   SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
2637                                        DAG.getConstant(8, dl, MVT::i32));
2638
2639   SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
2640                                         DAG.getConstant(4, dl, MVT::i32));
2641
2642   // areas
2643   SDValue OverflowArea =
2644       DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
2645   InChain = OverflowArea.getValue(1);
2646
2647   SDValue RegSaveArea =
2648       DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
2649   InChain = RegSaveArea.getValue(1);
2650
2651   // select overflow_area if index >= 8
2652   SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
2653                             DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
2654
2655   // adjustment constant gpr_index * 4/8
2656   SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
2657                                     VT.isInteger() ? GprIndex : FprIndex,
2658                                     DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
2659                                                     MVT::i32));
2660
2661   // OurReg = RegSaveArea + RegConstant
2662   SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
2663                                RegConstant);
2664
2665   // Floating types are 32 bytes into RegSaveArea
2666   if (VT.isFloatingPoint())
2667     OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
2668                          DAG.getConstant(32, dl, MVT::i32));
2669
2670   // increase {f,g}pr_index by 1 (or 2 if VT is i64)
2671   SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
2672                                    VT.isInteger() ? GprIndex : FprIndex,
2673                                    DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
2674                                                    MVT::i32));
2675
2676   InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
2677                               VT.isInteger() ? VAListPtr : FprPtr,
2678                               MachinePointerInfo(SV), MVT::i8);
2679
2680   // determine if we should load from reg_save_area or overflow_area
2681   SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
2682
2683   // increase overflow_area by 4/8 if gpr/fpr index >= 8
2684   SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
2685                                           DAG.getConstant(VT.isInteger() ? 4 : 8,
2686                                                           dl, MVT::i32));
2687
2688   OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
2689                              OverflowAreaPlusN);
2690
2691   InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
2692                               MachinePointerInfo(), MVT::i32);
2693
2694   return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
2695 }
2696
2697 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
2698   assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
2699
2700   // We have to copy the entire va_list struct:
2701   // 2*sizeof(char) + 2 bytes of padding + 2*sizeof(char*) = 12 bytes
2702   return DAG.getMemcpy(Op.getOperand(0), Op,
2703                        Op.getOperand(1), Op.getOperand(2),
2704                        DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true,
2705                        false, MachinePointerInfo(), MachinePointerInfo());
2706 }
2707
2708 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
2709                                                   SelectionDAG &DAG) const {
2710   return Op.getOperand(0);
2711 }
2712
2713 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
2714                                                 SelectionDAG &DAG) const {
2715   SDValue Chain = Op.getOperand(0);
2716   SDValue Trmp = Op.getOperand(1); // trampoline
2717   SDValue FPtr = Op.getOperand(2); // nested function
2718   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
2719   SDLoc dl(Op);
2720
2721   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2722   bool isPPC64 = (PtrVT == MVT::i64);
2723   Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
2724
2725   TargetLowering::ArgListTy Args;
2726   TargetLowering::ArgListEntry Entry;
2727
2728   Entry.Ty = IntPtrTy;
2729   Entry.Node = Trmp; Args.push_back(Entry);
2730
2731   // TrampSize == (isPPC64 ? 48 : 40);
2732   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
2733                                isPPC64 ?
MVT::i64 : MVT::i32); 2734 Args.push_back(Entry); 2735 2736 Entry.Node = FPtr; Args.push_back(Entry); 2737 Entry.Node = Nest; Args.push_back(Entry); 2738 2739 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 2740 TargetLowering::CallLoweringInfo CLI(DAG); 2741 CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( 2742 CallingConv::C, Type::getVoidTy(*DAG.getContext()), 2743 DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args)); 2744 2745 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 2746 return CallResult.second; 2747 } 2748 2749 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 2750 MachineFunction &MF = DAG.getMachineFunction(); 2751 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2752 EVT PtrVT = getPointerTy(MF.getDataLayout()); 2753 2754 SDLoc dl(Op); 2755 2756 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) { 2757 // vastart just stores the address of the VarArgsFrameIndex slot into the 2758 // memory location argument. 2759 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2760 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2761 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2762 MachinePointerInfo(SV)); 2763 } 2764 2765 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 2766 // We suppose the given va_list is already allocated. 2767 // 2768 // typedef struct { 2769 // char gpr; /* index into the array of 8 GPRs 2770 // * stored in the register save area 2771 // * gpr=0 corresponds to r3, 2772 // * gpr=1 to r4, etc. 2773 // */ 2774 // char fpr; /* index into the array of 8 FPRs 2775 // * stored in the register save area 2776 // * fpr=0 corresponds to f1, 2777 // * fpr=1 to f2, etc. 
2778 // */ 2779 // char *overflow_arg_area; 2780 // /* location on stack that holds 2781 // * the next overflow argument 2782 // */ 2783 // char *reg_save_area; 2784 // /* where r3:r10 and f1:f8 (if saved) 2785 // * are stored 2786 // */ 2787 // } va_list[1]; 2788 2789 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32); 2790 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32); 2791 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 2792 PtrVT); 2793 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 2794 PtrVT); 2795 2796 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 2797 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT); 2798 2799 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 2800 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT); 2801 2802 uint64_t FPROffset = 1; 2803 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT); 2804 2805 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2806 2807 // Store first byte : number of int regs 2808 SDValue firstStore = 2809 DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1), 2810 MachinePointerInfo(SV), MVT::i8); 2811 uint64_t nextOffset = FPROffset; 2812 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 2813 ConstFPROffset); 2814 2815 // Store second byte : number of float regs 2816 SDValue secondStore = 2817 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 2818 MachinePointerInfo(SV, nextOffset), MVT::i8); 2819 nextOffset += StackOffset; 2820 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 2821 2822 // Store second word : arguments given on stack 2823 SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 2824 MachinePointerInfo(SV, nextOffset)); 2825 nextOffset += FrameOffset; 2826 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 2827 2828 // Store third word : arguments given in registers 2829 return DAG.getStore(thirdStore, dl, FR, nextPtr, 2830 MachinePointerInfo(SV, nextOffset)); 2831 } 2832 2833 #include "PPCGenCallingConv.inc" 2834 2835 // Function whose sole purpose is to kill compiler warnings 2836 // stemming from unused functions included from PPCGenCallingConv.inc. 2837 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const { 2838 return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS; 2839 } 2840 2841 bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 2842 CCValAssign::LocInfo &LocInfo, 2843 ISD::ArgFlagsTy &ArgFlags, 2844 CCState &State) { 2845 return true; 2846 } 2847 2848 bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, 2849 MVT &LocVT, 2850 CCValAssign::LocInfo &LocInfo, 2851 ISD::ArgFlagsTy &ArgFlags, 2852 CCState &State) { 2853 static const MCPhysReg ArgRegs[] = { 2854 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2855 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2856 }; 2857 const unsigned NumArgRegs = array_lengthof(ArgRegs); 2858 2859 unsigned RegNum = State.getFirstUnallocated(ArgRegs); 2860 2861 // Skip one register if the first unallocated register has an even register 2862 // number and there are still argument registers available which have not been 2863 // allocated yet. RegNum is actually an index into ArgRegs, which means we 2864 // need to skip a register if RegNum is odd. 
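  // For example (illustrative), if R3 has already been allocated (RegNum == 1),
  // R4 is allocated as padding so that an i64 argument lands in the aligned
  // pair R5/R6.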
bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
                                             MVT &LocVT,
                                             CCValAssign::LocInfo &LocInfo,
                                             ISD::ArgFlagsTy &ArgFlags,
                                             CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // Skip one register if the first unallocated register has an even register
  // number and there are still argument registers available which have not
  // been allocated yet. RegNum is actually an index into ArgRegs, which means
  // we need to skip a register if RegNum is odd.
  if (RegNum != NumArgRegs && RegNum % 2 == 1) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the first
  // unallocated register has an odd register number and does not actually
  // allocate a register for the current argument.
  return false;
}

bool
llvm::CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128(unsigned &ValNo, MVT &ValVT,
                                                  MVT &LocVT,
                                                  CCValAssign::LocInfo &LocInfo,
                                                  ISD::ArgFlagsTy &ArgFlags,
                                                  CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);
  int RegsLeft = NumArgRegs - RegNum;

  // Skip if there are not enough registers left for the long double type
  // (4 GPRs in soft-float mode) and put the long double argument on the
  // stack.
  if (RegNum != NumArgRegs && RegsLeft < 4) {
    for (int i = 0; i < RegsLeft; i++) {
      State.AllocateReg(ArgRegs[RegNum + i]);
    }
  }

  return false;
}

bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
                                               MVT &LocVT,
                                               CCValAssign::LocInfo &LocInfo,
                                               ISD::ArgFlagsTy &ArgFlags,
                                               CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8
  };

  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // If there is only one Floating-point register left we need to put both f64
  // values of a split ppc_fp128 value on the stack.
  if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the two
  // f64 values a ppc_fp128 value is split into are both passed in registers
  // or both passed on the stack and does not actually allocate a register
  // for the current argument.
  return false;
}

/// FPR - The set of FP registers that should be allocated for arguments,
/// on Darwin.
static const MCPhysReg FPR[] = {PPC::F1,  PPC::F2,  PPC::F3, PPC::F4, PPC::F5,
                                PPC::F6,  PPC::F7,  PPC::F8, PPC::F9, PPC::F10,
                                PPC::F11, PPC::F12, PPC::F13};

/// QFPR - The set of QPX registers that should be allocated for arguments.
static const MCPhysReg QFPR[] = {
    PPC::QF1, PPC::QF2, PPC::QF3,  PPC::QF4,  PPC::QF5,  PPC::QF6, PPC::QF7,
    PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};

/// CalculateStackSlotSize - Calculates the size reserved for this argument on
/// the stack.
static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
                                       unsigned PtrByteSize) {
  unsigned ArgSize = ArgVT.getStoreSize();
  if (Flags.isByVal())
    ArgSize = Flags.getByValSize();

  // Round up to multiples of the pointer size, except for array members,
  // which are always packed.
  if (!Flags.isInConsecutiveRegs())
    ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

  return ArgSize;
}
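// Example: with PtrByteSize == 8, a 13-byte byval aggregate reserves
// ((13 + 8 - 1) / 8) * 8 == 16 bytes, while an f32 member of a
// consecutive-register array (isInConsecutiveRegs) stays packed at its
// 4-byte store size.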
/// CalculateStackSlotAlignment - Calculates the alignment of this argument
/// on the stack.
static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
                                            ISD::ArgFlagsTy Flags,
                                            unsigned PtrByteSize) {
  unsigned Align = PtrByteSize;

  // Altivec parameters are padded to a 16 byte boundary.
  if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
      ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
      ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
      ArgVT == MVT::v1i128)
    Align = 16;
  // QPX vector types stored in double-precision are padded to a 32 byte
  // boundary.
  else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1)
    Align = 32;

  // ByVal parameters are aligned as requested.
  if (Flags.isByVal()) {
    unsigned BVAlign = Flags.getByValAlign();
    if (BVAlign > PtrByteSize) {
      if (BVAlign % PtrByteSize != 0)
        llvm_unreachable(
            "ByVal alignment is not a multiple of the pointer size");

      Align = BVAlign;
    }
  }

  // Array members are always packed to their original alignment.
  if (Flags.isInConsecutiveRegs()) {
    // If the array member was split into multiple registers, the first
    // needs to be aligned to the size of the full type.  (Except for
    // ppcf128, which is only aligned as its f64 components.)
    if (Flags.isSplit() && OrigVT != MVT::ppcf128)
      Align = OrigVT.getStoreSize();
    else
      Align = ArgVT.getStoreSize();
  }

  return Align;
}
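// Example: an Altivec v4f32 argument whose running offset is currently 40
// gets Align == 16 from the helper above, so the caller's
// ((ArgOffset + Align - 1) / Align) * Align realignment places it at
// offset 48 before its slot is allocated.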
/// CalculateStackSlotUsed - Return whether this argument will use its
/// stack slot (instead of being passed in registers).  ArgOffset,
/// AvailableFPRs, and AvailableVRs must hold the current argument
/// position, and will be updated to account for this argument.
static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
                                   ISD::ArgFlagsTy Flags,
                                   unsigned PtrByteSize,
                                   unsigned LinkageSize,
                                   unsigned ParamAreaSize,
                                   unsigned &ArgOffset,
                                   unsigned &AvailableFPRs,
                                   unsigned &AvailableVRs, bool HasQPX) {
  bool UseMemory = false;

  // Respect alignment of argument on the stack.
  unsigned Align =
      CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
  ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
  // If there's no space left in the argument save area, we must
  // use memory (this check also catches zero-sized arguments).
  if (ArgOffset >= LinkageSize + ParamAreaSize)
    UseMemory = true;

  // Allocate argument on the stack.
  ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
  if (Flags.isInConsecutiveRegsLast())
    ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
  // If we overran the argument save area, we must use memory
  // (this check catches arguments passed partially in memory).
  if (ArgOffset > LinkageSize + ParamAreaSize)
    UseMemory = true;

  // However, if the argument is actually passed in an FPR or a VR,
  // we don't use memory after all.
  if (!Flags.isByVal()) {
    if (ArgVT == MVT::f32 || ArgVT == MVT::f64 ||
        // QPX registers overlap with the scalar FP registers.
        (HasQPX && (ArgVT == MVT::v4f32 ||
                    ArgVT == MVT::v4f64 ||
                    ArgVT == MVT::v4i1)))
      if (AvailableFPRs > 0) {
        --AvailableFPRs;
        return false;
      }
    if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
        ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
        ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
        ArgVT == MVT::v1i128)
      if (AvailableVRs > 0) {
        --AvailableVRs;
        return false;
      }
  }

  return UseMemory;
}

/// EnsureStackAlignment - Round stack frame size up from NumBytes to
/// ensure minimum alignment required for target.
static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
                                     unsigned NumBytes) {
  unsigned TargetAlign = Lowering->getStackAlignment();
  unsigned AlignMask = TargetAlign - 1;
  NumBytes = (NumBytes + AlignMask) & ~AlignMask;
  return NumBytes;
}
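// Example: with the usual 16-byte PPC stack alignment, NumBytes == 52
// rounds up as (52 + 15) & ~15 == 64.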
SDValue PPCTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget.isSVR4ABI()) {
    if (Subtarget.isPPC64())
      return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins,
                                         dl, DAG, InVals);
    else
      return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins,
                                         dl, DAG, InVals);
  } else {
    return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
                                       dl, DAG, InVals);
  }
}

SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  // 32-bit SVR4 ABI Stack Frame Layout:
  //              +-----------------------------------+
  //        +-->  |            Back chain             |
  //        |     +-----------------------------------+
  //        |     | Floating-point register save area |
  //        |     +-----------------------------------+
  //        |     |    General register save area     |
  //        |     +-----------------------------------+
  //        |     |          CR save word             |
  //        |     +-----------------------------------+
  //        |     |         VRSAVE save word          |
  //        |     +-----------------------------------+
  //        |     |         Alignment padding         |
  //        |     +-----------------------------------+
  //        |     |     Vector register save area     |
  //        |     +-----------------------------------+
  //        |     |       Local variable space        |
  //        |     +-----------------------------------+
  //        |     |        Parameter list area        |
  //        |     +-----------------------------------+
  //        |     |           LR save word            |
  //        |     +-----------------------------------+
  // SP-->  +---  |            Back chain             |
  //              +-----------------------------------+
  //
  // Specifications:
  //   System V Application Binary Interface PowerPC Processor Supplement
  //   AltiVec Technology Programming Interface Manual

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = 4;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                    *DAG.getContext());

  // Reserve space for the linkage area on the stack.
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  CCInfo.AllocateStack(LinkageSize, PtrByteSize);
  if (useSoftFloat())
    CCInfo.PreAnalyzeFormalArguments(Ins);

  CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
  CCInfo.clearWasPPCF128();

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];

    // Arguments stored in registers.
    if (VA.isRegLoc()) {
      const TargetRegisterClass *RC;
      EVT ValVT = VA.getValVT();

      switch (ValVT.getSimpleVT().SimpleTy) {
      default:
        llvm_unreachable("ValVT not supported by formal arguments Lowering");
      case MVT::i1:
      case MVT::i32:
        RC = &PPC::GPRCRegClass;
        break;
      case MVT::f32:
        if (Subtarget.hasP8Vector())
          RC = &PPC::VSSRCRegClass;
        else
          RC = &PPC::F4RCRegClass;
        break;
      case MVT::f64:
        if (Subtarget.hasVSX())
          RC = &PPC::VSFRCRegClass;
        else
          RC = &PPC::F8RCRegClass;
        break;
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
        RC = &PPC::VRRCRegClass;
        break;
      case MVT::v4f32:
        RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass;
        break;
      case MVT::v2f64:
      case MVT::v2i64:
        RC = &PPC::VRRCRegClass;
        break;
      case MVT::v4f64:
        RC = &PPC::QFRCRegClass;
        break;
      case MVT::v4i1:
        RC = &PPC::QBRCRegClass;
        break;
      }

      // Transform the arguments stored in physical registers into virtual
      // ones.
      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
      SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
                                            ValVT == MVT::i1 ? MVT::i32
                                                             : ValVT);

      if (ValVT == MVT::i1)
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);

      InVals.push_back(ArgValue);
    } else {
      // Argument stored in memory.
      assert(VA.isMemLoc());

      unsigned ArgSize = VA.getLocVT().getStoreSize();
      int FI = MFI.CreateFixedObject(ArgSize, VA.getLocMemOffset(),
                                     isImmutable);

      // Create load nodes to retrieve arguments from the stack.
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(
          DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
    }
  }

  // Assign locations to all of the incoming aggregate by value arguments.
  // Aggregates passed by value are stored in the local variable space of the
  // caller's stack frame, right above the parameter list area.
  SmallVector<CCValAssign, 16> ByValArgLocs;
  CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                      ByValArgLocs, *DAG.getContext());

  // Reserve stack space for the allocations in CCInfo.
  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);

  CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);

  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
  MinReservedArea = std::max(MinReservedArea, LinkageSize);

  // Set the size that is at least reserved in caller of this function.  Tail
  // call optimized function's reserved stack space needs to be aligned so
  // that taking the difference between two stack areas will result in an
  // aligned stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  SmallVector<SDValue, 8> MemOps;

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of
  // llvm.va_start.
  if (isVarArg) {
    static const MCPhysReg GPArgRegs[] = {
      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
      PPC::R7, PPC::R8, PPC::R9, PPC::R10,
    };
    const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);

    static const MCPhysReg FPArgRegs[] = {
      PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
      PPC::F8
    };
    unsigned NumFPArgRegs = array_lengthof(FPArgRegs);

    if (useSoftFloat())
      NumFPArgRegs = 0;

    FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
    FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));

    // Make room for NumGPArgRegs and NumFPArgRegs.
    int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
                NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;

    FuncInfo->setVarArgsStackOffset(
        MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
                              CCInfo.getNextStackOffset(), true));

    FuncInfo->setVarArgsFrameIndex(MFI.CreateStackObject(Depth, 8, false));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // The fixed integer arguments of a variadic function are stored to the
    // VarArgsFrameIndex on the stack so that they may be loaded by
    // dereferencing the result of va_next.
    for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
      // Get an existing live-in vreg, or add a new one.
      unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
      if (!VReg)
        VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by four for the next argument to store.
      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }

    // FIXME 32-bit SVR4: We only need to save FP argument registers if CR
    // bit 6 is set.
    // The double arguments are stored to the VarArgsFrameIndex
    // on the stack.
    for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
      // Get an existing live-in vreg, or add a new one.
      unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
      if (!VReg)
        VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by eight for the next argument to store.
      SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
                                       PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}
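// For reference: the vararg register save area created above is
// 8 GPRs * 4 bytes + 8 FPRs * 8 bytes = 96 bytes on hard-float 32-bit
// SVR4 (just 32 bytes when soft-float drops the FPR block).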
// PPC64 passes i8, i16, and i32 values in i64 registers. Promote
// value to MVT::i64 and then truncate to the correct register size.
SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
                                             EVT ObjectVT, SelectionDAG &DAG,
                                             SDValue ArgVal,
                                             const SDLoc &dl) const {
  if (Flags.isSExt())
    ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
                         DAG.getValueType(ObjectVT));
  else if (Flags.isZExt())
    ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
                         DAG.getValueType(ObjectVT));

  return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
}
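// Example: an i32 argument carrying the signext attribute arrives in an
// i64 GPR; the helper above wraps it in AssertSext(i32) before the
// truncate, so later DAG combines know the high bits were already
// sign-extended by the caller.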
SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  // TODO: add description of PPC stack frame format, or at least some docs.
  //
  bool isELFv2ABI = Subtarget.isELFv2ABI();
  bool isLittleEndian = Subtarget.isLittleEndian();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  assert(!(CallConv == CallingConv::Fast && isVarArg) &&
         "fastcc not supported on varargs functions");

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = 8;
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = array_lengthof(GPR);
  const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
  const unsigned Num_VR_Regs = array_lengthof(VR);
  const unsigned Num_QFPR_Regs = Num_FPR_Regs;

  // Do a first pass over the arguments to determine whether the ABI
  // guarantees that our caller has allocated the parameter save area
  // on its stack frame. In the ELFv1 ABI, this is always the case;
  // in the ELFv2 ABI, it is true if this is a vararg function or if
  // any parameter is located in a stack slot.

  bool HasParameterArea = !isELFv2ABI || isVarArg;
  unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = Num_FPR_Regs;
  unsigned AvailableVRs = Num_VR_Regs;
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    if (Ins[i].Flags.isNest())
      continue;

    if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
                               PtrByteSize, LinkageSize, ParamAreaSize,
                               NumBytes, AvailableFPRs, AvailableVRs,
                               Subtarget.hasQPX()))
      HasParameterArea = true;
  }

  // Add DAG nodes to load the arguments or copy them out of registers.  On
  // entry to a function on PPC, the arguments start after the linkage area,
  // although the first ones are often in registers.

  unsigned ArgOffset = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  unsigned &QFPR_idx = FPR_idx;
  SmallVector<SDValue, 8> MemOps;
  Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    SDValue ArgVal;
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    EVT OrigVT = Ins[ArgNo].ArgVT;
    unsigned ObjSize = ObjectVT.getStoreSize();
    unsigned ArgSize = ObjSize;
    ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
    if (Ins[ArgNo].isOrigArg()) {
      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
    }
    // We re-align the argument offset for each argument, except when using
    // the fast calling convention, when we need to make sure we do that only
    // when we'll actually use a stack slot.
    unsigned CurArgOffset, Align;
    auto ComputeArgOffset = [&]() {
      /* Respect alignment of argument on the stack.  */
      Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags,
                                          PtrByteSize);
      ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
      CurArgOffset = ArgOffset;
    };

    if (CallConv != CallingConv::Fast) {
      ComputeArgOffset();

      /* Compute GPR index associated with argument offset.  */
      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
    }

    // FIXME the codegen can be much improved in some cases.
    // We do not have to keep everything in memory.
    if (Flags.isByVal()) {
      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");

      if (CallConv == CallingConv::Fast)
        ComputeArgOffset();

      // ObjSize is the true size; ArgSize is ObjSize rounded up to a
      // multiple of registers.
      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      // Empty aggregate parameters do not take up registers.  Examples:
      //   struct { } a;
      //   union  { } b;
      //   int c[0];
      // etc.  However, we have to provide a place-holder in InVals, so
      // pretend we have an 8-byte item at the current address for that
      // purpose.
      if (!ObjSize) {
        int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
        SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
        InVals.push_back(FIN);
        continue;
      }

      // Create a stack object covering all stack doublewords occupied
      // by the argument.  If the argument is (fully or partially) on
      // the stack, or if the argument is fully in registers but the
      // caller has allocated the parameter save area anyway, we can refer
      // directly to the caller's stack frame.  Otherwise, create a
      // local copy in our own frame.
      int FI;
      if (HasParameterArea ||
          ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
        FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
      else
        FI = MFI.CreateStackObject(ArgSize, Align, false);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
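      // Example (big-endian): a 3-byte byval argument occupies one
      // doubleword slot, but its value address is the slot base plus
      // (PtrByteSize - ObjSize) == 5, i.e. the aggregate is right-justified
      // within the doubleword, as the adjustment below computes.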
      // Handle aggregates smaller than 8 bytes.
      if (ObjSize < PtrByteSize) {
        // The value of the object is its address, which differs from the
        // address of the enclosing doubleword on big-endian systems.
        SDValue Arg = FIN;
        if (!isLittleEndian) {
          SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
          Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
        }
        InVals.push_back(Arg);

        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          SDValue Store;

          if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
            EVT ObjType = (ObjSize == 1 ? MVT::i8 :
                           (ObjSize == 2 ? MVT::i16 : MVT::i32));
            Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
                                      MachinePointerInfo(&*FuncArg), ObjType);
          } else {
            // For sizes that don't fit a truncating store (3, 5, 6, 7),
            // store the whole register as-is to the parameter save area
            // slot.
            Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                 MachinePointerInfo(&*FuncArg));
          }

          MemOps.push_back(Store);
        }
        // Whether we copied from a register or not, advance the offset
        // into the parameter save area by a full doubleword.
        ArgOffset += PtrByteSize;
        continue;
      }

      // The value of the object is its address, which is the address of
      // its first stack doubleword.
      InVals.push_back(FIN);

      // Store whatever pieces of the object are in registers to memory.
      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        if (GPR_idx == Num_GPR_Regs)
          break;

        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
        SDValue Addr = FIN;
        if (j) {
          SDValue Off = DAG.getConstant(j, dl, PtrVT);
          Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
        }
        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
                                     MachinePointerInfo(&*FuncArg, j));
        MemOps.push_back(Store);
        ++GPR_idx;
      }
      ArgOffset += ArgSize;
      continue;
    }

    switch (ObjectVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled argument type!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (Flags.isNest()) {
        // The 'nest' parameter, if any, is passed in R11.
        unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);

        break;
      }

      // These can be scalar arguments or elements of an integer array type
      // passed directly.  Clang may use those instead of "byval" aggregate
      // types to avoid forcing arguments to memory unnecessarily.
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
          // value to MVT::i64 and then truncate to the correct register
          // size.
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();

        needsLoad = true;
        ArgSize = PtrByteSize;
      }
      if (CallConv != CallingConv::Fast || needsLoad)
        ArgOffset += 8;
      break;

    case MVT::f32:
    case MVT::f64:
      // These can be scalar arguments or elements of a float array type
      // passed directly.  The latter are used to implement ELFv2 homogeneous
      // float aggregates.
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;

        if (ObjectVT == MVT::f32)
          VReg = MF.addLiveIn(FPR[FPR_idx],
                              Subtarget.hasP8Vector()
                                  ? &PPC::VSSRCRegClass
                                  : &PPC::F4RCRegClass);
        else
          VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
                                                ? &PPC::VSFRCRegClass
                                                : &PPC::F8RCRegClass);

        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++FPR_idx;
      } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
        // FIXME: We may want to re-enable this for CallingConv::Fast on the
        // P8 once we support fp <-> gpr moves.

        // This can only ever happen in the presence of f32 array types,
        // since otherwise we never run out of FPRs before running out
        // of GPRs.
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::f32) {
          if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
            ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
                                 DAG.getConstant(32, dl, MVT::i32));
          ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
        }

        ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();

        needsLoad = true;
      }

      // When passing an array of floats, the array occupies consecutive
      // space in the argument area; only round up to the next doubleword
      // at the end of the array.  Otherwise, each float takes 8 bytes.
      if (CallConv != CallingConv::Fast || needsLoad) {
        ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
        ArgOffset += ArgSize;
        if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) *
                      PtrByteSize;
      }
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
    case MVT::v2f64:
    case MVT::v2i64:
    case MVT::v1i128:
      if (!Subtarget.hasQPX()) {
        // These can be scalar arguments or elements of a vector array type
        // passed directly.  The latter are used to implement ELFv2
        // homogeneous vector aggregates.
        if (VR_idx != Num_VR_Regs) {
          unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
          ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
          ++VR_idx;
        } else {
          if (CallConv == CallingConv::Fast)
            ComputeArgOffset();

          needsLoad = true;
        }
        if (CallConv != CallingConv::Fast || needsLoad)
          ArgOffset += 16;
        break;
      } // not QPX

      assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 &&
             "Invalid QPX parameter type");
      /* fall through */

    case MVT::v4f64:
    case MVT::v4i1:
      // QPX vectors are treated like their scalar floating-point
      // subregisters (except that they're larger).
      unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32;
      if (QFPR_idx != Num_QFPR_Regs) {
        const TargetRegisterClass *RC;
        switch (ObjectVT.getSimpleVT().SimpleTy) {
        case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
        case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
        default:         RC = &PPC::QBRCRegClass; break;
        }

        unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++QFPR_idx;
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();
        needsLoad = true;
      }
      if (CallConv != CallingConv::Fast || needsLoad)
        ArgOffset += Sz;
      break;
    }

    // We need to load the argument to a virtual register if we determined
    // above that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      if (ObjSize < ArgSize && !isLittleEndian)
        CurArgOffset += ArgSize - ObjSize;
      int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
    }

    InVals.push_back(ArgVal);
  }

  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea;
  if (HasParameterArea)
    MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
  else
    MinReservedArea = LinkageSize;

  // Set the size that is at least reserved in caller of this function.  Tail
  // call optimized functions' reserved stack space needs to be aligned so
  // that taking the difference between two stack areas will result in an
  // aligned stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of
  // llvm.va_start.
  if (isVarArg) {
    int Depth = ArgOffset;

    FuncInfo->setVarArgsFrameIndex(
        MFI.CreateFixedObject(PtrByteSize, Depth, true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by
    // dereferencing the result of va_next.
    for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
         GPR_idx < Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by eight for the next argument to store.
      SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}
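// Example: in ELFv2, a non-variadic function taking (double a, double b)
// receives both values in f1/f2; no parameter ends up in a stack slot, so
// HasParameterArea stays false and MinReservedArea above is just
// LinkageSize.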
SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  // TODO: add description of PPC stack frame format, or at least some docs.
  //
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = isPPC64 ? 8 : 4;
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned ArgOffset = LinkageSize;
  // Area that is at least reserved in caller of this function.
  unsigned MinReservedArea = ArgOffset;

  static const MCPhysReg GPR_32[] = {           // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const MCPhysReg GPR_64[] = {           // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
  const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
  const unsigned Num_VR_Regs  = array_lengthof(VR);

  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;

  // In 32-bit non-varargs functions, the stack space for vectors is after
  // the stack space for non-vectors.  We do not use this space unless we
  // have too many vectors to fit in registers, something that only occurs
  // in constructed examples:), but we have to walk the arglist to figure
  // that out... for the pathological case, compute VecArgOffset as the
  // start of the vector parameter area.  Computing VecArgOffset is the
  // entire point of the following loop.
  unsigned VecArgOffset = ArgOffset;
  if (!isVarArg && !isPPC64) {
    for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
      EVT ObjectVT = Ins[ArgNo].VT;
      ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;

      if (Flags.isByVal()) {
        // ObjSize is the true size; ArgSize is ObjSize rounded up to a
        // multiple of registers.
        unsigned ObjSize = Flags.getByValSize();
        unsigned ArgSize =
            ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
        VecArgOffset += ArgSize;
        continue;
      }

      switch(ObjectVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unhandled argument type!");
      case MVT::i1:
      case MVT::i32:
      case MVT::f32:
        VecArgOffset += 4;
        break;
      case MVT::i64:  // PPC64
      case MVT::f64:
        // FIXME: We are guaranteed to be !isPPC64 at this point.
        // Does MVT::i64 apply?
        VecArgOffset += 8;
        break;
      case MVT::v4f32:
      case MVT::v4i32:
      case MVT::v8i16:
      case MVT::v16i8:
        // Nothing to do, we're only looking at Nonvector args here.
        break;
      }
    }
  }
  // We've found where the vector parameter area in memory is.  Skip the
  // first 12 parameters; these don't use that memory.
  VecArgOffset = ((VecArgOffset+15)/16)*16;
  VecArgOffset += 12*16;

  // Add DAG nodes to load the arguments or copy them out of registers.  On
  // entry to a function on PPC, the arguments start after the linkage area,
  // although the first ones are often in registers.
  SmallVector<SDValue, 8> MemOps;
  unsigned nAltivecParamsAtEnd = 0;
  Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    SDValue ArgVal;
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    unsigned ObjSize = ObjectVT.getSizeInBits()/8;
    unsigned ArgSize = ObjSize;
    ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
    if (Ins[ArgNo].isOrigArg()) {
      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
    }
    unsigned CurArgOffset = ArgOffset;

    // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
    if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
        ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
      if (isVarArg || isPPC64) {
        MinReservedArea = ((MinReservedArea+15)/16)*16;
        MinReservedArea += CalculateStackSlotSize(ObjectVT,
                                                  Flags,
                                                  PtrByteSize);
      } else  nAltivecParamsAtEnd++;
    } else
      // Calculate min reserved area.
      MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
                                                Flags,
                                                PtrByteSize);

    // FIXME the codegen can be much improved in some cases.
    // We do not have to keep everything in memory.
    if (Flags.isByVal()) {
      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");

      // ObjSize is the true size; ArgSize is ObjSize rounded up to a
      // multiple of registers.
      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      // Objects of size 1 and 2 are right justified, everything else is
      // left justified.  This means the memory address is adjusted forwards.
      if (ObjSize==1 || ObjSize==2) {
        CurArgOffset = CurArgOffset + (4 - ObjSize);
      }
      // The value of the object is its address.
      int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(FIN);
      if (ObjSize==1 || ObjSize==2) {
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg;
          if (isPPC64)
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          else
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
          SDValue Store =
              DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
                                MachinePointerInfo(&*FuncArg), ObjType);
          MemOps.push_back(Store);
          ++GPR_idx;
        }

        ArgOffset += PtrByteSize;

        continue;
      }
      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        // Store whatever pieces of the object are in registers
        // to memory.  ArgOffset will be the address of the beginning
        // of the object.
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg;
          if (isPPC64)
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          else
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
          SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                       MachinePointerInfo(&*FuncArg, j));
          MemOps.push_back(Store);
          ++GPR_idx;
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
          break;
        }
      }
      continue;
    }

    switch (ObjectVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled argument type!");
    case MVT::i1:
    case MVT::i32:
      if (!isPPC64) {
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);

          if (ObjectVT == MVT::i1)
            ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);

          ++GPR_idx;
        } else {
          needsLoad = true;
          ArgSize = PtrByteSize;
        }
        // All int arguments reserve stack space in the Darwin ABI.
        ArgOffset += PtrByteSize;
        break;
      }
      LLVM_FALLTHROUGH;
    case MVT::i64:  // PPC64
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
          // value to MVT::i64 and then truncate to the correct register
          // size.
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);

        ++GPR_idx;
      } else {
        needsLoad = true;
        ArgSize = PtrByteSize;
      }
      // All int arguments reserve stack space in the Darwin ABI.
      ArgOffset += 8;
      break;

    case MVT::f32:
    case MVT::f64:
      // Every 4 bytes of argument space consumes one of the GPRs available
      // for argument passing.
      if (GPR_idx != Num_GPR_Regs) {
        ++GPR_idx;
        if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
          ++GPR_idx;
      }
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;

        if (ObjectVT == MVT::f32)
          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
        else
          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);

        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++FPR_idx;
      } else {
        needsLoad = true;
      }

      // All FP arguments reserve stack space in the Darwin ABI.
      ArgOffset += isPPC64 ? 8 : ObjSize;
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      // Note that vector arguments in registers don't reserve stack space,
      // except in varargs functions.
      if (VR_idx != Num_VR_Regs) {
        unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        if (isVarArg) {
          while ((ArgOffset % 16) != 0) {
            ArgOffset += PtrByteSize;
            if (GPR_idx != Num_GPR_Regs)
              GPR_idx++;
          }
          ArgOffset += 16;
          GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
        }
        ++VR_idx;
      } else {
        if (!isVarArg && !isPPC64) {
          // Vectors go after all the nonvectors.
          CurArgOffset = VecArgOffset;
          VecArgOffset += 16;
        } else {
          // Vectors are aligned.
          ArgOffset = ((ArgOffset+15)/16)*16;
          CurArgOffset = ArgOffset;
          ArgOffset += 16;
        }
        needsLoad = true;
      }
      break;
    }

    // We need to load the argument to a virtual register if we determined
    // above that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      int FI = MFI.CreateFixedObject(ObjSize,
                                     CurArgOffset + (ArgSize - ObjSize),
                                     isImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
    }

    InVals.push_back(ArgVal);
  }

  // Allow for Altivec parameters at the end, if needed.
  if (nAltivecParamsAtEnd) {
    MinReservedArea = ((MinReservedArea+15)/16)*16;
    MinReservedArea += 16*nAltivecParamsAtEnd;
  }

  // Area that is at least reserved in the caller of this function.
  MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);

  // Set the size that is at least reserved in caller of this function.  Tail
  // call optimized functions' reserved stack space needs to be aligned so
  // that taking the difference between two stack areas will result in an
  // aligned stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of
  // llvm.va_start.
  if (isVarArg) {
    int Depth = ArgOffset;

    FuncInfo->setVarArgsFrameIndex(
        MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
                              Depth, true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by
    // dereferencing the result of va_next.
    for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg;

      if (isPPC64)
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      else
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by a pointer's worth of bytes for the next
      // argument to store.
      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}
/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
/// adjusted to accommodate the arguments for the tailcall.
static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
                                   unsigned ParamSize) {

  if (!isTailCall) return 0;

  PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
  unsigned CallerMinReservedArea = FI->getMinReservedArea();
  int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
  // Remember only if the new adjustment is bigger.
  if (SPDiff < FI->getTailCallSPDelta())
    FI->setTailCallSPDelta(SPDiff);

  return SPDiff;
}
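// Example: if the caller reserved 96 bytes of argument area and the tail
// call needs only 64, SPDiff is +32 and the recorded delta is left alone;
// if the call needs 128, SPDiff is -32 and becomes the new (more negative)
// TailCallSPDelta.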
static bool isFunctionGlobalAddress(SDValue Callee);

static bool
resideInSameSection(const Function *Caller, SDValue Callee,
                    const TargetMachine &TM) {
  // If !G, Callee can be an external symbol.
  GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
  if (!G)
    return false;

  const GlobalValue *GV = G->getGlobal();
  if (!GV->isStrongDefinitionForLinker())
    return false;

  // Any explicitly-specified sections and section prefixes must also match.
  // Also, if we're using -ffunction-sections, then each function is always in
  // a different section (the same is true for COMDAT functions).
  if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
      GV->getSection() != Caller->getSection())
    return false;
  if (const auto *F = dyn_cast<Function>(GV)) {
    if (F->getSectionPrefix() != Caller->getSectionPrefix())
      return false;
  }

  // If the callee might be interposed, then we can't assume the ultimate call
  // target will be in the same section.  Even in cases where we can assume
  // that interposition won't happen, in any case where the linker might
  // insert a stub to allow for interposition, we must generate code as
  // though interposition might occur.  To understand why this matters,
  // consider a situation where: a -> b -> c where the arrows indicate calls.
  // b and c are in the same section, but a is in a different module (i.e.
  // has a different TOC base pointer).  If the linker allows for
  // interposition between b and c, then it will generate a stub for the call
  // edge between b and c which will save the TOC pointer into the designated
  // stack slot allocated by b.  If we return true here, and therefore allow
  // a tail call between b and c, that stack slot won't exist and the b -> c
  // stub will end up saving b's TOC base pointer into the stack slot
  // allocated by a (where the a -> b stub saved a's TOC base pointer).  If
  // we're not considering a tail call, but rather, whether a nop is needed
  // after the call instruction in b, because the linker will insert a stub,
  // it might complain about a missing nop if we omit it (although many don't
  // complain in this case).
  if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
    return false;

  return true;
}

static bool
needStackSlotPassParameters(const PPCSubtarget &Subtarget,
                            const SmallVectorImpl<ISD::OutputArg> &Outs) {
  assert(Subtarget.isSVR4ABI() && Subtarget.isPPC64());

  const unsigned PtrByteSize = 8;
  const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned NumGPRs = array_lengthof(GPR);
  const unsigned NumFPRs = 13;
  const unsigned NumVRs = array_lengthof(VR);
  const unsigned ParamAreaSize = NumGPRs * PtrByteSize;

  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = NumFPRs;
  unsigned AvailableVRs = NumVRs;

  for (const ISD::OutputArg& Param : Outs) {
    if (Param.Flags.isNest()) continue;

    if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
                               PtrByteSize, LinkageSize, ParamAreaSize,
                               NumBytes, AvailableFPRs, AvailableVRs,
                               Subtarget.hasQPX()))
      return true;
  }
  return false;
}

static bool
hasSameArgumentList(const Function *CallerFn, ImmutableCallSite *CS) {
  if (CS->arg_size() != CallerFn->arg_size())
    return false;

  ImmutableCallSite::arg_iterator CalleeArgIter = CS->arg_begin();
  ImmutableCallSite::arg_iterator CalleeArgEnd = CS->arg_end();
  Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();

  for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
    const Value* CalleeArg = *CalleeArgIter;
    const Value* CallerArg = &(*CallerArgIter);
    if (CalleeArg == CallerArg)
      continue;

    // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
    //        tail call @callee([4 x i64] undef, [4 x i64] %b)
    //      }
    // The 1st argument of the callee is undef and has the same type as the
    // caller's corresponding argument.
    if (CalleeArg->getType() == CallerArg->getType() &&
        isa<UndefValue>(CalleeArg))
      continue;

    return false;
  }

  return true;
}

bool
PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
                                    SDValue Callee,
                                    CallingConv::ID CalleeCC,
                                    ImmutableCallSite *CS,
                                    bool isVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SelectionDAG& DAG) const {
  bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;

  if (DisableSCO && !TailCallOpt) return false;

  // Variadic argument functions are not supported.
  if (isVarArg) return false;

  MachineFunction &MF = DAG.getMachineFunction();
  CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();

  // Tail or sibling call optimization (TCO/SCO) requires that the callee and
  // caller have the same calling convention.
  if (CallerCC != CalleeCC) return false;

  // SCO supports only the C and Fast calling conventions.
  if (CalleeCC != CallingConv::Fast && CalleeCC != CallingConv::C)
    return false;

  // A caller with any byval parameter is not supported.
  if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
    return false;

  // A callee with any byval parameter is not supported either.
  // Note: This is a quick workaround, because in some cases, e.g. when the
  // caller's stack size > the callee's stack size, we are still able to
  // apply sibling call optimization.  See: https://reviews.llvm.org/D23441#513574
  if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
    return false;

  // No TCO/SCO on an indirect call, because the caller would have to restore
  // its TOC.
  if (!isFunctionGlobalAddress(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee))
    return false;

  // Check if Callee resides in the same section, because for now, the PPC64
  // SVR4 ABI (ELFv1/ELFv2) doesn't allow tail calls to a symbol that resides
  // in another section.
  // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
  if (!resideInSameSection(MF.getFunction(), Callee, getTargetMachine()))
    return false;

  // TCO allows altering callee ABI, so we don't have to check further.
  if (CalleeCC == CallingConv::Fast && TailCallOpt)
    return true;

  if (DisableSCO) return false;

  // If the callee uses the same argument list as the caller, we can apply
  // SCO in this case.  Otherwise, we need to check whether the callee needs
  // stack for passing arguments.
  if (!hasSameArgumentList(MF.getFunction(), CS) &&
      needStackSlotPassParameters(Subtarget, Outs)) {
    return false;
  }

  return true;
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool
PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                                     CallingConv::ID CalleeCC,
                                                     bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                                     SelectionDAG& DAG) const {
  if (!getTargetMachine().Options.GuaranteedTailCallOpt)
    return false;

  // Variable argument functions are not supported.
  if (isVarArg)
    return false;

  MachineFunction &MF = DAG.getMachineFunction();
  CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
  if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
    // Functions containing byval parameters are not supported.
    for (unsigned i = 0; i != Ins.size(); i++) {
      ISD::ArgFlagsTy Flags = Ins[i].Flags;
      if (Flags.isByVal()) return false;
    }

    // Non-PIC/GOT tail calls are supported.
    if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
      return true;

    // At the moment we can only do local tail calls (in same module, hidden
    // or protected) if we are generating PIC.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
      return G->getGlobal()->hasHiddenVisibility()
          || G->getGlobal()->hasProtectedVisibility();
  }

  return false;
}
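// Example: with -tailcallopt (GuaranteedTailCallOpt), a fastcc caller
// tail-calling a fastcc callee with no byval arguments qualifies under the
// check above even in PIC mode, provided the callee is hidden or protected
// in the same module.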
/// isBLACompatibleAddress - Return the immediate to use if the specified
/// 32-bit value is representable in the immediate field of a BxA instruction.
static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
  if (!C) return nullptr;

  int Addr = C->getZExtValue();
  if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
      SignExtend32<26>(Addr) != Addr)
    return nullptr;  // Top 6 bits have to be sext of immediate.

  return DAG
      .getConstant(
          (int)C->getZExtValue() >> 2, SDLoc(Op),
          DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
      .getNode();
}
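// Example: an absolute callee address of 0x2000 is word-aligned and fits
// the signed 26-bit field, so it is encoded as 0x2000 >> 2; an address of
// 0x2001 fails the low-bit test above and the call stays indirect.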
/// CalculateTailCallArgDest - Remember Argument for later processing. Calculate
/// the position of the argument.
static void
CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
                         SDValue Arg, int SPDiff, unsigned ArgOffset,
                         SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
  int Offset = ArgOffset + SPDiff;
  uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
  int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
  EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
  SDValue FIN = DAG.getFrameIndex(FI, VT);
  TailCallArgumentInfo Info;
  Info.Arg = Arg;
  Info.FrameIdxOp = FIN;
  Info.FrameIdx = FI;
  TailCallArguments.push_back(Info);
}

/// EmitTailCallLoadFPAndRetAddr - Emit loads of the frame pointer and return
/// address stack slots. Returns the chain as result and the loaded values in
/// LROpOut/FPOpOut. Used when tail calling.
SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
    SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
    SDValue &FPOpOut, const SDLoc &dl) const {
  if (SPDiff) {
    // Load the LR and FP stack slots for later adjusting.
    EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
    LROpOut = getReturnAddrFrameIndex(DAG);
    LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
    Chain = SDValue(LROpOut.getNode(), 1);

    // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
    // slot as the FP is never overwritten.
    if (Subtarget.isDarwinABI()) {
      FPOpOut = getFramePointerFrameIndex(DAG);
      FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo());
      Chain = SDValue(FPOpOut.getNode(), 1);
    }
  }
  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
/// specified by "Src" to address "Dst" of size "Size". Alignment information
/// is specified by the specific parameter attribute. The copy will be passed
/// as a byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part that
/// does not fit in registers.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       false, false, false, MachinePointerInfo(),
                       MachinePointerInfo());
}
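// Illustrative sketch (hypothetical IR, not from this file): for a call such
// as
//   %s = alloca %struct.S
//   call void @takes_byval(%struct.S* byval align 8 %s)
// the helper above emits a plain memcpy of getByValSize() bytes from %s into
// a caller-owned stack slot that is then handed to the callee, so the callee
// may modify its copy freely.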
/// LowerMemOpCallTo - Store the argument to the stack or remember it in case
/// of tail calls.
static void LowerMemOpCallTo(
    SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
    SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
    bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
    SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  if (!isTailCall) {
    if (isVector) {
      SDValue StackPtr;
      if (isPPC64)
        StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
      else
        StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
                           DAG.getConstant(ArgOffset, dl, PtrVT));
    }
    MemOpChains.push_back(
        DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
    // Calculate and remember the argument location.
  } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
                                  TailCallArguments);
}

static void
PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
                const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
                SDValue FPOp,
                SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
  // Emit a sequence of copyto/copyfrom virtual registers for arguments that
  // might overwrite each other in case of tail call optimization.
  SmallVector<SDValue, 8> MemOpChains2;
  // Do not flag preceding copytoreg stuff together with the following stuff.
  InFlag = SDValue();
  StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
                                    MemOpChains2, dl);
  if (!MemOpChains2.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);

  // Store the return address to the appropriate stack slot.
  Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);

  // Emit callseq_end just before the tailcall node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
  InFlag = Chain.getValue(1);
}

// Is this global address that of a function that can be called by name (as
// opposed to something that must hold a descriptor for an indirect call)?
static bool isFunctionGlobalAddress(SDValue Callee) {
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
        Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
      return false;

    return G->getGlobal()->getValueType()->isFunctionTy();
  }

  return false;
}
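// Illustrative sketch (hypothetical IR, not from this file):
//   call void @foo()    ; Callee is a GlobalAddressSDNode for a function, so
//                       ; isFunctionGlobalAddress() returns true and a direct
//                       ; call can be emitted.
//   call void %fnptr()  ; Callee is an arbitrary pointer value, so the call
//                       ; must go through the MTCTR/BCTRL sequence built in
//                       ; PrepareCall() below.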
static unsigned
PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain,
            SDValue CallSeqStart, const SDLoc &dl, int SPDiff, bool isTailCall,
            bool isPatchPoint, bool hasNest,
            SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
            SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys,
            ImmutableCallSite *CS, const PPCSubtarget &Subtarget) {
  bool isPPC64 = Subtarget.isPPC64();
  bool isSVR4ABI = Subtarget.isSVR4ABI();
  bool isELFv2ABI = Subtarget.isELFv2ABI();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  NodeTys.push_back(MVT::Other);   // Returns a chain.
  NodeTys.push_back(MVT::Glue);    // Returns a flag for retval copy to use.

  unsigned CallOpc = PPCISD::CALL;

  bool needIndirectCall = true;
  if (!isSVR4ABI || !isPPC64)
    if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) {
      // If this is an absolute destination address, use the munged value.
      Callee = SDValue(Dest, 0);
      needIndirectCall = false;
    }

  // PC-relative references to external symbols should go through $stub,
  // unless we're building with the leopard linker or later, which
  // automatically synthesizes these stubs.
  const TargetMachine &TM = DAG.getTarget();
  const Module *Mod = DAG.getMachineFunction().getFunction()->getParent();
  const GlobalValue *GV = nullptr;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee))
    GV = G->getGlobal();
  bool Local = TM.shouldAssumeDSOLocal(*Mod, GV);
  bool UsePlt = !Local && Subtarget.isTargetELF() && !isPPC64;

  if (isFunctionGlobalAddress(Callee)) {
    GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
    // A call to a TLS address is actually an indirect call to a
    // thread-specific pointer.
    unsigned OpFlags = 0;
    if (UsePlt)
      OpFlags = PPCII::MO_PLT;

    // If the callee is a GlobalAddress/ExternalSymbol node (quite common,
    // every direct call is) turn it into a TargetGlobalAddress /
    // TargetExternalSymbol node so that legalize doesn't hack it.
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
                                        Callee.getValueType(), 0, OpFlags);
    needIndirectCall = false;
  }

  if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    unsigned char OpFlags = 0;

    if (UsePlt)
      OpFlags = PPCII::MO_PLT;

    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(),
                                         OpFlags);
    needIndirectCall = false;
  }

  if (isPatchPoint) {
    // We'll form an invalid direct call when lowering a patchpoint; the full
    // sequence for an indirect call is complicated, and many of the
    // instructions introduced might have side effects (and, thus, can't be
    // removed later). The call itself will be removed as soon as the
    // argument/return lowering is complete, so the fact that it has the wrong
    // kind of operands should not really matter.
    needIndirectCall = false;
  }

  if (needIndirectCall) {
    // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair
    // to do the call, so we can't use PPCISD::CALL.
    SDValue MTCTROps[] = {Chain, Callee, InFlag};

    if (isSVR4ABI && isPPC64 && !isELFv2ABI) {
      // Function pointers in the 64-bit SVR4 ABI do not point to the function
      // entry point, but to the function descriptor (the function entry point
      // address is part of the function descriptor though).
      // The function descriptor is a three doubleword structure with the
      // following fields: function entry point, TOC base address and
      // environment pointer.
      // Thus for a call through a function pointer, the following actions need
      // to be performed:
      //   1. Save the TOC of the caller in the TOC save area of its stack
      //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
      //   2. Load the address of the function entry point from the function
      //      descriptor.
      //   3. Load the TOC of the callee from the function descriptor into r2.
      //   4. Load the environment pointer from the function descriptor into
      //      r11.
      //   5. Branch to the function entry point address.
      //   6. On return of the callee, the TOC of the caller needs to be
      //      restored (this is done in FinishCall()).
      //
      // The loads are scheduled at the beginning of the call sequence, and the
      // register copies are flagged together to ensure that no other
      // operations can be scheduled in between. E.g. without flagging the
      // copies together, a TOC access in the caller could be scheduled between
      // the assignment of the callee TOC and the branch to the callee, which
      // results in the TOC access going through the TOC of the callee instead
      // of going through the TOC of the caller, which leads to incorrect code.
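      // Illustrative sketch of the ELFv1 function descriptor described above
      // (field names are hypothetical; the offsets match the loads emitted
      // below):
      //   struct FunctionDescriptor {
      //     uint64_t EntryPoint; // offset 0:  moved to CTR and branched to
      //     uint64_t TOCBase;    // offset 8:  copied into r2
      //     uint64_t EnvPtr;     // offset 16: copied into r11 (unless 'nest')
      //   };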
      // Load the address of the function entry point from the function
      // descriptor.
      SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1);
      if (LDChain.getValueType() == MVT::Glue)
        LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2);

      auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
                          ? (MachineMemOperand::MODereferenceable |
                             MachineMemOperand::MOInvariant)
                          : MachineMemOperand::MONone;

      MachinePointerInfo MPI(CS ? CS->getCalledValue() : nullptr);
      SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI,
                                        /* Alignment = */ 8, MMOFlags);

      // Load the environment pointer into r11.
      SDValue PtrOff = DAG.getIntPtrConstant(16, dl);
      SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff);
      SDValue LoadEnvPtr =
          DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, MPI.getWithOffset(16),
                      /* Alignment = */ 8, MMOFlags);

      SDValue TOCOff = DAG.getIntPtrConstant(8, dl);
      SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff);
      SDValue TOCPtr =
          DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, MPI.getWithOffset(8),
                      /* Alignment = */ 8, MMOFlags);

      setUsesTOCBasePtr(DAG);
      SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr,
                                        InFlag);
      Chain = TOCVal.getValue(0);
      InFlag = TOCVal.getValue(1);

      // If the function call has an explicit 'nest' parameter, it takes the
      // place of the environment pointer.
      if (!hasNest) {
        SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr,
                                          InFlag);

        Chain = EnvVal.getValue(0);
        InFlag = EnvVal.getValue(1);
      }

      MTCTROps[0] = Chain;
      MTCTROps[1] = LoadFuncPtr;
      MTCTROps[2] = InFlag;
    }

    Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys,
                        makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2));
    InFlag = Chain.getValue(1);

    NodeTys.clear();
    NodeTys.push_back(MVT::Other);
    NodeTys.push_back(MVT::Glue);
    Ops.push_back(Chain);
    CallOpc = PPCISD::BCTRL;
    Callee.setNode(nullptr);
    // Add a use of X11 (holding the environment pointer).
    if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest)
      Ops.push_back(DAG.getRegister(PPC::X11, PtrVT));
    // Add the CTR register as callee so a bctr can be emitted later.
    if (isTailCall)
      Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT));
  }

  // If this is a direct call, pass the chain and the callee.
  if (Callee.getNode()) {
    Ops.push_back(Chain);
    Ops.push_back(Callee);
  }
  // If this is a tail call, add the stack pointer delta.
  if (isTailCall)
    Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live
  // into the call.
  if (isSVR4ABI && isPPC64 && !isPatchPoint) {
    setUsesTOCBasePtr(DAG);
    Ops.push_back(DAG.getRegister(PPC::X2, PtrVT));
  }

  return CallOpc;
}

SDValue PPCTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());
  CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Val = DAG.getCopyFromReg(Chain, dl,
                                     VA.getLocReg(), VA.getLocVT(), InFlag);
    Chain = Val.getValue(1);
    InFlag = Val.getValue(2);

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::AExt:
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::ZExt:
      Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::SExt:
      Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}
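// Illustrative sketch (hypothetical types, schematic DAG): in the AssertZext
// case above, an i8 result returned zero-extended in a 64-bit GPR comes back
// as
//   t1: i64 = CopyFromReg %x3
//   t2: i64 = AssertZext t1, ValueType:i8   ; records that bits 8..63 are 0
//   t3: i8  = truncate t2
// so later combines may drop redundant zero-extensions of t3.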
SDValue PPCTargetLowering::FinishCall(
    CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg,
    bool isPatchPoint, bool hasNest, SelectionDAG &DAG,
    SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue InFlag,
    SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
    unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
    SmallVectorImpl<SDValue> &InVals, ImmutableCallSite *CS) const {
  std::vector<EVT> NodeTys;
  SmallVector<SDValue, 8> Ops;
  unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl,
                                 SPDiff, isTailCall, isPatchPoint, hasNest,
                                 RegsToPass, Ops, NodeTys, CS, Subtarget);

  // Add an implicit use of CR bit 6 for 32-bit SVR4 vararg calls.
  if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64())
    Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));

  // When performing tail call optimization the callee pops its arguments off
  // the stack. Account for this here so these bytes can be pushed back on in
  // PPCFrameLowering::eliminateCallFramePseudoInstr.
  int BytesCalleePops =
      (CallConv == CallingConv::Fast &&
       getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0;

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  // Emit the tail call.
  if (isTailCall) {
    assert(((Callee.getOpcode() == ISD::Register &&
             cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
            Callee.getOpcode() == ISD::TargetExternalSymbol ||
            Callee.getOpcode() == ISD::TargetGlobalAddress ||
            isa<ConstantSDNode>(Callee)) &&
           "Expecting a global address, external symbol, absolute value or "
           "register");

    DAG.getMachineFunction().getFrameInfo().setHasTailCall();
    return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
  }

  // Add a NOP immediately after the branch instruction when using the 64-bit
  // SVR4 ABI. At link time, if caller and callee are in a different module and
  // thus have a different TOC, the call will be replaced with a call to a stub
  // function which saves the current TOC, loads the TOC of the callee and
  // branches to the callee. The NOP will be replaced with a load instruction
  // which restores the TOC of the caller from the TOC save slot of the current
  // stack frame. If caller and callee belong to the same module (and have the
  // same TOC), the NOP will remain unchanged.
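  // Illustrative sketch (schematic assembly; exact offsets depend on the ABI
  // variant): the emitted sequence
  //   bl callee
  //   nop
  // is rewritten by the linker, for a cross-module call, into roughly
  //   bl callee_stub
  //   ld 2, <TOC save offset>(1)   ; e.g. 24 on ELFv2, 40 on ELFv1
  // so r2 again holds the caller's TOC after the call returns.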
  MachineFunction &MF = DAG.getMachineFunction();
  if (!isTailCall && Subtarget.isSVR4ABI() && Subtarget.isPPC64() &&
      !isPatchPoint) {
    if (CallOpc == PPCISD::BCTRL) {
      // This is a call through a function pointer.
      // Restore the caller's TOC from the save area into R2.
      // See PrepareCall() for more information about calls through function
      // pointers in the 64-bit SVR4 ABI.
      // We are using a target-specific load with r2 hard coded, because the
      // result of a target-independent load would never go directly into r2,
      // since r2 is a reserved register (which prevents the register allocator
      // from allocating it), resulting in an additional register being
      // allocated and an unnecessary move instruction being generated.
      CallOpc = PPCISD::BCTRL_LOAD_TOC;

      EVT PtrVT = getPointerTy(DAG.getDataLayout());
      SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
      unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
      SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
      SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);

      // The address needs to go after the chain input but before the flag (or
      // any other variadic arguments).
      Ops.insert(std::next(Ops.begin()), AddTOC);
    } else if (CallOpc == PPCISD::CALL &&
               !resideInSameSection(MF.getFunction(), Callee, DAG.getTarget())) {
      // Otherwise insert a NOP for non-local calls.
      CallOpc = PPCISD::CALL_NOP;
    }
  }

  Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(BytesCalleePops, dl, true),
                             InFlag, dl);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
                         Ins, dl, DAG, InVals);
}
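// Illustrative sketch (schematic SelectionDAG shape, not literal output): a
// lowered non-tail call is bracketed as
//   t0: ch = callseq_start t(chain), NumBytes
//   ...argument stores and CopyToReg nodes...
//   tn: ch,glue = PPCISD::CALL ...
//   tm: ch = callseq_end tn, NumBytes, BytesCalleePops
// and the prolog/epilog pass later turns the bracket into actual stack
// adjustments (or folds them away).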
SDValue
PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;
  bool isPatchPoint = CLI.IsPatchPoint;
  ImmutableCallSite *CS = CLI.CS;

  if (isTailCall) {
    if (Subtarget.useLongCalls() && !(CS && CS->isMustTailCall()))
      isTailCall = false;
    else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
      isTailCall =
          IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS,
                                                   isVarArg, Outs, Ins, DAG);
    else
      isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
                                                     Ins, DAG);
    if (isTailCall) {
      ++NumTailCalls;
      if (!getTargetMachine().Options.GuaranteedTailCallOpt)
        ++NumSiblingCalls;

      assert(isa<GlobalAddressSDNode>(Callee) &&
             "Callee should be an llvm::Function object.");
      DEBUG(
        const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
        const unsigned Width = 80 - strlen("TCO caller: ")
                                  - strlen(", callee linkage: 0, 0");
        dbgs() << "TCO caller: "
               << left_justify(DAG.getMachineFunction().getName(), Width)
               << ", callee linkage: "
               << GV->getVisibility() << ", " << GV->getLinkage() << "\n"
      );
    }
  }

  if (!isTailCall && CS && CS->isMustTailCall())
    report_fatal_error("failed to perform tail call elimination on a call "
                       "site marked musttail");

  // When long calls (i.e. indirect calls) are always used, calls are always
  // made via a function pointer. If we have a function name, first translate
  // it into a pointer.
  if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
      !isTailCall)
    Callee = LowerGlobalAddress(Callee, DAG);

  if (Subtarget.isSVR4ABI()) {
    if (Subtarget.isPPC64())
      return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
                              isTailCall, isPatchPoint, Outs, OutVals, Ins,
                              dl, DAG, InVals, CS);
    else
      return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
                              isTailCall, isPatchPoint, Outs, OutVals, Ins,
                              dl, DAG, InVals, CS);
  }

  return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
                          isTailCall, isPatchPoint, Outs, OutVals, Ins,
                          dl, DAG, InVals, CS);
}

SDValue PPCTargetLowering::LowerCall_32SVR4(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, bool isPatchPoint,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    ImmutableCallSite *CS) const {
  // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
  // of the 32-bit SVR4 ABI stack frame layout.

  assert((CallConv == CallingConv::C ||
          CallConv == CallingConv::Fast) && "Unknown calling convention!");

  unsigned PtrByteSize = 4;

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence the frame pointer will be used for dynamic
  // stack allocation and for restoring the caller's stack pointer in this
  // function's epilogue. This is done because a tail-called function might
  // overwrite the value in this function's (MF) stack pointer stack slot
  // 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, parameter list area and the part of the local variable space which
  // contains copies of aggregates which are passed by value.

  // Assign locations to all of the outgoing arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  PPCCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  // Reserve space for the linkage area on the stack.
  CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
                       PtrByteSize);
  if (useSoftFloat())
    CCInfo.PreAnalyzeCallOperands(Outs);

  if (isVarArg) {
    // Handle fixed and variable vector arguments differently.
    // Fixed vector arguments go into registers as long as registers are
    // available. Variable vector arguments always go into memory.
    unsigned NumArgs = Outs.size();

    for (unsigned i = 0; i != NumArgs; ++i) {
      MVT ArgVT = Outs[i].VT;
      ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
      bool Result;

      if (Outs[i].IsFixed) {
        Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
                               CCInfo);
      } else {
        Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
                                      ArgFlags, CCInfo);
      }

      if (Result) {
#ifndef NDEBUG
        errs() << "Call operand #" << i << " has unhandled type "
               << EVT(ArgVT).getEVTString() << "\n";
#endif
        llvm_unreachable(nullptr);
      }
    }
  } else {
    // All arguments are treated the same.
    CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
  }
  CCInfo.clearWasPPCF128();

  // Assign locations to all of the outgoing aggregate by value arguments.
  SmallVector<CCValAssign, 16> ByValArgLocs;
  CCState CCByValInfo(CallConv, isVarArg, MF, ByValArgLocs, *DAG.getContext());

  // Reserve stack space for the allocations in CCInfo.
  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);

  CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);

  // Size of the linkage area, parameter list area and the part of the local
  // variable space where copies of aggregates which are passed by value are
  // stored.
  unsigned NumBytes = CCByValInfo.getNextStackOffset();

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
  SmallVector<SDValue, 8> MemOpChains;

  bool seenFloatArg = false;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, j = 0, e = ArgLocs.size();
       i != e;
       ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;

    if (Flags.isByVal()) {
      // Argument is an aggregate which is passed by value, thus we need to
      // create a copy of it in the local variable space of the current stack
      // frame (which is the stack frame of the caller) and pass the address of
      // this copy to the callee.
      assert((j < ByValArgLocs.size()) && "Index out of bounds!");
      CCValAssign &ByValVA = ByValArgLocs[j++];
      assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");

      // Memory reserved in the local variable space of the caller's stack
      // frame.
      unsigned LocMemOffset = ByValVA.getLocMemOffset();

      SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
      PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
                           StackPtr, PtrOff);

      // Create a copy of the argument in the local area of the current
      // stack frame.
      SDValue MemcpyCall =
          CreateCopyOfByValArgument(Arg, PtrOff,
                                    CallSeqStart.getNode()->getOperand(0),
                                    Flags, DAG, dl);

      // This must go outside the CALLSEQ_START..END.
      SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
                                                     SDLoc(MemcpyCall));
      DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
                             NewCallSeqStart.getNode());
      Chain = CallSeqStart = NewCallSeqStart;

      // Pass the address of the aggregate copy on the stack either in a
      // physical register or in the parameter list area of the current stack
      // frame to the callee.
      Arg = PtrOff;
    }

    if (VA.isRegLoc()) {
      if (Arg.getValueType() == MVT::i1)
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg);

      seenFloatArg |= VA.getLocVT().isFloatingPoint();
      // Put argument in a physical register.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      // Put argument in the parameter list area of the current stack frame.
      assert(VA.isMemLoc());
      unsigned LocMemOffset = VA.getLocMemOffset();

      if (!isTailCall) {
        SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
                             StackPtr, PtrOff);

        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
      } else {
        // Calculate and remember argument location.
        CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
                                 TailCallArguments);
      }
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // Set CR bit 6 to true if this is a vararg call with floating args passed in
  // registers.
  if (isVarArg) {
    SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Ops[] = { Chain, InFlag };

    Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
                        dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));

    InFlag = Chain.getValue(1);
  }

  if (isTailCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
                    TailCallArguments);

  return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
                    /* unused except on PPC64 ELFv1 */ false, DAG,
                    RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
                    NumBytes, Ins, InVals, CS);
}

// Copy an argument into memory, being careful to do this outside the
// call sequence for the call to which the argument belongs.
SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
    SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
    SelectionDAG &DAG, const SDLoc &dl) const {
  SDValue MemcpyCall =
      CreateCopyOfByValArgument(Arg, PtrOff,
                                CallSeqStart.getNode()->getOperand(0),
                                Flags, DAG, dl);
  // The MEMCPY must go outside the CALLSEQ_START..END.
  int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
  SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
                                                 SDLoc(MemcpyCall));
  DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
                         NewCallSeqStart.getNode());
  return NewCallSeqStart;
}

SDValue PPCTargetLowering::LowerCall_64SVR4(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, bool isPatchPoint,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    ImmutableCallSite *CS) const {
  bool isELFv2ABI = Subtarget.isELFv2ABI();
  bool isLittleEndian = Subtarget.isLittleEndian();
  unsigned NumOps = Outs.size();
  bool hasNest = false;
  bool IsSibCall = false;

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  unsigned PtrByteSize = 8;

  MachineFunction &MF = DAG.getMachineFunction();

  if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
    IsSibCall = true;

  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence the frame pointer will be used for dynamic
  // stack allocation and for restoring the caller's stack pointer in this
  // function's epilogue. This is done because a tail-called function might
  // overwrite the value in this function's (MF) stack pointer stack slot
  // 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  assert(!(CallConv == CallingConv::Fast && isVarArg) &&
         "fastcc not supported on varargs functions");

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes
  // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
  // area is 32 bytes reserved space for [SP][CR][LR][TOC].
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned NumBytes = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  unsigned &QFPR_idx = FPR_idx;

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned NumGPRs = array_lengthof(GPR);
  const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
  const unsigned NumVRs = array_lengthof(VR);
  const unsigned NumQFPRs = NumFPRs;

  // On ELFv2, we can avoid allocating the parameter area if all the arguments
  // can be passed to the callee in registers.
  // For the fast calling convention, there is another check below.
  // Note: we should keep this consistent with LowerFormalArguments_64SVR4().
  bool HasParameterArea = !isELFv2ABI || isVarArg ||
                          CallConv == CallingConv::Fast;
  if (!HasParameterArea) {
    unsigned ParamAreaSize = NumGPRs * PtrByteSize;
    unsigned AvailableFPRs = NumFPRs;
    unsigned AvailableVRs = NumVRs;
    unsigned NumBytesTmp = NumBytes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (Outs[i].Flags.isNest()) continue;
      if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
                                 PtrByteSize, LinkageSize, ParamAreaSize,
                                 NumBytesTmp, AvailableFPRs, AvailableVRs,
                                 Subtarget.hasQPX()))
        HasParameterArea = true;
    }
  }
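  // Illustrative example (hypothetical signature): for an ELFv2 call such as
  //   void f(long a, long b, double d);
  // all three arguments fit in r3, r4 and f1, no argument needs a stack slot,
  // HasParameterArea stays false, and the caller reserves only the 32-byte
  // linkage area instead of the linkage area plus a 64-byte parameter save
  // area.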
  // When using the fast calling convention, we don't provide backing for
  // arguments that will be in registers.
  unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;

  // Add up all the space actually used.
  for (unsigned i = 0; i != NumOps; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;

    if (Flags.isNest())
      continue;

    if (CallConv == CallingConv::Fast) {
      if (Flags.isByVal())
        NumGPRsUsed += (Flags.getByValSize()+7)/8;
      else
        switch (ArgVT.getSimpleVT().SimpleTy) {
        default: llvm_unreachable("Unexpected ValueType for argument!");
        case MVT::i1:
        case MVT::i32:
        case MVT::i64:
          if (++NumGPRsUsed <= NumGPRs)
            continue;
          break;
        case MVT::v4i32:
        case MVT::v8i16:
        case MVT::v16i8:
        case MVT::v2f64:
        case MVT::v2i64:
        case MVT::v1i128:
          if (++NumVRsUsed <= NumVRs)
            continue;
          break;
        case MVT::v4f32:
          // When using QPX, this is handled like a FP register, otherwise, it
          // is an Altivec register.
          if (Subtarget.hasQPX()) {
            if (++NumFPRsUsed <= NumFPRs)
              continue;
          } else {
            if (++NumVRsUsed <= NumVRs)
              continue;
          }
          break;
        case MVT::f32:
        case MVT::f64:
        case MVT::v4f64: // QPX
        case MVT::v4i1:  // QPX
          if (++NumFPRsUsed <= NumFPRs)
            continue;
          break;
        }
    }

    /* Respect alignment of argument on the stack. */
    unsigned Align =
        CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
    NumBytes = ((NumBytes + Align - 1) / Align) * Align;

    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
    if (Flags.isInConsecutiveRegsLast())
      NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
  }

  unsigned NumBytesActuallyUsed = NumBytes;

  // In the old ELFv1 ABI,
  // the prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if it is
  // varargs.
  // Because we cannot tell if this is needed on the caller side, we have to
  // conservatively assume that it is needed. As such, make sure we have at
  // least enough stack space for the caller to store the 8 GPRs.
  // In the ELFv2 ABI, we allocate the parameter area iff a callee
  // really requires memory operands, e.g. a vararg function.
  if (HasParameterArea)
    NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
  else
    NumBytes = LinkageSize;
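  // Illustrative arithmetic (hypothetical values): the rounding used above is
  // the usual align-up idiom. With NumBytes = 52 and Align = 16:
  //   ((52 + 16 - 1) / 16) * 16 = (67 / 16) * 16 = 4 * 16 = 64
  // so the next argument starts at the first 16-byte boundary at or after 52.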
  // A tail call needs the stack to be aligned.
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);

  int SPDiff = 0;

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  if (!IsSibCall)
    SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // To protect arguments on the stack from being clobbered in a tail call,
  // force all the loads to happen before doing any other lowering.
  if (isTailCall)
    Chain = DAG.getStackArgumentTokenFactor(Chain);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);

  // Figure out which arguments are going to go in registers, and which in
  // memory. Also, if this is a vararg function, floating point operations
  // must be stored to our stack, and loaded into integer regs as well, if
  // any integer regs are available for argument passing.
  unsigned ArgOffset = LinkageSize;

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;

  SmallVector<SDValue, 8> MemOpChains;
  for (unsigned i = 0; i != NumOps; ++i) {
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDValue PtrOff;

    // We re-align the argument offset for each argument, except when using the
    // fast calling convention, when we need to make sure we do that only when
    // we'll actually use a stack slot.
    auto ComputePtrOff = [&]() {
      /* Respect alignment of argument on the stack. */
      unsigned Align =
          CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
      ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;

      PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());

      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
    };

    if (CallConv != CallingConv::Fast) {
      ComputePtrOff();

      /* Compute GPR index associated with argument offset. */
      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, NumGPRs);
    }

    // Promote integers to 64-bit values.
    if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
      // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
      unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
      Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
    }

    // FIXME: memcpy is used way more than necessary. Correctness first.
    // Note: "by value" is code for passing a structure by value, not
    // basic types.
    if (Flags.isByVal()) {
      // Note: Size includes alignment padding, so
      //   struct x { short a; char b; }
      // will have Size = 4. With #pragma pack(1), it will have Size = 3.
      // These are the proper values we need for right-justifying the
      // aggregate in a parameter register.
      unsigned Size = Flags.getByValSize();

      // An empty aggregate parameter takes up no storage and no
      // registers.
      if (Size == 0)
        continue;

      if (CallConv == CallingConv::Fast)
        ComputePtrOff();

      // All aggregates smaller than 8 bytes must be passed right-justified.
      if (Size==1 || Size==2 || Size==4) {
        EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
        if (GPR_idx != NumGPRs) {
          SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
                                        MachinePointerInfo(), VT);
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));

          ArgOffset += PtrByteSize;
          continue;
        }
      }

      if (GPR_idx == NumGPRs && Size < 8) {
        SDValue AddPtr = PtrOff;
        if (!isLittleEndian) {
          SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
                                          PtrOff.getValueType());
          AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
        }
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);
        ArgOffset += PtrByteSize;
        continue;
      }
      // Copy the entire object into memory. There are cases where gcc-generated
      // code assumes it is there, even if it could be put entirely into
      // registers. (This is not what the doc says.)

      // FIXME: The above statement is likely due to a misunderstanding of the
      // documents. All arguments must be copied into the parameter area BY
      // THE CALLEE in the event that the callee takes the address of any
      // formal argument. That has not yet been implemented. However, it is
      // reasonable to use the stack area as a staging area for the register
      // load.

      // Skip this for small aggregates, as we will use the same slot for a
      // right-justified copy, below.
      if (Size >= 8)
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);

      // When a register is available, pass a small aggregate right-justified.
      if (Size < 8 && GPR_idx != NumGPRs) {
        // The easiest way to get this right-justified in a register
        // is to copy the structure into the rightmost portion of a
        // local variable slot, then load the whole slot into the
        // register.
        // FIXME: The memcpy seems to produce pretty awful code for
        // small aggregates, particularly for packed ones.
        // FIXME: It would be preferable to use the slot in the
        // parameter save area instead of a new local variable.
        SDValue AddPtr = PtrOff;
        if (!isLittleEndian) {
          SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
          AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
        }
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);

        // Load the slot into the register.
        SDValue Load =
            DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
        MemOpChains.push_back(Load.getValue(1));
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
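        // Illustrative example (hypothetical aggregate): on big-endian, a
        // 3-byte struct is memcpy'd to PtrOff + (8 - 3) = PtrOff + 5, so the
        // 8-byte load of the whole slot above leaves the 3 data bytes in the
        // low-order (rightmost) end of the GPR, as the ABI requires.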
        // Done with this argument.
        ArgOffset += PtrByteSize;
        continue;
      }

      // For aggregates larger than PtrByteSize, copy the pieces of the
      // object that fit into registers from the parameter save area.
      for (unsigned j=0; j<Size; j+=PtrByteSize) {
        SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
        SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
        if (GPR_idx != NumGPRs) {
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
          break;
        }
      }
      continue;
    }

    switch (Arg.getSimpleValueType().SimpleTy) {
    default: llvm_unreachable("Unexpected ValueType for argument!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (Flags.isNest()) {
        // The 'nest' parameter, if any, is passed in R11.
        RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
        hasNest = true;
        break;
      }

      // These can be scalar arguments or elements of an integer array type
      // passed directly. Clang may use those instead of "byval" aggregate
      // types to avoid forcing arguments to memory unnecessarily.
      if (GPR_idx != NumGPRs) {
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
      } else {
        if (CallConv == CallingConv::Fast)
          ComputePtrOff();

        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);
        if (CallConv == CallingConv::Fast)
          ArgOffset += PtrByteSize;
      }
      if (CallConv != CallingConv::Fast)
        ArgOffset += PtrByteSize;
      break;
    case MVT::f32:
    case MVT::f64: {
      // These can be scalar arguments or elements of a float array type
      // passed directly. The latter are used to implement ELFv2 homogeneous
      // float aggregates.

      // Named arguments go into FPRs first, and once they overflow, the
      // remaining arguments go into GPRs and then the parameter save area.
      // Unnamed arguments for vararg functions always go to GPRs and
      // then the parameter save area. For now, put all arguments to vararg
      // routines always in both locations (FPR *and* GPR or stack slot).
      bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs;
      bool NeededLoad = false;

      // First load the argument into the next available FPR.
      if (FPR_idx != NumFPRs)
        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));

      // Next, load the argument into a GPR or stack slot if needed.
      if (!NeedGPROrStack)
        ;
      else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) {
        // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
        // once we support fp <-> gpr moves.

        // In the non-vararg case, this can only ever happen in the
        // presence of f32 array types, since otherwise we never run
        // out of FPRs before running out of GPRs.
        SDValue ArgVal;

        // Double values are always passed in a single GPR.
        if (Arg.getValueType() != MVT::f32) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);

        // Non-array float values are extended and passed in a GPR.
        } else if (!Flags.isInConsecutiveRegs()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);

        // If we have an array of floats, we collect every odd element
        // together with its predecessor into one GPR.
        } else if (ArgOffset % PtrByteSize != 0) {
          SDValue Lo, Hi;
          Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
          Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          if (!isLittleEndian)
            std::swap(Lo, Hi);
          ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);

        // The final element, if even, goes into the first half of a GPR.
        } else if (Flags.isInConsecutiveRegsLast()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
          if (!isLittleEndian)
            ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
                                 DAG.getConstant(32, dl, MVT::i32));

        // Non-final even elements are skipped; they will be handled
        // together with the subsequent argument on the next go-around.
        } else
          ArgVal = SDValue();

        if (ArgVal.getNode())
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
      } else {
        if (CallConv == CallingConv::Fast)
          ComputePtrOff();

        // Single-precision floating-point values are mapped to the
        // second (rightmost) word of the stack doubleword.
        if (Arg.getValueType() == MVT::f32 &&
            !isLittleEndian && !Flags.isInConsecutiveRegs()) {
          SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
          PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
        }

        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);

        NeededLoad = true;
      }
      // When passing an array of floats, the array occupies consecutive
      // space in the argument area; only round up to the next doubleword
      // at the end of the array. Otherwise, each float takes 8 bytes.
      if (CallConv != CallingConv::Fast || NeededLoad) {
        ArgOffset += (Arg.getValueType() == MVT::f32 &&
                      Flags.isInConsecutiveRegs()) ? 4 : 8;
        if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      }
      break;
    }
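    // Illustrative example (hypothetical homogeneous aggregate): for an ELFv2
    // argument of type float[4], the elements occupy consecutive 4-byte slots,
    // so elements 0/1 share one doubleword and 2/3 the next. When GPRs back
    // the array, element 1 (odd: ArgOffset % 8 != 0) is paired with element 0
    // via the BUILD_PAIR above, with the Lo/Hi halves swapped on big-endian
    // targets.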
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
    case MVT::v2f64:
    case MVT::v2i64:
    case MVT::v1i128:
      if (!Subtarget.hasQPX()) {
        // These can be scalar arguments or elements of a vector array type
        // passed directly. The latter are used to implement ELFv2 homogeneous
        // vector aggregates.

        // For a varargs call, named arguments go into VRs or on the stack as
        // usual; unnamed arguments always go to the stack or the corresponding
        // GPRs when within range. For now, we always put the value in both
        // locations (or even all three).
        if (isVarArg) {
          assert(HasParameterArea &&
                 "Parameter area must exist if we have a varargs call.");
          // We could elide this store in the case where the object fits
          // entirely in R registers. Maybe later.
          SDValue Store =
              DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
          MemOpChains.push_back(Store);
          if (VR_idx != NumVRs) {
            SDValue Load =
                DAG.getLoad(MVT::v4f32, dl, Store, PtrOff,
                            MachinePointerInfo());
            MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
          }
          ArgOffset += 16;
          for (unsigned i=0; i<16; i+=PtrByteSize) {
            if (GPR_idx == NumGPRs)
              break;
            SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
                                     DAG.getConstant(i, dl, PtrVT));
            SDValue Load =
                DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
            MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          }
          break;
        }

        // Non-varargs Altivec params go into VRs or on the stack.
        if (VR_idx != NumVRs) {
          RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
        } else {
          if (CallConv == CallingConv::Fast)
            ComputePtrOff();

          assert(HasParameterArea &&
                 "Parameter area must exist to pass an argument in memory.");
          LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                           true, isTailCall, true, MemOpChains,
                           TailCallArguments, dl);
          if (CallConv == CallingConv::Fast)
            ArgOffset += 16;
        }

        if (CallConv != CallingConv::Fast)
          ArgOffset += 16;
        break;
      } // not QPX

      assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 &&
             "Invalid QPX parameter type");

      /* fall through */
    case MVT::v4f64:
    case MVT::v4i1: {
      bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32;
      if (isVarArg) {
        assert(HasParameterArea &&
               "Parameter area must exist if we have a varargs call.");
        // We could elide this store in the case where the object fits
        // entirely in R registers. Maybe later.
        SDValue Store =
            DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
        MemOpChains.push_back(Store);
        if (QFPR_idx != NumQFPRs) {
          SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store,
                                     PtrOff, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load));
        }
        ArgOffset += (IsF32 ? 16 : 32);
        for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) {
          if (GPR_idx == NumGPRs)
            break;
          SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
                                   DAG.getConstant(i, dl, PtrVT));
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        }
        break;
      }

      // Non-varargs QPX params go into registers or on the stack.
      if (QFPR_idx != NumQFPRs) {
        RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg));
      } else {
        if (CallConv == CallingConv::Fast)
          ComputePtrOff();

        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, isTailCall, true, MemOpChains,
                         TailCallArguments, dl);
        if (CallConv == CallingConv::Fast)
          ArgOffset += (IsF32 ? 16 : 32);
      }
      if (CallConv != CallingConv::Fast)
        ArgOffset += (IsF32 ? 16 : 32);
      break;
    }
    }
  }

  assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
         "mismatch in size of parameter area");
  (void)NumBytesActuallyUsed;

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Check if this is an indirect call (MTCTR/BCTRL).
  // See PrepareCall() for more information about calls through function
  // pointers in the 64-bit SVR4 ABI.
  if (!isTailCall && !isPatchPoint &&
      !isFunctionGlobalAddress(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee)) {
    // Load r2 into a virtual register and store it to the TOC save area.
    setUsesTOCBasePtr(DAG);
    SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
    // TOC save area offset.
    unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
    SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
    SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
    Chain = DAG.getStore(
        Val.getValue(1), dl, Val, AddPtr,
        MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
    // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
    // This does not mean the MTCTR instruction must use R12; it's easier
    // to model this as an extra parameter, so do that.
    if (isELFv2ABI && !isPatchPoint)
      RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (isTailCall && !IsSibCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
                    TailCallArguments);

  return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest,
                    DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee,
                    SPDiff, NumBytes, Ins, InVals, CS);
}
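// Illustrative sketch (schematic assembly): for an ELFv2 indirect call, the
// lowering above arranges, in effect,
//   std 2, 24(1)      ; save the caller's TOC pointer
//   mr 12, <target>   ; ELFv2 requires r12 = entry point of the callee
//   mtctr 12
//   bctrl
//   ld 2, 24(1)       ; restore the caller's TOC pointer
// The callee can then derive its own TOC from r12 in its global entry
// prologue.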
5856 if (getTargetMachine().Options.GuaranteedTailCallOpt && 5857 CallConv == CallingConv::Fast) 5858 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 5859 5860 // Count how many bytes are to be pushed on the stack, including the linkage 5861 // area, and parameter passing area. We start with 24/48 bytes, which is 5862 // pre-reserved space for [SP][CR][LR][3 x unused]. 5863 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 5864 unsigned NumBytes = LinkageSize; 5865 5866 // Add up all the space actually used. 5867 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually 5868 // they all go in registers, but we must reserve stack space for them for 5869 // possible use by the caller. In varargs or 64-bit calls, parameters are 5870 // assigned stack space in order, with padding so Altivec parameters are 5871 // 16-byte aligned. 5872 unsigned nAltivecParamsAtEnd = 0; 5873 for (unsigned i = 0; i != NumOps; ++i) { 5874 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5875 EVT ArgVT = Outs[i].VT; 5876 // Varargs Altivec parameters are padded to a 16 byte boundary. 5877 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 5878 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 5879 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) { 5880 if (!isVarArg && !isPPC64) { 5881 // Non-varargs Altivec parameters go after all the non-Altivec 5882 // parameters; handle those later so we know how much padding we need. 5883 nAltivecParamsAtEnd++; 5884 continue; 5885 } 5886 // Varargs and 64-bit Altivec parameters are padded to a 16 byte boundary. 5887 NumBytes = ((NumBytes+15)/16)*16; 5888 } 5889 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 5890 } 5891 5892 // Allow for Altivec parameters at the end, if needed. 5893 if (nAltivecParamsAtEnd) { 5894 NumBytes = ((NumBytes+15)/16)*16; 5895 NumBytes += 16*nAltivecParamsAtEnd; 5896 } 5897 5898 // The prolog code of the callee may store up to 8 GPR argument registers to 5899 // the stack, allowing va_start to index over them in memory if it is varargs. 5900 // Because we cannot tell if this is needed on the caller side, we have to 5901 // conservatively assume that it is needed. As such, make sure we have at 5902 // least enough stack space for the caller to store the 8 GPRs. 5903 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize); 5904 5905 // Tail call needs the stack to be aligned. 5906 if (getTargetMachine().Options.GuaranteedTailCallOpt && 5907 CallConv == CallingConv::Fast) 5908 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes); 5909 5910 // Calculate by how many bytes the stack has to be adjusted in case of tail 5911 // call optimization. 5912 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 5913 5914 // To protect arguments on the stack from being clobbered in a tail call, 5915 // force all the loads to happen before doing any other lowering. 5916 if (isTailCall) 5917 Chain = DAG.getStackArgumentTokenFactor(Chain); 5918 5919 // Adjust the stack pointer for the new arguments... 5920 // These operations are automatically eliminated by the prolog/epilog pass 5921 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 5922 SDValue CallSeqStart = Chain; 5923 5924 // Load the return address and frame pointer so they can be moved somewhere 5925 // else later.
5926 SDValue LROp, FPOp; 5927 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 5928 5929 // Set up a copy of the stack pointer for use loading and storing any 5930 // arguments that may not fit in the registers available for argument 5931 // passing. 5932 SDValue StackPtr; 5933 if (isPPC64) 5934 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 5935 else 5936 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 5937 5938 // Figure out which arguments are going to go in registers, and which in 5939 // memory. Also, if this is a vararg function, floating point arguments 5940 // must be stored to our stack, and loaded into integer regs as well, if 5941 // any integer regs are available for argument passing. 5942 unsigned ArgOffset = LinkageSize; 5943 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 5944 5945 static const MCPhysReg GPR_32[] = { // 32-bit registers. 5946 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 5947 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 5948 }; 5949 static const MCPhysReg GPR_64[] = { // 64-bit registers. 5950 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 5951 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 5952 }; 5953 static const MCPhysReg VR[] = { 5954 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 5955 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 5956 }; 5957 const unsigned NumGPRs = array_lengthof(GPR_32); 5958 const unsigned NumFPRs = 13; 5959 const unsigned NumVRs = array_lengthof(VR); 5960 5961 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 5962 5963 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 5964 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 5965 5966 SmallVector<SDValue, 8> MemOpChains; 5967 for (unsigned i = 0; i != NumOps; ++i) { 5968 SDValue Arg = OutVals[i]; 5969 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5970 5971 // PtrOff will be used to store the current argument to the stack if a 5972 // register cannot be found for it. 5973 SDValue PtrOff; 5974 5975 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 5976 5977 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 5978 5979 // On PPC64, promote integers to 64-bit values. 5980 if (isPPC64 && Arg.getValueType() == MVT::i32) { 5981 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 5982 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 5983 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 5984 } 5985 5986 // FIXME: memcpy is used way more than necessary. Correctness first. 5987 // Note: "by value" is code for passing a structure by value, not 5988 // basic types. 5989 if (Flags.isByVal()) { 5990 unsigned Size = Flags.getByValSize(); 5991 // Very small objects are passed right-justified. Everything else is 5992 // passed left-justified. 5993 if (Size==1 || Size==2) { 5994 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 5995 if (GPR_idx != NumGPRs) { 5996 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 5997 MachinePointerInfo(), VT); 5998 MemOpChains.push_back(Load.getValue(1)); 5999 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6000 6001 ArgOffset += PtrByteSize; 6002 } else { 6003 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 6004 PtrOff.getValueType()); 6005 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 6006 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 6007 CallSeqStart, 6008 Flags, DAG, dl); 6009 ArgOffset += PtrByteSize; 6010 } 6011 continue; 6012 } 6013 // Copy entire object into memory.
There are cases where gcc-generated 6014 // code assumes it is there, even if it could be put entirely into 6015 // registers. (This is not what the doc says.) 6016 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 6017 CallSeqStart, 6018 Flags, DAG, dl); 6019 6020 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 6021 // copy the pieces of the object that fit into registers from the 6022 // parameter save area. 6023 for (unsigned j=0; j<Size; j+=PtrByteSize) { 6024 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 6025 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 6026 if (GPR_idx != NumGPRs) { 6027 SDValue Load = 6028 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo()); 6029 MemOpChains.push_back(Load.getValue(1)); 6030 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6031 ArgOffset += PtrByteSize; 6032 } else { 6033 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 6034 break; 6035 } 6036 } 6037 continue; 6038 } 6039 6040 switch (Arg.getSimpleValueType().SimpleTy) { 6041 default: llvm_unreachable("Unexpected ValueType for argument!"); 6042 case MVT::i1: 6043 case MVT::i32: 6044 case MVT::i64: 6045 if (GPR_idx != NumGPRs) { 6046 if (Arg.getValueType() == MVT::i1) 6047 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg); 6048 6049 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 6050 } else { 6051 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6052 isPPC64, isTailCall, false, MemOpChains, 6053 TailCallArguments, dl); 6054 } 6055 ArgOffset += PtrByteSize; 6056 break; 6057 case MVT::f32: 6058 case MVT::f64: 6059 if (FPR_idx != NumFPRs) { 6060 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 6061 6062 if (isVarArg) { 6063 SDValue Store = 6064 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6065 MemOpChains.push_back(Store); 6066 6067 // Float varargs are always shadowed in available integer registers 6068 if (GPR_idx != NumGPRs) { 6069 SDValue Load = 6070 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo()); 6071 MemOpChains.push_back(Load.getValue(1)); 6072 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6073 } 6074 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 6075 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 6076 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 6077 SDValue Load = 6078 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo()); 6079 MemOpChains.push_back(Load.getValue(1)); 6080 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6081 } 6082 } else { 6083 // If we have any FPRs remaining, we may also have GPRs remaining. 6084 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 6085 // GPRs. 6086 if (GPR_idx != NumGPRs) 6087 ++GPR_idx; 6088 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 6089 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 6090 ++GPR_idx; 6091 } 6092 } else 6093 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6094 isPPC64, isTailCall, false, MemOpChains, 6095 TailCallArguments, dl); 6096 if (isPPC64) 6097 ArgOffset += 8; 6098 else 6099 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 6100 break; 6101 case MVT::v4f32: 6102 case MVT::v4i32: 6103 case MVT::v8i16: 6104 case MVT::v16i8: 6105 if (isVarArg) { 6106 // These go aligned on the stack, or in the corresponding R registers 6107 // when within range. 
The Darwin PPC ABI doc claims they also go in 6108 // V registers; in fact gcc does this only for arguments that are 6109 // prototyped, not for those that match the ... We do it for all 6110 // arguments, seems to work. 6111 while (ArgOffset % 16 !=0) { 6112 ArgOffset += PtrByteSize; 6113 if (GPR_idx != NumGPRs) 6114 GPR_idx++; 6115 } 6116 // We could elide this store in the case where the object fits 6117 // entirely in R registers. Maybe later. 6118 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 6119 DAG.getConstant(ArgOffset, dl, PtrVT)); 6120 SDValue Store = 6121 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6122 MemOpChains.push_back(Store); 6123 if (VR_idx != NumVRs) { 6124 SDValue Load = 6125 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo()); 6126 MemOpChains.push_back(Load.getValue(1)); 6127 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 6128 } 6129 ArgOffset += 16; 6130 for (unsigned i=0; i<16; i+=PtrByteSize) { 6131 if (GPR_idx == NumGPRs) 6132 break; 6133 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 6134 DAG.getConstant(i, dl, PtrVT)); 6135 SDValue Load = 6136 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 6137 MemOpChains.push_back(Load.getValue(1)); 6138 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6139 } 6140 break; 6141 } 6142 6143 // Non-varargs Altivec params generally go in registers, but have 6144 // stack space allocated at the end. 6145 if (VR_idx != NumVRs) { 6146 // Doesn't have GPR space allocated. 6147 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 6148 } else if (nAltivecParamsAtEnd==0) { 6149 // We are emitting Altivec params in order. 6150 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6151 isPPC64, isTailCall, true, MemOpChains, 6152 TailCallArguments, dl); 6153 ArgOffset += 16; 6154 } 6155 break; 6156 } 6157 } 6158 // If all Altivec parameters fit in registers, as they usually do, 6159 // they get stack space following the non-Altivec parameters. We 6160 // don't track this here because nobody below needs it. 6161 // If there are more Altivec parameters than fit in registers emit 6162 // the stores here. 6163 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 6164 unsigned j = 0; 6165 // Offset is aligned; skip 1st 12 params which go in V registers. 6166 ArgOffset = ((ArgOffset+15)/16)*16; 6167 ArgOffset += 12*16; 6168 for (unsigned i = 0; i != NumOps; ++i) { 6169 SDValue Arg = OutVals[i]; 6170 EVT ArgType = Outs[i].VT; 6171 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 6172 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 6173 if (++j > NumVRs) { 6174 SDValue PtrOff; 6175 // We are emitting Altivec params in order. 6176 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6177 isPPC64, isTailCall, true, MemOpChains, 6178 TailCallArguments, dl); 6179 ArgOffset += 16; 6180 } 6181 } 6182 } 6183 } 6184 6185 if (!MemOpChains.empty()) 6186 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 6187 6188 // On Darwin, R12 must contain the address of an indirect callee. This does 6189 // not mean the MTCTR instruction must use R12; it's easier to model this as 6190 // an extra parameter, so do that. 6191 if (!isTailCall && 6192 !isFunctionGlobalAddress(Callee) && 6193 !isa<ExternalSymbolSDNode>(Callee) && 6194 !isBLACompatibleAddress(Callee, DAG)) 6195 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? 
PPC::X12 : 6196 PPC::R12), Callee)); 6197 6198 // Build a sequence of copy-to-reg nodes chained together with token chain 6199 // and flag operands which copy the outgoing args into the appropriate regs. 6200 SDValue InFlag; 6201 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 6202 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 6203 RegsToPass[i].second, InFlag); 6204 InFlag = Chain.getValue(1); 6205 } 6206 6207 if (isTailCall) 6208 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 6209 TailCallArguments); 6210 6211 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, 6212 /* unused except on PPC64 ELFv1 */ false, DAG, 6213 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 6214 NumBytes, Ins, InVals, CS); 6215 } 6216 6217 bool 6218 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 6219 MachineFunction &MF, bool isVarArg, 6220 const SmallVectorImpl<ISD::OutputArg> &Outs, 6221 LLVMContext &Context) const { 6222 SmallVector<CCValAssign, 16> RVLocs; 6223 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 6224 return CCInfo.CheckReturn(Outs, RetCC_PPC); 6225 } 6226 6227 SDValue 6228 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 6229 bool isVarArg, 6230 const SmallVectorImpl<ISD::OutputArg> &Outs, 6231 const SmallVectorImpl<SDValue> &OutVals, 6232 const SDLoc &dl, SelectionDAG &DAG) const { 6233 SmallVector<CCValAssign, 16> RVLocs; 6234 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 6235 *DAG.getContext()); 6236 CCInfo.AnalyzeReturn(Outs, RetCC_PPC); 6237 6238 SDValue Flag; 6239 SmallVector<SDValue, 4> RetOps(1, Chain); 6240 6241 // Copy the result values into the output registers. 6242 for (unsigned i = 0; i != RVLocs.size(); ++i) { 6243 CCValAssign &VA = RVLocs[i]; 6244 assert(VA.isRegLoc() && "Can only return in registers!"); 6245 6246 SDValue Arg = OutVals[i]; 6247 6248 switch (VA.getLocInfo()) { 6249 default: llvm_unreachable("Unknown loc info!"); 6250 case CCValAssign::Full: break; 6251 case CCValAssign::AExt: 6252 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 6253 break; 6254 case CCValAssign::ZExt: 6255 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 6256 break; 6257 case CCValAssign::SExt: 6258 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 6259 break; 6260 } 6261 6262 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 6263 Flag = Chain.getValue(1); 6264 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 6265 } 6266 6267 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 6268 const MCPhysReg *I = 6269 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); 6270 if (I) { 6271 for (; *I; ++I) { 6272 6273 if (PPC::G8RCRegClass.contains(*I)) 6274 RetOps.push_back(DAG.getRegister(*I, MVT::i64)); 6275 else if (PPC::F8RCRegClass.contains(*I)) 6276 RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64))); 6277 else if (PPC::CRRCRegClass.contains(*I)) 6278 RetOps.push_back(DAG.getRegister(*I, MVT::i1)); 6279 else if (PPC::VRRCRegClass.contains(*I)) 6280 RetOps.push_back(DAG.getRegister(*I, MVT::Other)); 6281 else 6282 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 6283 } 6284 } 6285 6286 RetOps[0] = Chain; // Update chain. 6287 6288 // Add the flag if we have it. 
6289 if (Flag.getNode()) 6290 RetOps.push_back(Flag); 6291 6292 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps); 6293 } 6294 6295 SDValue 6296 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, 6297 SelectionDAG &DAG) const { 6298 SDLoc dl(Op); 6299 6300 // Get the correct type for integers. 6301 EVT IntVT = Op.getValueType(); 6302 6303 // Get the inputs. 6304 SDValue Chain = Op.getOperand(0); 6305 SDValue FPSIdx = getFramePointerFrameIndex(DAG); 6306 // Build a DYNAREAOFFSET node. 6307 SDValue Ops[2] = {Chain, FPSIdx}; 6308 SDVTList VTs = DAG.getVTList(IntVT); 6309 return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops); 6310 } 6311 6312 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, 6313 SelectionDAG &DAG) const { 6314 // When we pop the dynamic allocation we need to restore the SP link. 6315 SDLoc dl(Op); 6316 6317 // Get the correct type for pointers. 6318 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6319 6320 // Construct the stack pointer operand. 6321 bool isPPC64 = Subtarget.isPPC64(); 6322 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1; 6323 SDValue StackPtr = DAG.getRegister(SP, PtrVT); 6324 6325 // Get the operands for the STACKRESTORE. 6326 SDValue Chain = Op.getOperand(0); 6327 SDValue SaveSP = Op.getOperand(1); 6328 6329 // Load the old link SP. 6330 SDValue LoadLinkSP = 6331 DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo()); 6332 6333 // Restore the stack pointer. 6334 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP); 6335 6336 // Store the old link SP. 6337 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo()); 6338 } 6339 6340 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const { 6341 MachineFunction &MF = DAG.getMachineFunction(); 6342 bool isPPC64 = Subtarget.isPPC64(); 6343 EVT PtrVT = getPointerTy(MF.getDataLayout()); 6344 6345 // Get the current return address save index. This is used primarily 6346 // when lowering RETURNADDR. 6347 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 6348 int RASI = FI->getReturnAddrSaveIndex(); 6349 6350 // If the return address save index hasn't been defined yet. 6351 if (!RASI) { 6352 // Find out the fixed offset of the return address save area. 6353 int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset(); 6354 // Allocate the frame index for the return address save area. 6355 RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false); 6356 // Save the result. 6357 FI->setReturnAddrSaveIndex(RASI); 6358 } 6359 return DAG.getFrameIndex(RASI, PtrVT); 6360 } 6361 6362 SDValue 6363 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const { 6364 MachineFunction &MF = DAG.getMachineFunction(); 6365 bool isPPC64 = Subtarget.isPPC64(); 6366 EVT PtrVT = getPointerTy(MF.getDataLayout()); 6367 6368 // Get the current frame pointer save index. The users of this index will be 6369 // primarily DYNALLOC instructions. 6370 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 6371 int FPSI = FI->getFramePointerSaveIndex(); 6372 6373 // If the frame pointer save index hasn't been defined yet. 6374 if (!FPSI) { 6375 // Find out the fixed offset of the frame pointer save area. 6376 int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset(); 6377 // Allocate the frame index for the frame pointer save area. 6378 FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true); 6379 // Save the result.
6380 FI->setFramePointerSaveIndex(FPSI); 6381 } 6382 return DAG.getFrameIndex(FPSI, PtrVT); 6383 } 6384 6385 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 6386 SelectionDAG &DAG) const { 6387 // Get the inputs. 6388 SDValue Chain = Op.getOperand(0); 6389 SDValue Size = Op.getOperand(1); 6390 SDLoc dl(Op); 6391 6392 // Get the correct type for pointers. 6393 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6394 // Negate the size. 6395 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT, 6396 DAG.getConstant(0, dl, PtrVT), Size); 6397 // Construct a node for the frame pointer save index. 6398 SDValue FPSIdx = getFramePointerFrameIndex(DAG); 6399 // Build a DYNALLOC node. 6400 SDValue Ops[3] = { Chain, NegSize, FPSIdx }; 6401 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other); 6402 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops); 6403 } 6404 6405 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op, 6406 SelectionDAG &DAG) const { 6407 MachineFunction &MF = DAG.getMachineFunction(); 6408 6409 bool isPPC64 = Subtarget.isPPC64(); 6410 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6411 6412 int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false); 6413 return DAG.getFrameIndex(FI, PtrVT); 6414 } 6415 6416 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, 6417 SelectionDAG &DAG) const { 6418 SDLoc DL(Op); 6419 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL, 6420 DAG.getVTList(MVT::i32, MVT::Other), 6421 Op.getOperand(0), Op.getOperand(1)); 6422 } 6423 6424 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, 6425 SelectionDAG &DAG) const { 6426 SDLoc DL(Op); 6427 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other, 6428 Op.getOperand(0), Op.getOperand(1)); 6429 } 6430 6431 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 6432 if (Op.getValueType().isVector()) 6433 return LowerVectorLoad(Op, DAG); 6434 6435 assert(Op.getValueType() == MVT::i1 && 6436 "Custom lowering only for i1 loads"); 6437 6438 // First, load 8 bits into 32 bits, then truncate to 1 bit. 6439 6440 SDLoc dl(Op); 6441 LoadSDNode *LD = cast<LoadSDNode>(Op); 6442 6443 SDValue Chain = LD->getChain(); 6444 SDValue BasePtr = LD->getBasePtr(); 6445 MachineMemOperand *MMO = LD->getMemOperand(); 6446 6447 SDValue NewLD = 6448 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain, 6449 BasePtr, MVT::i8, MMO); 6450 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD); 6451 6452 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) }; 6453 return DAG.getMergeValues(Ops, dl); 6454 } 6455 6456 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 6457 if (Op.getOperand(1).getValueType().isVector()) 6458 return LowerVectorStore(Op, DAG); 6459 6460 assert(Op.getOperand(1).getValueType() == MVT::i1 && 6461 "Custom lowering only for i1 stores"); 6462 6463 // First, zero extend to 32 bits, then use a truncating store to 8 bits.
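// For illustration, the resulting DAG shape (informal notation):
//   (store i1 %v, %ptr)
//     -> (truncstore<i8> (zero_extend %v to iPTR), %ptr)
// mirroring the extending i8 load plus truncate used in LowerLOAD above.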
6464 6465 SDLoc dl(Op); 6466 StoreSDNode *ST = cast<StoreSDNode>(Op); 6467 6468 SDValue Chain = ST->getChain(); 6469 SDValue BasePtr = ST->getBasePtr(); 6470 SDValue Value = ST->getValue(); 6471 MachineMemOperand *MMO = ST->getMemOperand(); 6472 6473 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()), 6474 Value); 6475 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO); 6476 } 6477 6478 // FIXME: Remove this once the ANDI glue bug is fixed: 6479 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 6480 assert(Op.getValueType() == MVT::i1 && 6481 "Custom lowering only for i1 results"); 6482 6483 SDLoc DL(Op); 6484 return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1, 6485 Op.getOperand(0)); 6486 } 6487 6488 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when 6489 /// possible. 6490 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 6491 // Not FP? Not a fsel. 6492 if (!Op.getOperand(0).getValueType().isFloatingPoint() || 6493 !Op.getOperand(2).getValueType().isFloatingPoint()) 6494 return Op; 6495 6496 // We might be able to do better than this under some circumstances, but in 6497 // general, fsel-based lowering of select is a finite-math-only optimization. 6498 // For more information, see section F.3 of the 2.06 ISA specification. 6499 if (!DAG.getTarget().Options.NoInfsFPMath || 6500 !DAG.getTarget().Options.NoNaNsFPMath) 6501 return Op; 6502 // TODO: Propagate flags from the select rather than global settings. 6503 SDNodeFlags Flags; 6504 Flags.setNoInfs(true); 6505 Flags.setNoNaNs(true); 6506 6507 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 6508 6509 EVT ResVT = Op.getValueType(); 6510 EVT CmpVT = Op.getOperand(0).getValueType(); 6511 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 6512 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3); 6513 SDLoc dl(Op); 6514 6515 // If the RHS of the comparison is a 0.0, we don't need to do the 6516 // subtraction at all. 6517 SDValue Sel1; 6518 if (isFloatingPointZero(RHS)) 6519 switch (CC) { 6520 default: break; // SETUO etc aren't handled by fsel. 
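// Note on the cases below: each pair first swaps TV/FV and then deliberately
// falls through, using select_cc(cc, TV, FV) == select_cc(!cc, FV, TV);
// fsel itself only tests whether its first operand is >= 0.0.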
6521 case ISD::SETNE: 6522 std::swap(TV, FV); 6523 case ISD::SETEQ: 6524 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 6525 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 6526 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 6527 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 6528 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 6529 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6530 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV); 6531 case ISD::SETULT: 6532 case ISD::SETLT: 6533 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 6534 case ISD::SETOGE: 6535 case ISD::SETGE: 6536 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 6537 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 6538 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 6539 case ISD::SETUGT: 6540 case ISD::SETGT: 6541 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 6542 case ISD::SETOLE: 6543 case ISD::SETLE: 6544 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 6545 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 6546 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6547 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV); 6548 } 6549 6550 SDValue Cmp; 6551 switch (CC) { 6552 default: break; // SETUO etc aren't handled by fsel. 6553 case ISD::SETNE: 6554 std::swap(TV, FV); 6555 case ISD::SETEQ: 6556 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 6557 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6558 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6559 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6560 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 6561 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 6562 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6563 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 6564 case ISD::SETULT: 6565 case ISD::SETLT: 6566 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 6567 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6568 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6569 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6570 case ISD::SETOGE: 6571 case ISD::SETGE: 6572 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 6573 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6574 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6575 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6576 case ISD::SETUGT: 6577 case ISD::SETGT: 6578 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags); 6579 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6580 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6581 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6582 case ISD::SETOLE: 6583 case ISD::SETLE: 6584 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags); 6585 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6586 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6587 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6588 } 6589 return Op; 6590 } 6591 6592 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI, 6593 SelectionDAG &DAG, 6594 const SDLoc &dl) const { 6595 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6596 SDValue Src = Op.getOperand(0); 6597 if (Src.getValueType() == MVT::f32) 6598 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6599 6600 SDValue Tmp; 
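// For reference, the opcode choice made by the switch below:
//   i32 signed   -> FCTIWZ
//   i32 unsigned -> FCTIWUZ with FPCVT, otherwise FCTIDZ
//   i64 signed   -> FCTIDZ
//   i64 unsigned -> FCTIDUZ (requires FPCVT, see the assert)
// all truncating (round-toward-zero) conversions, as FP_TO_INT requires.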
6601 switch (Op.getSimpleValueType().SimpleTy) { 6602 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6603 case MVT::i32: 6604 Tmp = DAG.getNode( 6605 Op.getOpcode() == ISD::FP_TO_SINT 6606 ? PPCISD::FCTIWZ 6607 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6608 dl, MVT::f64, Src); 6609 break; 6610 case MVT::i64: 6611 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6612 "i64 FP_TO_UINT is supported only with FPCVT"); 6613 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 6614 PPCISD::FCTIDUZ, 6615 dl, MVT::f64, Src); 6616 break; 6617 } 6618 6619 // Convert the FP value to an int value through memory. 6620 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 6621 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()); 6622 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 6623 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 6624 MachinePointerInfo MPI = 6625 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); 6626 6627 // Emit a store to the stack slot. 6628 SDValue Chain; 6629 if (i32Stack) { 6630 MachineFunction &MF = DAG.getMachineFunction(); 6631 MachineMemOperand *MMO = 6632 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 6633 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 6634 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 6635 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 6636 } else 6637 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI); 6638 6639 // Result is a load from the stack slot. If loading 4 bytes, make sure to 6640 // add in a bias on big endian. 6641 if (Op.getValueType() == MVT::i32 && !i32Stack) { 6642 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 6643 DAG.getConstant(4, dl, FIPtr.getValueType())); 6644 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4); 6645 } 6646 6647 RLI.Chain = Chain; 6648 RLI.Ptr = FIPtr; 6649 RLI.MPI = MPI; 6650 } 6651 6652 /// \brief Custom lowers floating point to integer conversions to use 6653 /// the direct move instructions available in ISA 2.07 to avoid the 6654 /// need for load/store combinations. 6655 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 6656 SelectionDAG &DAG, 6657 const SDLoc &dl) const { 6658 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6659 SDValue Src = Op.getOperand(0); 6660 6661 if (Src.getValueType() == MVT::f32) 6662 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6663 6664 SDValue Tmp; 6665 switch (Op.getSimpleValueType().SimpleTy) { 6666 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6667 case MVT::i32: 6668 Tmp = DAG.getNode( 6669 Op.getOpcode() == ISD::FP_TO_SINT 6670 ? PPCISD::FCTIWZ 6671 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6672 dl, MVT::f64, Src); 6673 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp); 6674 break; 6675 case MVT::i64: 6676 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6677 "i64 FP_TO_UINT is supported only with FPCVT"); 6678 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? 
PPCISD::FCTIDZ : 6679 PPCISD::FCTIDUZ, 6680 dl, MVT::f64, Src); 6681 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp); 6682 break; 6683 } 6684 return Tmp; 6685 } 6686 6687 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 6688 const SDLoc &dl) const { 6689 if (Subtarget.hasDirectMove() && Subtarget.isPPC64()) 6690 return LowerFP_TO_INTDirectMove(Op, DAG, dl); 6691 6692 ReuseLoadInfo RLI; 6693 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6694 6695 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, 6696 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 6697 } 6698 6699 // We're trying to insert a regular store, S, and then a load, L. If the 6700 // incoming value, O, is a load, we might just be able to have our load use the 6701 // address used by O. However, we don't know if anything else will store to 6702 // that address before we can load from it. To prevent this situation, we need 6703 // to insert our load, L, into the chain as a peer of O. To do this, we give L 6704 // the same chain operand as O, we create a token factor from the chain results 6705 // of O and L, and we replace all uses of O's chain result with that token 6706 // factor (see spliceIntoChain below for this last part). 6707 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT, 6708 ReuseLoadInfo &RLI, 6709 SelectionDAG &DAG, 6710 ISD::LoadExtType ET) const { 6711 SDLoc dl(Op); 6712 if (ET == ISD::NON_EXTLOAD && 6713 (Op.getOpcode() == ISD::FP_TO_UINT || 6714 Op.getOpcode() == ISD::FP_TO_SINT) && 6715 isOperationLegalOrCustom(Op.getOpcode(), 6716 Op.getOperand(0).getValueType())) { 6717 6718 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6719 return true; 6720 } 6721 6722 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op); 6723 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() || 6724 LD->isNonTemporal()) 6725 return false; 6726 if (LD->getMemoryVT() != MemVT) 6727 return false; 6728 6729 RLI.Ptr = LD->getBasePtr(); 6730 if (LD->isIndexed() && !LD->getOffset().isUndef()) { 6731 assert(LD->getAddressingMode() == ISD::PRE_INC && 6732 "Non-pre-inc AM on PPC?"); 6733 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr, 6734 LD->getOffset()); 6735 } 6736 6737 RLI.Chain = LD->getChain(); 6738 RLI.MPI = LD->getPointerInfo(); 6739 RLI.IsDereferenceable = LD->isDereferenceable(); 6740 RLI.IsInvariant = LD->isInvariant(); 6741 RLI.Alignment = LD->getAlignment(); 6742 RLI.AAInfo = LD->getAAInfo(); 6743 RLI.Ranges = LD->getRanges(); 6744 6745 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1); 6746 return true; 6747 } 6748 6749 // Given the head of the old chain, ResChain, insert a token factor containing 6750 // it and NewResChain, and make users of ResChain now be users of that token 6751 // factor. 
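// For example (an informal sketch): if ResChain previously had users U1 and
// U2, then after splicing
//   TF = TokenFactor(ResChain, NewResChain);  U1 and U2 now use TF
// so anything ordered after the old chain head is now also ordered after
// NewResChain.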
6752 void PPCTargetLowering::spliceIntoChain(SDValue ResChain, 6753 SDValue NewResChain, 6754 SelectionDAG &DAG) const { 6755 if (!ResChain) 6756 return; 6757 6758 SDLoc dl(NewResChain); 6759 6760 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 6761 NewResChain, DAG.getUNDEF(MVT::Other)); 6762 assert(TF.getNode() != NewResChain.getNode() && 6763 "A new TF really is required here"); 6764 6765 DAG.ReplaceAllUsesOfValueWith(ResChain, TF); 6766 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain); 6767 } 6768 6769 /// \brief Analyze the profitability of a direct move: prefer a float load 6770 /// over an int load plus a direct move when there is no integer use of the 6771 /// int load's value. 6772 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const { 6773 SDNode *Origin = Op.getOperand(0).getNode(); 6774 if (Origin->getOpcode() != ISD::LOAD) 6775 return true; 6776 6777 // If there is no LXSIBZX/LXSIHZX (e.g. on Power8), prefer a direct move 6778 // if the memory size is 1 or 2 bytes. 6779 MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand(); 6780 if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2) 6781 return true; 6782 6783 for (SDNode::use_iterator UI = Origin->use_begin(), 6784 UE = Origin->use_end(); 6785 UI != UE; ++UI) { 6786 6787 // Only look at the users of the loaded value. 6788 if (UI.getUse().get().getResNo() != 0) 6789 continue; 6790 6791 if (UI->getOpcode() != ISD::SINT_TO_FP && 6792 UI->getOpcode() != ISD::UINT_TO_FP) 6793 return true; 6794 } 6795 6796 return false; 6797 } 6798 6799 /// \brief Custom lowers integer to floating point conversions to use 6800 /// the direct move instructions available in ISA 2.07 to avoid the 6801 /// need for load/store combinations. 6802 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op, 6803 SelectionDAG &DAG, 6804 const SDLoc &dl) const { 6805 assert((Op.getValueType() == MVT::f32 || 6806 Op.getValueType() == MVT::f64) && 6807 "Invalid floating point type as target of conversion"); 6808 assert(Subtarget.hasFPCVT() && 6809 "Int to FP conversions with direct moves require FPCVT"); 6810 SDValue FP; 6811 SDValue Src = Op.getOperand(0); 6812 bool SinglePrec = Op.getValueType() == MVT::f32; 6813 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32; 6814 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP; 6815 unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) : 6816 (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU); 6817 6818 if (WordInt) { 6819 FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ, 6820 dl, MVT::f64, Src); 6821 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 6822 } 6823 else { 6824 FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src); 6825 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 6826 } 6827 6828 return FP; 6829 } 6830 6831 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, 6832 SelectionDAG &DAG) const { 6833 SDLoc dl(Op); 6834 6835 if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) { 6836 if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64) 6837 return SDValue(); 6838 6839 SDValue Value = Op.getOperand(0); 6840 // The values are now known to be -1 (false) or 1 (true). To convert this 6841 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
6842 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 6843 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 6844 6845 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 6846 6847 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 6848 6849 if (Op.getValueType() != MVT::v4f64) 6850 Value = DAG.getNode(ISD::FP_ROUND, dl, 6851 Op.getValueType(), Value, 6852 DAG.getIntPtrConstant(1, dl)); 6853 return Value; 6854 } 6855 6856 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 6857 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 6858 return SDValue(); 6859 6860 if (Op.getOperand(0).getValueType() == MVT::i1) 6861 return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0), 6862 DAG.getConstantFP(1.0, dl, Op.getValueType()), 6863 DAG.getConstantFP(0.0, dl, Op.getValueType())); 6864 6865 // If we have direct moves, we can do all the conversion, skip the store/load 6866 // however, without FPCVT we can't do most conversions. 6867 if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) && 6868 Subtarget.isPPC64() && Subtarget.hasFPCVT()) 6869 return LowerINT_TO_FPDirectMove(Op, DAG, dl); 6870 6871 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) && 6872 "UINT_TO_FP is supported only with FPCVT"); 6873 6874 // If we have FCFIDS, then use it when converting to single-precision. 6875 // Otherwise, convert to double-precision and then round. 6876 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 6877 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS 6878 : PPCISD::FCFIDS) 6879 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU 6880 : PPCISD::FCFID); 6881 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 6882 ? MVT::f32 6883 : MVT::f64; 6884 6885 if (Op.getOperand(0).getValueType() == MVT::i64) { 6886 SDValue SINT = Op.getOperand(0); 6887 // When converting to single-precision, we actually need to convert 6888 // to double-precision first and then round to single-precision. 6889 // To avoid double-rounding effects during that operation, we have 6890 // to prepare the input operand. Bits that might be truncated when 6891 // converting to double-precision are replaced by a bit that won't 6892 // be lost at this stage, but is below the single-precision rounding 6893 // position. 6894 // 6895 // However, if -enable-unsafe-fp-math is in effect, accept double 6896 // rounding to avoid the extra overhead. 6897 if (Op.getValueType() == MVT::f32 && 6898 !Subtarget.hasFPCVT() && 6899 !DAG.getTarget().Options.UnsafeFPMath) { 6900 6901 // Twiddle input to make sure the low 11 bits are zero. (If this 6902 // is the case, we are guaranteed the value will fit into the 53 bit 6903 // mantissa of an IEEE double-precision value without rounding.) 6904 // If any of those low 11 bits were not zero originally, make sure 6905 // bit 12 (value 2048) is set instead, so that the final rounding 6906 // to single-precision gets the correct result. 
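// Worked example on the low 12 bits (a sketch; only these bits change):
//   ...0x001: (1 & 2047) + 2047 = 2048, OR'ed back in, AND ~2047 -> ...0x800
//   ...0x000: (0 & 2047) + 2047 = 2047, OR'ed back in, AND ~2047 -> ...0x000
// i.e. bit 11 becomes a sticky bit for any nonzero low 11 bits.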
6907 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 6908 SINT, DAG.getConstant(2047, dl, MVT::i64)); 6909 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 6910 Round, DAG.getConstant(2047, dl, MVT::i64)); 6911 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 6912 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 6913 Round, DAG.getConstant(-2048, dl, MVT::i64)); 6914 6915 // However, we cannot use that value unconditionally: if the magnitude 6916 // of the input value is small, the bit-twiddling we did above might 6917 // end up visibly changing the output. Fortunately, in that case, we 6918 // don't need to twiddle bits since the original input will convert 6919 // exactly to double-precision floating-point already. Therefore, 6920 // construct a conditional to use the original value if the top 11 6921 // bits are all sign-bit copies, and use the rounded value computed 6922 // above otherwise. 6923 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 6924 SINT, DAG.getConstant(53, dl, MVT::i32)); 6925 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 6926 Cond, DAG.getConstant(1, dl, MVT::i64)); 6927 Cond = DAG.getSetCC(dl, MVT::i32, 6928 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT); 6929 6930 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 6931 } 6932 6933 ReuseLoadInfo RLI; 6934 SDValue Bits; 6935 6936 MachineFunction &MF = DAG.getMachineFunction(); 6937 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 6938 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, 6939 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 6940 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6941 } else if (Subtarget.hasLFIWAX() && 6942 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) { 6943 MachineMemOperand *MMO = 6944 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6945 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6946 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6947 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl, 6948 DAG.getVTList(MVT::f64, MVT::Other), 6949 Ops, MVT::i32, MMO); 6950 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6951 } else if (Subtarget.hasFPCVT() && 6952 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) { 6953 MachineMemOperand *MMO = 6954 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6955 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6956 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6957 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl, 6958 DAG.getVTList(MVT::f64, MVT::Other), 6959 Ops, MVT::i32, MMO); 6960 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6961 } else if (((Subtarget.hasLFIWAX() && 6962 SINT.getOpcode() == ISD::SIGN_EXTEND) || 6963 (Subtarget.hasFPCVT() && 6964 SINT.getOpcode() == ISD::ZERO_EXTEND)) && 6965 SINT.getOperand(0).getValueType() == MVT::i32) { 6966 MachineFrameInfo &MFI = MF.getFrameInfo(); 6967 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6968 6969 int FrameIdx = MFI.CreateStackObject(4, 4, false); 6970 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6971 6972 SDValue Store = 6973 DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx, 6974 MachinePointerInfo::getFixedStack( 6975 DAG.getMachineFunction(), FrameIdx)); 6976 6977 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 6978 "Expected an i32 store"); 6979 6980 RLI.Ptr = FIdx; 6981 RLI.Chain = Store; 6982 RLI.MPI = 6983 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 6984 RLI.Alignment = 4; 6985 6986 MachineMemOperand *MMO = 6987 MF.getMachineMemOperand(RLI.MPI, 
MachineMemOperand::MOLoad, 4, 6988 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6989 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6990 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ? 6991 PPCISD::LFIWZX : PPCISD::LFIWAX, 6992 dl, DAG.getVTList(MVT::f64, MVT::Other), 6993 Ops, MVT::i32, MMO); 6994 } else 6995 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 6996 6997 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 6998 6999 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 7000 FP = DAG.getNode(ISD::FP_ROUND, dl, 7001 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 7002 return FP; 7003 } 7004 7005 assert(Op.getOperand(0).getValueType() == MVT::i32 && 7006 "Unhandled INT_TO_FP type in custom expander!"); 7007 // Since we only generate this in 64-bit mode, we can take advantage of 7008 // 64-bit registers. In particular, sign extend the input value into the 7009 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 7010 // then lfd it and fcfid it. 7011 MachineFunction &MF = DAG.getMachineFunction(); 7012 MachineFrameInfo &MFI = MF.getFrameInfo(); 7013 EVT PtrVT = getPointerTy(MF.getDataLayout()); 7014 7015 SDValue Ld; 7016 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 7017 ReuseLoadInfo RLI; 7018 bool ReusingLoad; 7019 if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI, 7020 DAG))) { 7021 int FrameIdx = MFI.CreateStackObject(4, 4, false); 7022 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7023 7024 SDValue Store = 7025 DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 7026 MachinePointerInfo::getFixedStack( 7027 DAG.getMachineFunction(), FrameIdx)); 7028 7029 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 7030 "Expected an i32 store"); 7031 7032 RLI.Ptr = FIdx; 7033 RLI.Chain = Store; 7034 RLI.MPI = 7035 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7036 RLI.Alignment = 4; 7037 } 7038 7039 MachineMemOperand *MMO = 7040 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 7041 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 7042 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 7043 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 7044 PPCISD::LFIWZX : PPCISD::LFIWAX, 7045 dl, DAG.getVTList(MVT::f64, MVT::Other), 7046 Ops, MVT::i32, MMO); 7047 if (ReusingLoad) 7048 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG); 7049 } else { 7050 assert(Subtarget.isPPC64() && 7051 "i32->FP without LFIWAX supported only on PPC64"); 7052 7053 int FrameIdx = MFI.CreateStackObject(8, 8, false); 7054 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7055 7056 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, 7057 Op.getOperand(0)); 7058 7059 // STD the extended value into the stack slot. 7060 SDValue Store = DAG.getStore( 7061 DAG.getEntryNode(), dl, Ext64, FIdx, 7062 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx)); 7063 7064 // Load the value as a double. 7065 Ld = DAG.getLoad( 7066 MVT::f64, dl, Store, FIdx, 7067 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx)); 7068 } 7069 7070 // FCFID it and return it. 
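// Note: the extra FP_ROUND below is a single additional rounding, not a
// double-rounding hazard, because an i32 always converts to f64 exactly.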
7071 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld); 7072 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 7073 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, 7074 DAG.getIntPtrConstant(0, dl)); 7075 return FP; 7076 } 7077 7078 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 7079 SelectionDAG &DAG) const { 7080 SDLoc dl(Op); 7081 /* 7082 The rounding mode is in bits 30:31 of FPSCR, and has the following 7083 settings: 7084 00 Round to nearest 7085 01 Round to 0 7086 10 Round to +inf 7087 11 Round to -inf 7088 7089 FLT_ROUNDS, on the other hand, expects the following: 7090 -1 Undefined 7091 0 Round to 0 7092 1 Round to nearest 7093 2 Round to +inf 7094 3 Round to -inf 7095 7096 To perform the conversion, we do: 7097 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1)) 7098 */ 7099 7100 MachineFunction &MF = DAG.getMachineFunction(); 7101 EVT VT = Op.getValueType(); 7102 EVT PtrVT = getPointerTy(MF.getDataLayout()); 7103 7104 // Save FP Control Word to register 7105 EVT NodeTys[] = { 7106 MVT::f64, // return register 7107 MVT::Glue // unused in this context 7108 }; 7109 SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None); 7110 7111 // Save FP register to stack slot 7112 int SSFI = MF.getFrameInfo().CreateStackObject(8, 8, false); 7113 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT); 7114 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, StackSlot, 7115 MachinePointerInfo()); 7116 7117 // Load FP Control Word from low 32 bits of stack slot. 7118 SDValue Four = DAG.getConstant(4, dl, PtrVT); 7119 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four); 7120 SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo()); 7121 7122 // Transform as necessary 7123 SDValue CWD1 = 7124 DAG.getNode(ISD::AND, dl, MVT::i32, 7125 CWD, DAG.getConstant(3, dl, MVT::i32)); 7126 SDValue CWD2 = 7127 DAG.getNode(ISD::SRL, dl, MVT::i32, 7128 DAG.getNode(ISD::AND, dl, MVT::i32, 7129 DAG.getNode(ISD::XOR, dl, MVT::i32, 7130 CWD, DAG.getConstant(3, dl, MVT::i32)), 7131 DAG.getConstant(3, dl, MVT::i32)), 7132 DAG.getConstant(1, dl, MVT::i32)); 7133 7134 SDValue RetVal = 7135 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2); 7136 7137 return DAG.getNode((VT.getSizeInBits() < 16 ? 7138 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal); 7139 } 7140 7141 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const { 7142 EVT VT = Op.getValueType(); 7143 unsigned BitWidth = VT.getSizeInBits(); 7144 SDLoc dl(Op); 7145 assert(Op.getNumOperands() == 3 && 7146 VT == Op.getOperand(1).getValueType() && 7147 "Unexpected SHL!"); 7148 7149 // Expand into a bunch of logical ops. Note that these ops 7150 // depend on the PPC behavior for oversized shift amounts.
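// A worked sketch of the expansion (BitWidth == 32):
//   OutLo = Lo << Amt
//   OutHi = (Hi << Amt) | (Lo >> (32 - Amt)) | (Lo << (Amt - 32))
// The out-of-range shift amounts are safe because PPC shifts yield 0 for
// them instead of wrapping; e.g. for Amt == 40 this degenerates to
//   OutHi = Lo << 8, OutLo = 0
// so the same straight-line code covers Amt < 32 and Amt >= 32.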
7151 SDValue Lo = Op.getOperand(0); 7152 SDValue Hi = Op.getOperand(1); 7153 SDValue Amt = Op.getOperand(2); 7154 EVT AmtVT = Amt.getValueType(); 7155 7156 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 7157 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 7158 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 7159 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 7160 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 7161 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 7162 DAG.getConstant(-BitWidth, dl, AmtVT)); 7163 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 7164 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 7165 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 7166 SDValue OutOps[] = { OutLo, OutHi }; 7167 return DAG.getMergeValues(OutOps, dl); 7168 } 7169 7170 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 7171 EVT VT = Op.getValueType(); 7172 SDLoc dl(Op); 7173 unsigned BitWidth = VT.getSizeInBits(); 7174 assert(Op.getNumOperands() == 3 && 7175 VT == Op.getOperand(1).getValueType() && 7176 "Unexpected SRL!"); 7177 7178 // Expand into a bunch of logical ops. Note that these ops 7179 // depend on the PPC behavior for oversized shift amounts. 7180 SDValue Lo = Op.getOperand(0); 7181 SDValue Hi = Op.getOperand(1); 7182 SDValue Amt = Op.getOperand(2); 7183 EVT AmtVT = Amt.getValueType(); 7184 7185 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 7186 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 7187 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 7188 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 7189 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 7190 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 7191 DAG.getConstant(-BitWidth, dl, AmtVT)); 7192 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 7193 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 7194 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 7195 SDValue OutOps[] = { OutLo, OutHi }; 7196 return DAG.getMergeValues(OutOps, dl); 7197 } 7198 7199 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 7200 SDLoc dl(Op); 7201 EVT VT = Op.getValueType(); 7202 unsigned BitWidth = VT.getSizeInBits(); 7203 assert(Op.getNumOperands() == 3 && 7204 VT == Op.getOperand(1).getValueType() && 7205 "Unexpected SRA!"); 7206 7207 // Expand into a bunch of logical ops, followed by a select_cc. 7208 SDValue Lo = Op.getOperand(0); 7209 SDValue Hi = Op.getOperand(1); 7210 SDValue Amt = Op.getOperand(2); 7211 EVT AmtVT = Amt.getValueType(); 7212 7213 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 7214 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 7215 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 7216 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 7217 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 7218 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 7219 DAG.getConstant(-BitWidth, dl, AmtVT)); 7220 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 7221 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 7222 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT), 7223 Tmp4, Tmp6, ISD::SETLE); 7224 SDValue OutOps[] = { OutLo, OutHi }; 7225 return DAG.getMergeValues(OutOps, dl); 7226 } 7227 7228 //===----------------------------------------------------------------------===// 7229 // Vector related lowering. 
7230 // 7231 7232 /// BuildSplatI - Build a canonical splati of Val with an element size of 7233 /// SplatSize. Cast the result to VT. 7234 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 7235 SelectionDAG &DAG, const SDLoc &dl) { 7236 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 7237 7238 static const MVT VTys[] = { // canonical VT to use for each size. 7239 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 7240 }; 7241 7242 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 7243 7244 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 7245 if (Val == -1) 7246 SplatSize = 1; 7247 7248 EVT CanonicalVT = VTys[SplatSize-1]; 7249 7250 // Build a canonical splat for this value. 7251 return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT)); 7252 } 7253 7254 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 7255 /// specified intrinsic ID. 7256 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG, 7257 const SDLoc &dl, EVT DestVT = MVT::Other) { 7258 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 7259 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7260 DAG.getConstant(IID, dl, MVT::i32), Op); 7261 } 7262 7263 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 7264 /// specified intrinsic ID. 7265 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 7266 SelectionDAG &DAG, const SDLoc &dl, 7267 EVT DestVT = MVT::Other) { 7268 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 7269 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7270 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS); 7271 } 7272 7273 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 7274 /// specified intrinsic ID. 7275 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 7276 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl, 7277 EVT DestVT = MVT::Other) { 7278 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 7279 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7280 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2); 7281 } 7282 7283 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 7284 /// amount. The result has the specified value type. 7285 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT, 7286 SelectionDAG &DAG, const SDLoc &dl) { 7287 // Force LHS/RHS to be the right type. 7288 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 7289 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 7290 7291 int Ops[16]; 7292 for (unsigned i = 0; i != 16; ++i) 7293 Ops[i] = i + Amt; 7294 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 7295 return DAG.getNode(ISD::BITCAST, dl, VT, T); 7296 } 7297 7298 /// Do we have an efficient pattern in a .td file for this node? 7299 /// 7300 /// \param V - pointer to the BuildVectorSDNode being matched 7301 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves? 7302 /// 7303 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR 7304 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where 7305 /// the opposite is true (expansion is beneficial) are: 7306 /// - The node builds a vector out of integers that are not 32 or 64-bits 7307 /// - The node builds a vector out of constants 7308 /// - The node is a "load-and-splat" 7309 /// In all other cases, we will choose to keep the BUILD_VECTOR. 
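/// For instance (an informal example), a v2f64 node of the form
///   (build_vector (load %p), (load %p))
/// is a load-and-splat, and expanding it lets a single load-and-splat
/// instruction (e.g. lxvdsx on VSX) be selected instead of building the
/// vector lane by lane.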
7310 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V, 7311 bool HasDirectMove) { 7312 EVT VecVT = V->getValueType(0); 7313 bool RightType = VecVT == MVT::v2f64 || VecVT == MVT::v4f32 || 7314 (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32)); 7315 if (!RightType) 7316 return false; 7317 7318 bool IsSplat = true; 7319 bool IsLoad = false; 7320 SDValue Op0 = V->getOperand(0); 7321 7322 // This function is called in a block that confirms the node is not a constant 7323 // splat. So a constant BUILD_VECTOR here means the vector is built out of 7324 // different constants. 7325 if (V->isConstant()) 7326 return false; 7327 for (int i = 0, e = V->getNumOperands(); i < e; ++i) { 7328 if (V->getOperand(i).isUndef()) 7329 return false; 7330 // We want to expand nodes that represent load-and-splat even if the 7331 // loaded value is a floating point truncation or conversion to int. 7332 if (V->getOperand(i).getOpcode() == ISD::LOAD || 7333 (V->getOperand(i).getOpcode() == ISD::FP_ROUND && 7334 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) || 7335 (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT && 7336 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) || 7337 (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT && 7338 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD)) 7339 IsLoad = true; 7340 // If the operands are different or the input is not a load and has more 7341 // uses than just this BV node, then it isn't a splat. 7342 if (V->getOperand(i) != Op0 || 7343 (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode()))) 7344 IsSplat = false; 7345 } 7346 return !(IsSplat && IsLoad); 7347 } 7348 7349 // If this is a case we can't handle, return null and let the default 7350 // expansion code take care of it. If we CAN select this case, and if it 7351 // selects to a single instruction, return Op. Otherwise, if we can codegen 7352 // this case more efficiently than a constant pool load, lower it to the 7353 // sequence of ops that should be used. 7354 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 7355 SelectionDAG &DAG) const { 7356 SDLoc dl(Op); 7357 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 7358 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 7359 7360 if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) { 7361 // We first build an i32 vector, load it into a QPX register, 7362 // then convert it to a floating-point vector and compare it 7363 // to a zero vector to get the boolean result. 
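// Outline of the path below, using the intrinsics that appear in the code
// (a sketch of the dataflow only, not additional functionality):
//   1. store the four i32 elements to a 16-byte stack slot;
//   2. ppc_qpx_qvlfiwz: load the slot into a QPX register;
//   3. ppc_qpx_qvfcfidu: convert the loaded integers to floating point;
//   4. SETCC against a vector of 0.0 to form the v4i1 result.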
7364 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 7365 int FrameIdx = MFI.CreateStackObject(16, 16, false); 7366 MachinePointerInfo PtrInfo = 7367 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7368 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7369 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7370 7371 assert(BVN->getNumOperands() == 4 && 7372 "BUILD_VECTOR for v4i1 does not have 4 operands"); 7373 7374 bool IsConst = true; 7375 for (unsigned i = 0; i < 4; ++i) { 7376 if (BVN->getOperand(i).isUndef()) continue; 7377 if (!isa<ConstantSDNode>(BVN->getOperand(i))) { 7378 IsConst = false; 7379 break; 7380 } 7381 } 7382 7383 if (IsConst) { 7384 Constant *One = 7385 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0); 7386 Constant *NegOne = 7387 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0); 7388 7389 Constant *CV[4]; 7390 for (unsigned i = 0; i < 4; ++i) { 7391 if (BVN->getOperand(i).isUndef()) 7392 CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext())); 7393 else if (isNullConstant(BVN->getOperand(i))) 7394 CV[i] = NegOne; 7395 else 7396 CV[i] = One; 7397 } 7398 7399 Constant *CP = ConstantVector::get(CV); 7400 SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()), 7401 16 /* alignment */); 7402 7403 SDValue Ops[] = {DAG.getEntryNode(), CPIdx}; 7404 SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other}); 7405 return DAG.getMemIntrinsicNode( 7406 PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32, 7407 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 7408 } 7409 7410 SmallVector<SDValue, 4> Stores; 7411 for (unsigned i = 0; i < 4; ++i) { 7412 if (BVN->getOperand(i).isUndef()) continue; 7413 7414 unsigned Offset = 4*i; 7415 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 7416 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 7417 7418 unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize(); 7419 if (StoreSize > 4) { 7420 Stores.push_back( 7421 DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx, 7422 PtrInfo.getWithOffset(Offset), MVT::i32)); 7423 } else { 7424 SDValue StoreValue = BVN->getOperand(i); 7425 if (StoreSize < 4) 7426 StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue); 7427 7428 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx, 7429 PtrInfo.getWithOffset(Offset))); 7430 } 7431 } 7432 7433 SDValue StoreChain; 7434 if (!Stores.empty()) 7435 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 7436 else 7437 StoreChain = DAG.getEntryNode(); 7438 7439 // Now load from v4i32 into the QPX register; this will extend it to 7440 // v4i64 but not yet convert it to a floating point. Nevertheless, this 7441 // is typed as v4f64 because the QPX register integer states are not 7442 // explicitly represented. 7443 7444 SDValue Ops[] = {StoreChain, 7445 DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32), 7446 FIdx}; 7447 SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other}); 7448 7449 SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, 7450 dl, VTs, Ops, MVT::v4i32, PtrInfo); 7451 LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 7452 DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32), 7453 LoadedVect); 7454 7455 SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64); 7456 7457 return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ); 7458 } 7459 7460 // All other QPX vectors are handled by generic code. 
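// (Returning an empty SDValue from here hands the node back to the default
// expansion code, per the contract described above LowerBUILD_VECTOR.)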
7461 if (Subtarget.hasQPX()) 7462 return SDValue(); 7463 7464 // Check if this is a splat of a constant value. 7465 APInt APSplatBits, APSplatUndef; 7466 unsigned SplatBitSize; 7467 bool HasAnyUndefs; 7468 if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize, 7469 HasAnyUndefs, 0, !Subtarget.isLittleEndian()) || 7470 SplatBitSize > 32) { 7471 // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be 7472 // lowered to VSX instructions under certain conditions. 7473 // Without VSX, there is no pattern more efficient than expanding the node. 7474 if (Subtarget.hasVSX() && 7475 haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove())) 7476 return Op; 7477 return SDValue(); 7478 } 7479 7480 unsigned SplatBits = APSplatBits.getZExtValue(); 7481 unsigned SplatUndef = APSplatUndef.getZExtValue(); 7482 unsigned SplatSize = SplatBitSize / 8; 7483 7484 // First, handle single instruction cases. 7485 7486 // All zeros? 7487 if (SplatBits == 0) { 7488 // Canonicalize all zero vectors to be v4i32. 7489 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 7490 SDValue Z = DAG.getConstant(0, dl, MVT::v4i32); 7491 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z); 7492 } 7493 return Op; 7494 } 7495 7496 // We have XXSPLTIB for constant splats one byte wide 7497 if (Subtarget.hasP9Vector() && SplatSize == 1) { 7498 // This is a splat of 1-byte elements with some elements potentially undef. 7499 // Rather than trying to match undef in the SDAG patterns, ensure that all 7500 // elements are the same constant. 7501 if (HasAnyUndefs || ISD::isBuildVectorAllOnes(BVN)) { 7502 SmallVector<SDValue, 16> Ops(16, DAG.getConstant(SplatBits, 7503 dl, MVT::i32)); 7504 SDValue NewBV = DAG.getBuildVector(MVT::v16i8, dl, Ops); 7505 if (Op.getValueType() != MVT::v16i8) 7506 return DAG.getBitcast(Op.getValueType(), NewBV); 7507 return NewBV; 7508 } 7509 return Op; 7510 } 7511 7512 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 7513 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >> 7514 (32-SplatBitSize)); 7515 if (SextVal >= -16 && SextVal <= 15) 7516 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl); 7517 7518 // Two instruction sequences. 7519 7520 // If this value is in the range [-32,30] and is even, use: 7521 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2) 7522 // If this value is in the range [17,31] and is odd, use: 7523 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16) 7524 // If this value is in the range [-31,-17] and is odd, use: 7525 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16) 7526 // Note the last two are three-instruction sequences. 7527 if (SextVal >= -32 && SextVal <= 31) { 7528 // To avoid having these optimizations undone by constant folding, 7529 // we convert to a pseudo that will be expanded later into one of 7530 // the above forms. 7531 SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32); 7532 EVT VT = (SplatSize == 1 ? MVT::v16i8 : 7533 (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32)); 7534 SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32); 7535 SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize); 7536 if (VT == Op.getValueType()) 7537 return RetVal; 7538 else 7539 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal); 7540 } 7541 7542 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is 7543 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important 7544 // for fneg/fabs. 
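// Worked example for the code below: vspltisw -1 gives 0xFFFFFFFF in each
// word; vslw shifts each word left by the low 5 bits of the corresponding
// word of the shift operand (31 here), producing 0x8000_0000; xor-ing with
// the all-ones vector then yields 0x7FFF_FFFF. Neither constant fits in a
// 5-bit splat immediate directly.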
7545 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) { 7546 // Make -1 and vspltisw -1: 7547 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl); 7548 7549 // Make the VSLW intrinsic, computing 0x8000_0000. 7550 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV, 7551 OnesV, DAG, dl); 7552 7553 // xor by OnesV to invert it. 7554 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV); 7555 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 7556 } 7557 7558 // Check to see if this is a wide variety of vsplti*, binop self cases. 7559 static const signed char SplatCsts[] = { 7560 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7, 7561 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16 7562 }; 7563 7564 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) { 7565 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for 7566 // cases which are ambiguous (e.g. formation of 0x8000_0000). 7567 int i = SplatCsts[idx]; 7568 7569 // Figure out what shift amount will be used by altivec if shifted by i in 7570 // this splat size. 7571 unsigned TypeShiftAmt = i & (SplatBitSize-1); 7572 7573 // vsplti + shl self. 7574 if (SextVal == (int)((unsigned)i << TypeShiftAmt)) { 7575 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 7576 static const unsigned IIDs[] = { // Intrinsic to use for each size. 7577 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0, 7578 Intrinsic::ppc_altivec_vslw 7579 }; 7580 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 7581 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 7582 } 7583 7584 // vsplti + srl self. 7585 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 7586 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 7587 static const unsigned IIDs[] = { // Intrinsic to use for each size. 7588 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0, 7589 Intrinsic::ppc_altivec_vsrw 7590 }; 7591 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 7592 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 7593 } 7594 7595 // vsplti + sra self. Note the arithmetic (signed) shift, unlike srl above. 7596 if (SextVal == (int)((int)i >> TypeShiftAmt)) { 7597 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 7598 static const unsigned IIDs[] = { // Intrinsic to use for each size. 7599 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0, 7600 Intrinsic::ppc_altivec_vsraw 7601 }; 7602 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 7603 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 7604 } 7605 7606 // vsplti + rol self. 7607 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) | 7608 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) { 7609 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 7610 static const unsigned IIDs[] = { // Intrinsic to use for each size. 7611 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 7612 Intrinsic::ppc_altivec_vrlw 7613 }; 7614 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 7615 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 7616 } 7617 7618 // t = vsplti c, result = vsldoi t, t, 1 7619 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) { 7620 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 7621 unsigned Amt = Subtarget.isLittleEndian() ?
15 : 1; 7622 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 7623 } 7624 // t = vsplti c, result = vsldoi t, t, 2 7625 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) { 7626 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 7627 unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2; 7628 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 7629 } 7630 // t = vsplti c, result = vsldoi t, t, 3 7631 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) { 7632 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 7633 unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3; 7634 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 7635 } 7636 } 7637 7638 return SDValue(); 7639 } 7640 7641 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 7642 /// the specified operations to build the shuffle. 7643 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 7644 SDValue RHS, SelectionDAG &DAG, 7645 const SDLoc &dl) { 7646 unsigned OpNum = (PFEntry >> 26) & 0x0F; 7647 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 7648 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 7649 7650 enum { 7651 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 7652 OP_VMRGHW, 7653 OP_VMRGLW, 7654 OP_VSPLTISW0, 7655 OP_VSPLTISW1, 7656 OP_VSPLTISW2, 7657 OP_VSPLTISW3, 7658 OP_VSLDOI4, 7659 OP_VSLDOI8, 7660 OP_VSLDOI12 7661 }; 7662 7663 if (OpNum == OP_COPY) { 7664 if (LHSID == (1*9+2)*9+3) return LHS; 7665 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 7666 return RHS; 7667 } 7668 7669 SDValue OpLHS, OpRHS; 7670 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 7671 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 7672 7673 int ShufIdxs[16]; 7674 switch (OpNum) { 7675 default: llvm_unreachable("Unknown i32 permute!"); 7676 case OP_VMRGHW: 7677 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 7678 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 7679 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 7680 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 7681 break; 7682 case OP_VMRGLW: 7683 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 7684 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 7685 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 7686 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 7687 break; 7688 case OP_VSPLTISW0: 7689 for (unsigned i = 0; i != 16; ++i) 7690 ShufIdxs[i] = (i&3)+0; 7691 break; 7692 case OP_VSPLTISW1: 7693 for (unsigned i = 0; i != 16; ++i) 7694 ShufIdxs[i] = (i&3)+4; 7695 break; 7696 case OP_VSPLTISW2: 7697 for (unsigned i = 0; i != 16; ++i) 7698 ShufIdxs[i] = (i&3)+8; 7699 break; 7700 case OP_VSPLTISW3: 7701 for (unsigned i = 0; i != 16; ++i) 7702 ShufIdxs[i] = (i&3)+12; 7703 break; 7704 case OP_VSLDOI4: 7705 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl); 7706 case OP_VSLDOI8: 7707 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl); 7708 case OP_VSLDOI12: 7709 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl); 7710 } 7711 EVT VT = OpLHS.getValueType(); 7712 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS); 7713 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS); 7714 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, 
OpRHS, ShufIdxs); 7715 return DAG.getNode(ISD::BITCAST, dl, VT, T); 7716 } 7717 7718 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 7719 /// is a shuffle we can handle in a single instruction, return it. Otherwise, 7720 /// return the code it can be lowered into. Worst case, it can always be 7721 /// lowered into a vperm. 7722 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 7723 SelectionDAG &DAG) const { 7724 SDLoc dl(Op); 7725 SDValue V1 = Op.getOperand(0); 7726 SDValue V2 = Op.getOperand(1); 7727 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 7728 EVT VT = Op.getValueType(); 7729 bool isLittleEndian = Subtarget.isLittleEndian(); 7730 7731 unsigned ShiftElts, InsertAtByte; 7732 bool Swap; 7733 if (Subtarget.hasP9Vector() && 7734 PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap, 7735 isLittleEndian)) { 7736 if (Swap) 7737 std::swap(V1, V2); 7738 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 7739 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2); 7740 if (ShiftElts) { 7741 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2, 7742 DAG.getConstant(ShiftElts, dl, MVT::i32)); 7743 SDValue Ins = DAG.getNode(PPCISD::XXINSERT, dl, MVT::v4i32, Conv1, Shl, 7744 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 7745 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 7746 } 7747 SDValue Ins = DAG.getNode(PPCISD::XXINSERT, dl, MVT::v4i32, Conv1, Conv2, 7748 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 7749 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 7750 } 7751 7752 7753 if (Subtarget.hasVSX() && 7754 PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) { 7755 if (Swap) 7756 std::swap(V1, V2); 7757 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 7758 SDValue Conv2 = 7759 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2); 7760 7761 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2, 7762 DAG.getConstant(ShiftElts, dl, MVT::i32)); 7763 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl); 7764 } 7765 7766 if (Subtarget.hasVSX()) { 7767 if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) { 7768 int SplatIdx = PPC::getVSPLTImmediate(SVOp, 4, DAG); 7769 7770 // If the source for the shuffle is a scalar_to_vector that came from a 7771 // 32-bit load, it will have used LXVWSX so we don't need to splat again. 7772 if (Subtarget.hasP9Vector() && 7773 ((isLittleEndian && SplatIdx == 3) || 7774 (!isLittleEndian && SplatIdx == 0))) { 7775 SDValue Src = V1.getOperand(0); 7776 if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR && 7777 Src.getOperand(0).getOpcode() == ISD::LOAD && 7778 Src.getOperand(0).hasOneUse()) 7779 return V1; 7780 } 7781 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 7782 SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv, 7783 DAG.getConstant(SplatIdx, dl, MVT::i32)); 7784 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat); 7785 } 7786 7787 // Left shifts of 8 bytes are actually swaps. Convert accordingly. 
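// For example, a vsldoi of a vector with itself by 8 bytes rotates the two
// doublewords, which matches the doubleword-swap (xxswapd-style) pattern
// that SWAP_NO_CHAIN represents.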
7788 if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) { 7789 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1); 7790 SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv); 7791 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap); 7792 } 7793 } 7794 7795 if (Subtarget.hasQPX()) { 7796 if (VT.getVectorNumElements() != 4) 7797 return SDValue(); 7798 7799 if (V2.isUndef()) V2 = V1; 7800 7801 int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp); 7802 if (AlignIdx != -1) { 7803 return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2, 7804 DAG.getConstant(AlignIdx, dl, MVT::i32)); 7805 } else if (SVOp->isSplat()) { 7806 int SplatIdx = SVOp->getSplatIndex(); 7807 if (SplatIdx >= 4) { 7808 std::swap(V1, V2); 7809 SplatIdx -= 4; 7810 } 7811 7812 return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1, 7813 DAG.getConstant(SplatIdx, dl, MVT::i32)); 7814 } 7815 7816 // Lower this into a qvgpci/qvfperm pair. 7817 7818 // Compute the qvgpci literal 7819 unsigned idx = 0; 7820 for (unsigned i = 0; i < 4; ++i) { 7821 int m = SVOp->getMaskElt(i); 7822 unsigned mm = m >= 0 ? (unsigned) m : i; 7823 idx |= mm << (3-i)*3; 7824 } 7825 7826 SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64, 7827 DAG.getConstant(idx, dl, MVT::i32)); 7828 return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3); 7829 } 7830 7831 // Cases that are handled by instructions that take permute immediates 7832 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 7833 // selected by the instruction selector. 7834 if (V2.isUndef()) { 7835 if (PPC::isSplatShuffleMask(SVOp, 1) || 7836 PPC::isSplatShuffleMask(SVOp, 2) || 7837 PPC::isSplatShuffleMask(SVOp, 4) || 7838 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) || 7839 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) || 7840 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 || 7841 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) || 7842 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) || 7843 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) || 7844 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) || 7845 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) || 7846 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) || 7847 (Subtarget.hasP8Altivec() && ( 7848 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) || 7849 PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) || 7850 PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) { 7851 return Op; 7852 } 7853 } 7854 7855 // Altivec has a variety of "shuffle immediates" that take two vector inputs 7856 // and produce a fixed permutation. If any of these match, do not lower to 7857 // VPERM. 7858 unsigned int ShuffleKind = isLittleEndian ? 2 : 0; 7859 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) || 7860 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) || 7861 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 || 7862 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) || 7863 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) || 7864 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) || 7865 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) || 7866 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) || 7867 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) || 7868 (Subtarget.hasP8Altivec() && ( 7869 PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) || 7870 PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) || 7871 PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG)))) 7872 return Op; 7873 7874 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 7875 // perfect shuffle table to emit an optimal matching sequence. 
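// Each 4-byte output element is classified below into one of nine states:
// words 0-3 of the first input, words 4-7 of the second, or 8 for undef.
// The four resulting base-9 digits are then combined into a single index
// into the perfect shuffle table.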
7876 ArrayRef<int> PermMask = SVOp->getMask(); 7877 7878 unsigned PFIndexes[4]; 7879 bool isFourElementShuffle = true; 7880 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 7881 unsigned EltNo = 8; // Start out undef. 7882 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 7883 if (PermMask[i*4+j] < 0) 7884 continue; // Undef, ignore it. 7885 7886 unsigned ByteSource = PermMask[i*4+j]; 7887 if ((ByteSource & 3) != j) { 7888 isFourElementShuffle = false; 7889 break; 7890 } 7891 7892 if (EltNo == 8) { 7893 EltNo = ByteSource/4; 7894 } else if (EltNo != ByteSource/4) { 7895 isFourElementShuffle = false; 7896 break; 7897 } 7898 } 7899 PFIndexes[i] = EltNo; 7900 } 7901 7902 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 7903 // perfect shuffle vector to determine if it is cost effective to do this as 7904 // discrete instructions, or whether we should use a vperm. 7905 // For now, we skip this for little endian until such time as we have a 7906 // little-endian perfect shuffle table. 7907 if (isFourElementShuffle && !isLittleEndian) { 7908 // Compute the index in the perfect shuffle table. 7909 unsigned PFTableIndex = 7910 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 7911 7912 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 7913 unsigned Cost = (PFEntry >> 30); 7914 7915 // Determining when to avoid vperm is tricky. Many things affect the cost 7916 // of vperm, particularly how many times the perm mask needs to be computed. 7917 // For example, if the perm mask can be hoisted out of a loop or is already 7918 // used (perhaps because there are multiple permutes with the same shuffle 7919 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 7920 // the loop requires an extra register. 7921 // 7922 // As a compromise, we only emit discrete instructions if the shuffle can be 7923 // generated in 3 or fewer operations. When we have loop information 7924 // available, if this block is within a loop, we should avoid using vperm 7925 // for 3-operation perms and use a constant pool load instead. 7926 if (Cost < 3) 7927 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 7928 } 7929 7930 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 7931 // vector that will get spilled to the constant pool. 7932 if (V2.isUndef()) V2 = V1; 7933 7934 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 7935 // that it is in input element units, not in bytes. Convert now. 7936 7937 // For little endian, the order of the input vectors is reversed, and 7938 // the permutation mask is complemented with respect to 31. This is 7939 // necessary to produce proper semantics with the big-endian-biased vperm 7940 // instruction. 7941 EVT EltVT = V1.getValueType().getVectorElementType(); 7942 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 7943 7944 SmallVector<SDValue, 16> ResultMask; 7945 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 7946 unsigned SrcElt = PermMask[i] < 0 ? 
0 : PermMask[i]; 7947 7948 for (unsigned j = 0; j != BytesPerElement; ++j) 7949 if (isLittleEndian) 7950 ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j), 7951 dl, MVT::i32)); 7952 else 7953 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl, 7954 MVT::i32)); 7955 } 7956 7957 SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask); 7958 if (isLittleEndian) 7959 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 7960 V2, V1, VPermMask); 7961 else 7962 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 7963 V1, V2, VPermMask); 7964 } 7965 7966 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a 7967 /// vector comparison. If it is, return true and fill in Opc/isDot with 7968 /// information about the intrinsic. 7969 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc, 7970 bool &isDot, const PPCSubtarget &Subtarget) { 7971 unsigned IntrinsicID = 7972 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 7973 CompareOpc = -1; 7974 isDot = false; 7975 switch (IntrinsicID) { 7976 default: 7977 return false; 7978 // Comparison predicates. 7979 case Intrinsic::ppc_altivec_vcmpbfp_p: 7980 CompareOpc = 966; 7981 isDot = true; 7982 break; 7983 case Intrinsic::ppc_altivec_vcmpeqfp_p: 7984 CompareOpc = 198; 7985 isDot = true; 7986 break; 7987 case Intrinsic::ppc_altivec_vcmpequb_p: 7988 CompareOpc = 6; 7989 isDot = true; 7990 break; 7991 case Intrinsic::ppc_altivec_vcmpequh_p: 7992 CompareOpc = 70; 7993 isDot = true; 7994 break; 7995 case Intrinsic::ppc_altivec_vcmpequw_p: 7996 CompareOpc = 134; 7997 isDot = true; 7998 break; 7999 case Intrinsic::ppc_altivec_vcmpequd_p: 8000 if (Subtarget.hasP8Altivec()) { 8001 CompareOpc = 199; 8002 isDot = true; 8003 } else 8004 return false; 8005 break; 8006 case Intrinsic::ppc_altivec_vcmpneb_p: 8007 case Intrinsic::ppc_altivec_vcmpneh_p: 8008 case Intrinsic::ppc_altivec_vcmpnew_p: 8009 case Intrinsic::ppc_altivec_vcmpnezb_p: 8010 case Intrinsic::ppc_altivec_vcmpnezh_p: 8011 case Intrinsic::ppc_altivec_vcmpnezw_p: 8012 if (Subtarget.hasP9Altivec()) { 8013 switch (IntrinsicID) { 8014 default: 8015 llvm_unreachable("Unknown comparison intrinsic."); 8016 case Intrinsic::ppc_altivec_vcmpneb_p: 8017 CompareOpc = 7; 8018 break; 8019 case Intrinsic::ppc_altivec_vcmpneh_p: 8020 CompareOpc = 71; 8021 break; 8022 case Intrinsic::ppc_altivec_vcmpnew_p: 8023 CompareOpc = 135; 8024 break; 8025 case Intrinsic::ppc_altivec_vcmpnezb_p: 8026 CompareOpc = 263; 8027 break; 8028 case Intrinsic::ppc_altivec_vcmpnezh_p: 8029 CompareOpc = 327; 8030 break; 8031 case Intrinsic::ppc_altivec_vcmpnezw_p: 8032 CompareOpc = 391; 8033 break; 8034 } 8035 isDot = true; 8036 } else 8037 return false; 8038 break; 8039 case Intrinsic::ppc_altivec_vcmpgefp_p: 8040 CompareOpc = 454; 8041 isDot = true; 8042 break; 8043 case Intrinsic::ppc_altivec_vcmpgtfp_p: 8044 CompareOpc = 710; 8045 isDot = true; 8046 break; 8047 case Intrinsic::ppc_altivec_vcmpgtsb_p: 8048 CompareOpc = 774; 8049 isDot = true; 8050 break; 8051 case Intrinsic::ppc_altivec_vcmpgtsh_p: 8052 CompareOpc = 838; 8053 isDot = true; 8054 break; 8055 case Intrinsic::ppc_altivec_vcmpgtsw_p: 8056 CompareOpc = 902; 8057 isDot = true; 8058 break; 8059 case Intrinsic::ppc_altivec_vcmpgtsd_p: 8060 if (Subtarget.hasP8Altivec()) { 8061 CompareOpc = 967; 8062 isDot = true; 8063 } else 8064 return false; 8065 break; 8066 case Intrinsic::ppc_altivec_vcmpgtub_p: 8067 CompareOpc = 518; 8068 isDot = true; 8069 break; 8070 case 
Intrinsic::ppc_altivec_vcmpgtuh_p: 8071 CompareOpc = 582; 8072 isDot = true; 8073 break; 8074 case Intrinsic::ppc_altivec_vcmpgtuw_p: 8075 CompareOpc = 646; 8076 isDot = true; 8077 break; 8078 case Intrinsic::ppc_altivec_vcmpgtud_p: 8079 if (Subtarget.hasP8Altivec()) { 8080 CompareOpc = 711; 8081 isDot = true; 8082 } else 8083 return false; 8084 break; 8085 8086 // VSX predicate comparisons use the same infrastructure 8087 case Intrinsic::ppc_vsx_xvcmpeqdp_p: 8088 case Intrinsic::ppc_vsx_xvcmpgedp_p: 8089 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 8090 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 8091 case Intrinsic::ppc_vsx_xvcmpgesp_p: 8092 case Intrinsic::ppc_vsx_xvcmpgtsp_p: 8093 if (Subtarget.hasVSX()) { 8094 switch (IntrinsicID) { 8095 case Intrinsic::ppc_vsx_xvcmpeqdp_p: 8096 CompareOpc = 99; 8097 break; 8098 case Intrinsic::ppc_vsx_xvcmpgedp_p: 8099 CompareOpc = 115; 8100 break; 8101 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 8102 CompareOpc = 107; 8103 break; 8104 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 8105 CompareOpc = 67; 8106 break; 8107 case Intrinsic::ppc_vsx_xvcmpgesp_p: 8108 CompareOpc = 83; 8109 break; 8110 case Intrinsic::ppc_vsx_xvcmpgtsp_p: 8111 CompareOpc = 75; 8112 break; 8113 } 8114 isDot = true; 8115 } else 8116 return false; 8117 break; 8118 8119 // Normal Comparisons. 8120 case Intrinsic::ppc_altivec_vcmpbfp: 8121 CompareOpc = 966; 8122 break; 8123 case Intrinsic::ppc_altivec_vcmpeqfp: 8124 CompareOpc = 198; 8125 break; 8126 case Intrinsic::ppc_altivec_vcmpequb: 8127 CompareOpc = 6; 8128 break; 8129 case Intrinsic::ppc_altivec_vcmpequh: 8130 CompareOpc = 70; 8131 break; 8132 case Intrinsic::ppc_altivec_vcmpequw: 8133 CompareOpc = 134; 8134 break; 8135 case Intrinsic::ppc_altivec_vcmpequd: 8136 if (Subtarget.hasP8Altivec()) 8137 CompareOpc = 199; 8138 else 8139 return false; 8140 break; 8141 case Intrinsic::ppc_altivec_vcmpneb: 8142 case Intrinsic::ppc_altivec_vcmpneh: 8143 case Intrinsic::ppc_altivec_vcmpnew: 8144 case Intrinsic::ppc_altivec_vcmpnezb: 8145 case Intrinsic::ppc_altivec_vcmpnezh: 8146 case Intrinsic::ppc_altivec_vcmpnezw: 8147 if (Subtarget.hasP9Altivec()) 8148 switch (IntrinsicID) { 8149 default: 8150 llvm_unreachable("Unknown comparison intrinsic."); 8151 case Intrinsic::ppc_altivec_vcmpneb: 8152 CompareOpc = 7; 8153 break; 8154 case Intrinsic::ppc_altivec_vcmpneh: 8155 CompareOpc = 71; 8156 break; 8157 case Intrinsic::ppc_altivec_vcmpnew: 8158 CompareOpc = 135; 8159 break; 8160 case Intrinsic::ppc_altivec_vcmpnezb: 8161 CompareOpc = 263; 8162 break; 8163 case Intrinsic::ppc_altivec_vcmpnezh: 8164 CompareOpc = 327; 8165 break; 8166 case Intrinsic::ppc_altivec_vcmpnezw: 8167 CompareOpc = 391; 8168 break; 8169 } 8170 else 8171 return false; 8172 break; 8173 case Intrinsic::ppc_altivec_vcmpgefp: 8174 CompareOpc = 454; 8175 break; 8176 case Intrinsic::ppc_altivec_vcmpgtfp: 8177 CompareOpc = 710; 8178 break; 8179 case Intrinsic::ppc_altivec_vcmpgtsb: 8180 CompareOpc = 774; 8181 break; 8182 case Intrinsic::ppc_altivec_vcmpgtsh: 8183 CompareOpc = 838; 8184 break; 8185 case Intrinsic::ppc_altivec_vcmpgtsw: 8186 CompareOpc = 902; 8187 break; 8188 case Intrinsic::ppc_altivec_vcmpgtsd: 8189 if (Subtarget.hasP8Altivec()) 8190 CompareOpc = 967; 8191 else 8192 return false; 8193 break; 8194 case Intrinsic::ppc_altivec_vcmpgtub: 8195 CompareOpc = 518; 8196 break; 8197 case Intrinsic::ppc_altivec_vcmpgtuh: 8198 CompareOpc = 582; 8199 break; 8200 case Intrinsic::ppc_altivec_vcmpgtuw: 8201 CompareOpc = 646; 8202 break; 8203 case Intrinsic::ppc_altivec_vcmpgtud: 8204 if 
(Subtarget.hasP8Altivec()) 8205 CompareOpc = 711; 8206 else 8207 return false; 8208 break; 8209 } 8210 return true; 8211 } 8212 8213 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 8214 /// lower, do it, otherwise return null. 8215 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 8216 SelectionDAG &DAG) const { 8217 unsigned IntrinsicID = 8218 cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 8219 8220 if (IntrinsicID == Intrinsic::thread_pointer) { 8221 // Reads the thread pointer register, used for __builtin_thread_pointer. 8222 bool is64bit = Subtarget.isPPC64(); 8223 return DAG.getRegister(is64bit ? PPC::X13 : PPC::R2, 8224 is64bit ? MVT::i64 : MVT::i32); 8225 } 8226 8227 // If this is a lowered altivec predicate compare, CompareOpc is set to the 8228 // opcode number of the comparison. 8229 SDLoc dl(Op); 8230 int CompareOpc; 8231 bool isDot; 8232 if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget)) 8233 return SDValue(); // Don't custom lower most intrinsics. 8234 8235 // If this is a non-dot comparison, make the VCMP node and we are done. 8236 if (!isDot) { 8237 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 8238 Op.getOperand(1), Op.getOperand(2), 8239 DAG.getConstant(CompareOpc, dl, MVT::i32)); 8240 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 8241 } 8242 8243 // Create the PPCISD altivec 'dot' comparison node. 8244 SDValue Ops[] = { 8245 Op.getOperand(2), // LHS 8246 Op.getOperand(3), // RHS 8247 DAG.getConstant(CompareOpc, dl, MVT::i32) 8248 }; 8249 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue }; 8250 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 8251 8252 // Now that we have the comparison, emit a copy from the CR to a GPR. 8253 // This is flagged to the above dot comparison. 8254 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32, 8255 DAG.getRegister(PPC::CR6, MVT::i32), 8256 CompNode.getValue(1)); 8257 8258 // Unpack the result based on how the target uses it. 8259 unsigned BitNo; // Bit # of CR6. 8260 bool InvertBit; // Invert result? 8261 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 8262 default: // Can't happen, don't crash on invalid number though. 8263 case 0: // Return the value of the EQ bit of CR6. 8264 BitNo = 0; InvertBit = false; 8265 break; 8266 case 1: // Return the inverted value of the EQ bit of CR6. 8267 BitNo = 0; InvertBit = true; 8268 break; 8269 case 2: // Return the value of the LT bit of CR6. 8270 BitNo = 2; InvertBit = false; 8271 break; 8272 case 3: // Return the inverted value of the LT bit of CR6. 8273 BitNo = 2; InvertBit = true; 8274 break; 8275 } 8276 8277 // Shift the bit into the low position. 8278 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags, 8279 DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32)); 8280 // Isolate the bit. 8281 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags, 8282 DAG.getConstant(1, dl, MVT::i32)); 8283 8284 // If we are supposed to, toggle the bit. 8285 if (InvertBit) 8286 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags, 8287 DAG.getConstant(1, dl, MVT::i32)); 8288 return Flags; 8289 } 8290 8291 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op, 8292 SelectionDAG &DAG) const { 8293 // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to 8294 // the beginning of the argument list. 8295 int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 
0 : 1; 8296 SDLoc DL(Op); 8297 switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) { 8298 case Intrinsic::ppc_cfence: { 8299 assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument."); 8300 assert(Subtarget.isPPC64() && "Only 64-bit is supported for now."); 8301 return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other, 8302 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, 8303 Op.getOperand(ArgStart + 1)), 8304 Op.getOperand(0)), 8305 0); 8306 } 8307 default: 8308 break; 8309 } 8310 return SDValue(); 8311 } 8312 8313 SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, 8314 SelectionDAG &DAG) const { 8315 SDLoc dl(Op); 8316 // For v2i64 (VSX), we can pattern match the v2i32 case (using fp <-> int 8317 // instructions), but for smaller types, we need to first extend up to v2i32 8318 // before going any farther. 8319 if (Op.getValueType() == MVT::v2i64) { 8320 EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 8321 if (ExtVT != MVT::v2i32) { 8322 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)); 8323 Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op, 8324 DAG.getValueType(EVT::getVectorVT(*DAG.getContext(), 8325 ExtVT.getVectorElementType(), 4))); 8326 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op); 8327 Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op, 8328 DAG.getValueType(MVT::v2i32)); 8329 } 8330 8331 return Op; 8332 } 8333 8334 return SDValue(); 8335 } 8336 8337 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, 8338 SelectionDAG &DAG) const { 8339 SDLoc dl(Op); 8340 // Create a stack slot that is 16-byte aligned. 8341 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 8342 int FrameIdx = MFI.CreateStackObject(16, 16, false); 8343 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 8344 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8345 8346 // Store the input value into Value#0 of the stack slot. 8347 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 8348 MachinePointerInfo()); 8349 // Load it out. 8350 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo()); 8351 } 8352 8353 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, 8354 SelectionDAG &DAG) const { 8355 assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && 8356 "Should only be called for ISD::INSERT_VECTOR_ELT"); 8357 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 8358 // We have legal lowering for constant indices but not for variable ones. 8359 if (C) 8360 return Op; 8361 return SDValue(); 8362 } 8363 8364 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, 8365 SelectionDAG &DAG) const { 8366 SDLoc dl(Op); 8367 SDNode *N = Op.getNode(); 8368 8369 assert(N->getOperand(0).getValueType() == MVT::v4i1 && 8370 "Unknown extract_vector_elt type"); 8371 8372 SDValue Value = N->getOperand(0); 8373 8374 // The first part of this is like the store lowering except that we don't 8375 // need to track the chain. 8376 8377 // The values are now known to be -1 (false) or 1 (true). To convert this 8378 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 8379 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 8380 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 8381 8382 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 8383 // understand how to form the extending load.
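// Concretely, the fma below maps -1.0 to (-1.0 * 0.5) + 0.5 = 0.0 and
// 1.0 to (1.0 * 0.5) + 0.5 = 1.0, producing exactly the 0.0/1.0 values
// that the following integer conversion and store expect.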
8384 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 8385 8386 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 8387 8388 // Now convert to an integer and store. 8389 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 8390 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 8391 Value); 8392 8393 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 8394 int FrameIdx = MFI.CreateStackObject(16, 16, false); 8395 MachinePointerInfo PtrInfo = 8396 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 8397 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 8398 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8399 8400 SDValue StoreChain = DAG.getEntryNode(); 8401 SDValue Ops[] = {StoreChain, 8402 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32), 8403 Value, FIdx}; 8404 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other); 8405 8406 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 8407 dl, VTs, Ops, MVT::v4i32, PtrInfo); 8408 8409 // Extract the value requested. 8410 unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 8411 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 8412 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 8413 8414 SDValue IntVal = 8415 DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset)); 8416 8417 if (!Subtarget.useCRBits()) 8418 return IntVal; 8419 8420 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal); 8421 } 8422 8423 /// Lowering for QPX v4i1 loads 8424 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op, 8425 SelectionDAG &DAG) const { 8426 SDLoc dl(Op); 8427 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode()); 8428 SDValue LoadChain = LN->getChain(); 8429 SDValue BasePtr = LN->getBasePtr(); 8430 8431 if (Op.getValueType() == MVT::v4f64 || 8432 Op.getValueType() == MVT::v4f32) { 8433 EVT MemVT = LN->getMemoryVT(); 8434 unsigned Alignment = LN->getAlignment(); 8435 8436 // If this load is properly aligned, then it is legal. 
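// Otherwise, split it into element-sized pieces below. For example, an
// 8-byte-aligned v4f64 load (32-byte store size) becomes four f64 loads at
// offsets 0, 8, 16 and 24, with the individual chains joined by a
// TokenFactor.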
8437 if (Alignment >= MemVT.getStoreSize()) 8438 return Op; 8439 8440 EVT ScalarVT = Op.getValueType().getScalarType(), 8441 ScalarMemVT = MemVT.getScalarType(); 8442 unsigned Stride = ScalarMemVT.getStoreSize(); 8443 8444 SDValue Vals[4], LoadChains[4]; 8445 for (unsigned Idx = 0; Idx < 4; ++Idx) { 8446 SDValue Load; 8447 if (ScalarVT != ScalarMemVT) 8448 Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain, 8449 BasePtr, 8450 LN->getPointerInfo().getWithOffset(Idx * Stride), 8451 ScalarMemVT, MinAlign(Alignment, Idx * Stride), 8452 LN->getMemOperand()->getFlags(), LN->getAAInfo()); 8453 else 8454 Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr, 8455 LN->getPointerInfo().getWithOffset(Idx * Stride), 8456 MinAlign(Alignment, Idx * Stride), 8457 LN->getMemOperand()->getFlags(), LN->getAAInfo()); 8458 8459 if (Idx == 0 && LN->isIndexed()) { 8460 assert(LN->getAddressingMode() == ISD::PRE_INC && 8461 "Unknown addressing mode on vector load"); 8462 Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(), 8463 LN->getAddressingMode()); 8464 } 8465 8466 Vals[Idx] = Load; 8467 LoadChains[Idx] = Load.getValue(1); 8468 8469 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 8470 DAG.getConstant(Stride, dl, 8471 BasePtr.getValueType())); 8472 } 8473 8474 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 8475 SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals); 8476 8477 if (LN->isIndexed()) { 8478 SDValue RetOps[] = { Value, Vals[0].getValue(1), TF }; 8479 return DAG.getMergeValues(RetOps, dl); 8480 } 8481 8482 SDValue RetOps[] = { Value, TF }; 8483 return DAG.getMergeValues(RetOps, dl); 8484 } 8485 8486 assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower"); 8487 assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported"); 8488 8489 // To lower v4i1 from a byte array, we load the byte elements of the 8490 // vector and then reuse the BUILD_VECTOR logic. 8491 8492 SDValue VectElmts[4], VectElmtChains[4]; 8493 for (unsigned i = 0; i < 4; ++i) { 8494 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 8495 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 8496 8497 VectElmts[i] = DAG.getExtLoad( 8498 ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx, 8499 LN->getPointerInfo().getWithOffset(i), MVT::i8, 8500 /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo()); 8501 VectElmtChains[i] = VectElmts[i].getValue(1); 8502 } 8503 8504 LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains); 8505 SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts); 8506 8507 SDValue RVals[] = { Value, LoadChain }; 8508 return DAG.getMergeValues(RVals, dl); 8509 } 8510 8511 /// Lowering for QPX v4i1 stores 8512 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op, 8513 SelectionDAG &DAG) const { 8514 SDLoc dl(Op); 8515 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode()); 8516 SDValue StoreChain = SN->getChain(); 8517 SDValue BasePtr = SN->getBasePtr(); 8518 SDValue Value = SN->getValue(); 8519 8520 if (Value.getValueType() == MVT::v4f64 || 8521 Value.getValueType() == MVT::v4f32) { 8522 EVT MemVT = SN->getMemoryVT(); 8523 unsigned Alignment = SN->getAlignment(); 8524 8525 // If this store is properly aligned, then it is legal. 
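// As with the load case, an underaligned store is decomposed below into
// four element-sized stores at offsets of Stride bytes, each value obtained
// via EXTRACT_VECTOR_ELT (and truncated when the memory type is narrower
// than the value type).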
8526 if (Alignment >= MemVT.getStoreSize()) 8527 return Op; 8528 8529 EVT ScalarVT = Value.getValueType().getScalarType(), 8530 ScalarMemVT = MemVT.getScalarType(); 8531 unsigned Stride = ScalarMemVT.getStoreSize(); 8532 8533 SDValue Stores[4]; 8534 for (unsigned Idx = 0; Idx < 4; ++Idx) { 8535 SDValue Ex = DAG.getNode( 8536 ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value, 8537 DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout()))); 8538 SDValue Store; 8539 if (ScalarVT != ScalarMemVT) 8540 Store = 8541 DAG.getTruncStore(StoreChain, dl, Ex, BasePtr, 8542 SN->getPointerInfo().getWithOffset(Idx * Stride), 8543 ScalarMemVT, MinAlign(Alignment, Idx * Stride), 8544 SN->getMemOperand()->getFlags(), SN->getAAInfo()); 8545 else 8546 Store = DAG.getStore(StoreChain, dl, Ex, BasePtr, 8547 SN->getPointerInfo().getWithOffset(Idx * Stride), 8548 MinAlign(Alignment, Idx * Stride), 8549 SN->getMemOperand()->getFlags(), SN->getAAInfo()); 8550 8551 if (Idx == 0 && SN->isIndexed()) { 8552 assert(SN->getAddressingMode() == ISD::PRE_INC && 8553 "Unknown addressing mode on vector store"); 8554 Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(), 8555 SN->getAddressingMode()); 8556 } 8557 8558 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 8559 DAG.getConstant(Stride, dl, 8560 BasePtr.getValueType())); 8561 Stores[Idx] = Store; 8562 } 8563 8564 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 8565 8566 if (SN->isIndexed()) { 8567 SDValue RetOps[] = { TF, Stores[0].getValue(1) }; 8568 return DAG.getMergeValues(RetOps, dl); 8569 } 8570 8571 return TF; 8572 } 8573 8574 assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported"); 8575 assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower"); 8576 8577 // The values are now known to be -1 (false) or 1 (true). To convert this 8578 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 8579 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 8580 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 8581 8582 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 8583 // understand how to form the extending load. 8584 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 8585 8586 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 8587 8588 // Now convert to an integer and store. 8589 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 8590 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 8591 Value); 8592 8593 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 8594 int FrameIdx = MFI.CreateStackObject(16, 16, false); 8595 MachinePointerInfo PtrInfo = 8596 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 8597 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 8598 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8599 8600 SDValue Ops[] = {StoreChain, 8601 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32), 8602 Value, FIdx}; 8603 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other); 8604 8605 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 8606 dl, VTs, Ops, MVT::v4i32, PtrInfo); 8607 8608 // Move data into the byte array. 
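// The qvstfiw above wrote one i32 per element into the 16-byte stack slot;
// the loop below reads those words back and truncating-stores just their
// low bytes to the real destination, yielding the packed 4 x i8 form of
// the v4i1 value.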
8609 SDValue Loads[4], LoadChains[4]; 8610 for (unsigned i = 0; i < 4; ++i) { 8611 unsigned Offset = 4*i; 8612 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 8613 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 8614 8615 Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx, 8616 PtrInfo.getWithOffset(Offset)); 8617 LoadChains[i] = Loads[i].getValue(1); 8618 } 8619 8620 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 8621 8622 SDValue Stores[4]; 8623 for (unsigned i = 0; i < 4; ++i) { 8624 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 8625 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 8626 8627 Stores[i] = DAG.getTruncStore( 8628 StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i), 8629 MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(), 8630 SN->getAAInfo()); 8631 } 8632 8633 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 8634 8635 return StoreChain; 8636 } 8637 8638 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 8639 SDLoc dl(Op); 8640 if (Op.getValueType() == MVT::v4i32) { 8641 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 8642 8643 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl); 8644 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt. 8645 8646 SDValue RHSSwap = // = vrlw RHS, 16 8647 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 8648 8649 // Shrinkify inputs to v8i16. 8650 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS); 8651 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS); 8652 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap); 8653 8654 // Low parts multiplied together, generating 32-bit results (we ignore the 8655 // top parts). 8656 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 8657 LHS, RHS, DAG, dl, MVT::v4i32); 8658 8659 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 8660 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 8661 // Shift the high parts up 16 bits. 8662 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 8663 Neg16, DAG, dl); 8664 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 8665 } else if (Op.getValueType() == MVT::v8i16) { 8666 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 8667 8668 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl); 8669 8670 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 8671 LHS, RHS, Zero, DAG, dl); 8672 } else if (Op.getValueType() == MVT::v16i8) { 8673 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 8674 bool isLittleEndian = Subtarget.isLittleEndian(); 8675 8676 // Multiply the even 8-bit parts, producing 16-bit sums. 8677 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 8678 LHS, RHS, DAG, dl, MVT::v8i16); 8679 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts); 8680 8681 // Multiply the odd 8-bit parts, producing 16-bit sums. 8682 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 8683 LHS, RHS, DAG, dl, MVT::v8i16); 8684 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts); 8685 8686 // Merge the results together. Because vmuleub and vmuloub are 8687 // instructions with a big-endian bias, we must reverse the 8688 // element numbering and reverse the meaning of "odd" and "even" 8689 // when generating little endian code. 
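// Example for the big-endian case: within each 16-bit product the low-order
// byte sits at the odd byte position, so the mask below takes bytes 2*i+1
// from EvenParts and 2*i+1+16 from OddParts; for little endian the byte
// numbering is mirrored and the even positions are used instead.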
8690 int Ops[16]; 8691 for (unsigned i = 0; i != 8; ++i) { 8692 if (isLittleEndian) { 8693 Ops[i*2 ] = 2*i; 8694 Ops[i*2+1] = 2*i+16; 8695 } else { 8696 Ops[i*2 ] = 2*i+1; 8697 Ops[i*2+1] = 2*i+1+16; 8698 } 8699 } 8700 if (isLittleEndian) 8701 return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops); 8702 else 8703 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 8704 } else { 8705 llvm_unreachable("Unknown mul to lower!"); 8706 } 8707 } 8708 8709 /// LowerOperation - Provide custom lowering hooks for some operations. 8710 /// 8711 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 8712 switch (Op.getOpcode()) { 8713 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 8714 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 8715 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 8716 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 8717 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 8718 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 8719 case ISD::SETCC: return LowerSETCC(Op, DAG); 8720 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 8721 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 8722 case ISD::VASTART: 8723 return LowerVASTART(Op, DAG); 8724 8725 case ISD::VAARG: 8726 return LowerVAARG(Op, DAG); 8727 8728 case ISD::VACOPY: 8729 return LowerVACOPY(Op, DAG); 8730 8731 case ISD::STACKRESTORE: 8732 return LowerSTACKRESTORE(Op, DAG); 8733 8734 case ISD::DYNAMIC_STACKALLOC: 8735 return LowerDYNAMIC_STACKALLOC(Op, DAG); 8736 8737 case ISD::GET_DYNAMIC_AREA_OFFSET: 8738 return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG); 8739 8740 case ISD::EH_DWARF_CFA: 8741 return LowerEH_DWARF_CFA(Op, DAG); 8742 8743 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 8744 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 8745 8746 case ISD::LOAD: return LowerLOAD(Op, DAG); 8747 case ISD::STORE: return LowerSTORE(Op, DAG); 8748 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); 8749 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 8750 case ISD::FP_TO_UINT: 8751 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, 8752 SDLoc(Op)); 8753 case ISD::UINT_TO_FP: 8754 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 8755 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 8756 8757 // Lower 64-bit shifts. 8758 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 8759 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 8760 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 8761 8762 // Vector-related lowering. 8763 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 8764 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 8765 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 8766 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 8767 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG); 8768 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 8769 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 8770 case ISD::MUL: return LowerMUL(Op, DAG); 8771 8772 // For counter-based loop handling. 8773 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 8774 8775 // Frame & Return address. 
8776 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 8777 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 8778 8779 case ISD::INTRINSIC_VOID: 8780 return LowerINTRINSIC_VOID(Op, DAG); 8781 } 8782 } 8783 8784 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 8785 SmallVectorImpl<SDValue>&Results, 8786 SelectionDAG &DAG) const { 8787 SDLoc dl(N); 8788 switch (N->getOpcode()) { 8789 default: 8790 llvm_unreachable("Do not know how to custom type legalize this operation!"); 8791 case ISD::READCYCLECOUNTER: { 8792 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 8793 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0)); 8794 8795 Results.push_back(RTB); 8796 Results.push_back(RTB.getValue(1)); 8797 Results.push_back(RTB.getValue(2)); 8798 break; 8799 } 8800 case ISD::INTRINSIC_W_CHAIN: { 8801 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 8802 Intrinsic::ppc_is_decremented_ctr_nonzero) 8803 break; 8804 8805 assert(N->getValueType(0) == MVT::i1 && 8806 "Unexpected result type for CTR decrement intrinsic"); 8807 EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), 8808 N->getValueType(0)); 8809 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 8810 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 8811 N->getOperand(1)); 8812 8813 Results.push_back(NewInt); 8814 Results.push_back(NewInt.getValue(1)); 8815 break; 8816 } 8817 case ISD::VAARG: { 8818 if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64()) 8819 return; 8820 8821 EVT VT = N->getValueType(0); 8822 8823 if (VT == MVT::i64) { 8824 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG); 8825 8826 Results.push_back(NewNode); 8827 Results.push_back(NewNode.getValue(1)); 8828 } 8829 return; 8830 } 8831 case ISD::FP_ROUND_INREG: { 8832 assert(N->getValueType(0) == MVT::ppcf128); 8833 assert(N->getOperand(0).getValueType() == MVT::ppcf128); 8834 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 8835 MVT::f64, N->getOperand(0), 8836 DAG.getIntPtrConstant(0, dl)); 8837 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 8838 MVT::f64, N->getOperand(0), 8839 DAG.getIntPtrConstant(1, dl)); 8840 8841 // Add the two halves of the long double in round-to-zero mode. 8842 SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 8843 8844 // We know the low half is about to be thrown away, so just use something 8845 // convenient. 8846 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128, 8847 FPreg, FPreg)); 8848 return; 8849 } 8850 case ISD::FP_TO_SINT: 8851 case ISD::FP_TO_UINT: 8852 // LowerFP_TO_INT() can only handle f32 and f64. 
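// ppcf128 sources are left to the common legalization code, which will
// expand the conversion (typically via a runtime library call).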
Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                 Instruction *Inst,
                                                 AtomicOrdering Ord) const {
  if (Ord == AtomicOrdering::SequentiallyConsistent)
    return callIntrinsic(Builder, Intrinsic::ppc_sync);
  if (isReleaseOrStronger(Ord))
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  return nullptr;
}

Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                  Instruction *Inst,
                                                  AtomicOrdering Ord) const {
  if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
    // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
    // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
    // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
    if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
      return Builder.CreateCall(
          Intrinsic::getDeclaration(
              Builder.GetInsertBlock()->getParent()->getParent(),
              Intrinsic::ppc_cfence, {Inst->getType()}),
          {Inst});
    // FIXME: Can use isync for rmw operation.
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  }
  return nullptr;
}

MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
                                    unsigned AtomicSize,
                                    unsigned BinOpcode,
                                    unsigned CmpOpcode,
                                    unsigned CmpPred) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode == 0.
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  auto LoadMnemonic = PPC::LDARX;
  auto StoreMnemonic = PPC::STDCX;
  switch (AtomicSize) {
  default:
    llvm_unreachable("Unexpected size of atomic entity");
  case 1:
    LoadMnemonic = PPC::LBARX;
    StoreMnemonic = PPC::STBCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "Call this only with size >= 4 or with partword atomics");
    break;
  case 2:
    LoadMnemonic = PPC::LHARX;
    StoreMnemonic = PPC::STHCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "Call this only with size >= 4 or with partword atomics");
    break;
  case 4:
    LoadMnemonic = PPC::LWARX;
    StoreMnemonic = PPC::STWCX;
    break;
  case 8:
    LoadMnemonic = PPC::LDARX;
    StoreMnemonic = PPC::STDCX;
    break;
  }

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = ++BB->getIterator();

  unsigned dest = MI.getOperand(0).getReg();
  unsigned ptrA = MI.getOperand(1).getReg();
  unsigned ptrB = MI.getOperand(2).getReg();
  unsigned incr = MI.getOperand(3).getReg();
  DebugLoc dl = MI.getDebugLoc();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB =
    CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  if (CmpOpcode)
    F->insert(It, loop2MBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  unsigned TmpReg = (!BinOpcode) ? incr :
    RegInfo.createVirtualRegister(AtomicSize == 8 ? &PPC::G8RCRegClass
                                                  : &PPC::GPRCRegClass);

  // thisMBB:
  //  ...
  //  fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  // loopMBB:
  //   l[wd]arx dest, ptr
  //   add r0, dest, incr
  //   st[wd]cx. r0, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB

  // For max/min...
  // loopMBB:
  //   l[wd]arx dest, ptr
  //   cmpl?[wd] incr, dest
  //   bgt exitMBB
  // loop2MBB:
  //   st[wd]cx. dest, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB

  BB = loopMBB;
  BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
    .addReg(ptrA).addReg(ptrB);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
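  // A concrete illustration of why the sign extension below is needed
  // (example values only): for an i8 atomic signed max of -1 and 1, the
  // loaded byte -1 is 0xFF; compared as a full 32-bit word without extsb
  // it would read as 255 and win the signed comparison, so it must first
  // be sign-extended back to 0xFFFFFFFF.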
  if (CmpOpcode) {
    // Signed comparisons of byte or halfword values must be sign-extended.
    if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
      unsigned ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
      BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
              ExtReg).addReg(dest);
      BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
        .addReg(incr).addReg(ExtReg);
    } else
      BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
        .addReg(incr).addReg(dest);

    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(exitMBB);
    BB = loop2MBB;
  }
  BuildMI(BB, dl, TII->get(StoreMnemonic))
    .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
  BuildMI(BB, dl, TII->get(PPC::BCC))
    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  // exitMBB:
  //  ...
  BB = exitMBB;
  return BB;
}
MachineBasicBlock *
PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr &MI,
                                            MachineBasicBlock *BB,
                                            bool is8bit, // operation
                                            unsigned BinOpcode,
                                            unsigned CmpOpcode,
                                            unsigned CmpPred) const {
  // If we support part-word atomic mnemonics, just use them.
  if (Subtarget.hasPartwordAtomics())
    return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode,
                            CmpOpcode, CmpPred);

  // This also handles ATOMIC_SWAP, indicated by BinOpcode == 0.
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  // In 64 bit mode we have to use 64 bits for addresses, even though the
  // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address
  // registers without caring whether they're 32 or 64, but here we're
  // doing actual arithmetic on the addresses.
  bool is64bit = Subtarget.isPPC64();
  bool isLittleEndian = Subtarget.isLittleEndian();
  unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = ++BB->getIterator();

  unsigned dest = MI.getOperand(0).getReg();
  unsigned ptrA = MI.getOperand(1).getReg();
  unsigned ptrB = MI.getOperand(2).getReg();
  unsigned incr = MI.getOperand(3).getReg();
  DebugLoc dl = MI.getDebugLoc();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB =
    CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  if (CmpOpcode)
    F->insert(It, loop2MBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass
                                          : &PPC::GPRCRegClass;
  unsigned PtrReg = RegInfo.createVirtualRegister(RC);
  unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
  unsigned ShiftReg =
    isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC);
  unsigned Incr2Reg = RegInfo.createVirtualRegister(RC);
  unsigned MaskReg = RegInfo.createVirtualRegister(RC);
  unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
  unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
  unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
  unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC);
  unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
  unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
  unsigned Ptr1Reg;
  unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC);

  // thisMBB:
  //  ...
  //  fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  // The 4-byte load must be aligned, while a char or short may be
  // anywhere in the word. Hence all this nasty bookkeeping code.
  //   add ptr1, ptrA, ptrB [copy if ptrA == 0]
  //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
  //   xori shift, shift1, 24 [16]
  //   rlwinm ptr, ptr1, 0, 0, 29
  //   slw incr2, incr, shift
  //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
  //   slw mask, mask2, shift
  // loopMBB:
  //   lwarx tmpDest, ptr
  //   add tmp, tmpDest, incr2
  //   andc tmp2, tmpDest, mask
  //   and tmp3, tmp, mask
  //   or tmp4, tmp3, tmp2
  //   stwcx. tmp4, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB
  //   srw dest, tmpDest, shift
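  // A worked example of the shift computation above (illustrative values):
  // for a byte with (ptr1 & 3) == 1, "rlwinm shift1, ptr1, 3, 27, 28" yields
  // shift1 = (ptr1 & 3) << 3 = 8. On little-endian targets byte 1 already
  // sits 8 bits up from the LSB, so shift = shift1; on big-endian targets
  // the byte order is reversed, so "xori shift, shift1, 24" gives
  // shift = 16. The "rlwinm ptr, ptr1, 0, 0, 29" then clears the low two
  // bits to form the aligned word address.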
  if (ptrA != ZeroReg) {
    Ptr1Reg = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
      .addReg(ptrA).addReg(ptrB);
  } else {
    Ptr1Reg = ptrB;
  }
  BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
    .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
  if (!isLittleEndian)
    BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
      .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
  if (is64bit)
    BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
      .addReg(Ptr1Reg).addImm(0).addImm(61);
  else
    BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
      .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
  BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg)
    .addReg(incr).addReg(ShiftReg);
  if (is8bit)
    BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
  else {
    BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
    BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
      .addReg(Mask3Reg).addImm(65535);
  }
  BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
    .addReg(Mask2Reg).addReg(ShiftReg);

  BB = loopMBB;
  BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
    .addReg(ZeroReg).addReg(PtrReg);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
      .addReg(Incr2Reg).addReg(TmpDestReg);
  BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg)
    .addReg(TmpDestReg).addReg(MaskReg);
  BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg)
    .addReg(TmpReg).addReg(MaskReg);
  if (CmpOpcode) {
    // For unsigned comparisons, we can directly compare the shifted values.
    // For signed comparisons we shift and sign extend.
    unsigned SReg = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), SReg)
      .addReg(TmpDestReg).addReg(MaskReg);
    unsigned ValueReg = SReg;
    unsigned CmpReg = Incr2Reg;
    if (CmpOpcode == PPC::CMPW) {
      ValueReg = RegInfo.createVirtualRegister(RC);
      BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
        .addReg(SReg).addReg(ShiftReg);
      unsigned ValueSReg = RegInfo.createVirtualRegister(RC);
      BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
        .addReg(ValueReg);
      ValueReg = ValueSReg;
      CmpReg = incr;
    }
    BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
      .addReg(CmpReg).addReg(ValueReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(exitMBB);
    BB = loop2MBB;
  }
  BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg)
    .addReg(Tmp3Reg).addReg(Tmp2Reg);
  BuildMI(BB, dl, TII->get(PPC::STWCX))
    .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg);
  BuildMI(BB, dl, TII->get(PPC::BCC))
    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  // exitMBB:
  //  ...
  BB = exitMBB;
  BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg)
    .addReg(ShiftReg);
  return BB;
}

llvm::MachineBasicBlock *
PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
                                    MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = ++MBB->getIterator();

  // Memory Reference
  MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();

  unsigned DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
  unsigned mainDstReg = MRI.createVirtualRegister(RC);
  unsigned restoreDstReg = MRI.createVirtualRegister(RC);

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");
  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  SjLjSetup mainMBB
  //  bl mainMBB
  //  v_restore = 1
  //  b sinkMBB
  //
  // mainMBB:
  //  buf[LabelOffset] = LR
  //  v_main = 0
  //
  // sinkMBB:
  //  v = phi(main, restore)
  //

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, sinkMBB);

  MachineInstrBuilder MIB;

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // Note that the structure of the jmp_buf used here is not compatible
  // with that used by libc, and is not designed to be. Specifically, it
  // stores only those 'reserved' registers that LLVM does not otherwise
  // understand how to spill. Also, by convention, by the time this
  // intrinsic is called, Clang has already stored the frame address in the
  // first slot of the buffer and stack address in the third. Following the
  // X86 target code, we'll store the jump address in the second slot. We also
  // need to save the TOC pointer (R2) to handle jumps between shared
  // libraries, and that will be stored in the fourth slot. The thread
  // identifier (R13) is not affected.
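  // For reference, the buffer layout assumed here, in PVT-sized slots
  // (slots 0 and 2 are filled in by the front end, the rest below):
  //   slot 0: frame address
  //   slot 1: resume IP       (LabelOffset)
  //   slot 2: stack pointer
  //   slot 3: TOC pointer     (TOCOffset, 64-bit SVR4 only)
  //   slot 4: base pointer    (BPOffset)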
  // thisMBB:
  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t TOCOffset   = 3 * PVT.getStoreSize();
  const int64_t BPOffset    = 4 * PVT.getStoreSize();

  // Prepare the IP in a register.
  const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
  unsigned LabelReg = MRI.createVirtualRegister(PtrRC);
  unsigned BufReg = MI.getOperand(1).getReg();

  if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) {
    setUsesTOCBasePtr(*MBB->getParent());
    MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
            .addReg(PPC::X2)
            .addImm(TOCOffset)
            .addReg(BufReg);
    MIB.setMemRefs(MMOBegin, MMOEnd);
  }

  // Naked functions never have a base pointer, and so we use r1. For all
  // other functions, this decision must be deferred until PEI.
  unsigned BaseReg;
  if (MF->getFunction()->hasFnAttribute(Attribute::Naked))
    BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
  else
    BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;

  MIB = BuildMI(*thisMBB, MI, DL,
                TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
          .addReg(BaseReg)
          .addImm(BPOffset)
          .addReg(BufReg);
  MIB.setMemRefs(MMOBegin, MMOEnd);

  // Setup
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
  MIB.addRegMask(TRI->getNoPreservedMask());

  BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);

  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
          .addMBB(mainMBB);
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);

  thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
  thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());

  // mainMBB:
  //  mainDstReg = 0
  MIB =
      BuildMI(mainMBB, DL,
              TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);

  // Store IP
  if (Subtarget.isPPC64()) {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
            .addReg(LabelReg)
            .addImm(LabelOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
            .addReg(LabelReg)
            .addImm(LabelOffset)
            .addReg(BufReg);
  }

  MIB.setMemRefs(MMOBegin, MMOEnd);

  BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
  mainMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(PPC::PHI), DstReg)
    .addReg(mainDstReg).addMBB(mainMBB)
    .addReg(restoreDstReg).addMBB(thisMBB);

  MI.eraseFromParent();
  return sinkMBB;
}

MachineBasicBlock *
PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
                                     MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  // Memory Reference
  MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  const TargetRegisterClass *RC =
      (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
  unsigned Tmp = MRI.createVirtualRegister(RC);
  // Since FP is only updated here but NOT referenced, it's treated as GPR.
  unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
  unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
  unsigned BP =
      (PVT == MVT::i64)
          ? PPC::X30
          : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
                                                              : PPC::R30);

  MachineInstrBuilder MIB;

  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t SPOffset    = 2 * PVT.getStoreSize();
  const int64_t TOCOffset   = 3 * PVT.getStoreSize();
  const int64_t BPOffset    = 4 * PVT.getStoreSize();

  unsigned BufReg = MI.getOperand(0).getReg();

  // Reload FP (the jumped-to function may not have had a frame pointer;
  // if so, its r31 will still be restored as necessary).
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
            .addImm(0)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
            .addImm(0)
            .addReg(BufReg);
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);

  // Reload IP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
            .addImm(LabelOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
            .addImm(LabelOffset)
            .addReg(BufReg);
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);

  // Reload SP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
            .addImm(SPOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
            .addImm(SPOffset)
            .addReg(BufReg);
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);

  // Reload BP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
            .addImm(BPOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
            .addImm(BPOffset)
            .addReg(BufReg);
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);

  // Reload TOC
  if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
    setUsesTOCBasePtr(*MBB->getParent());
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
            .addImm(TOCOffset)
            .addReg(BufReg);

    MIB.setMemRefs(MMOBegin, MMOEnd);
  }

  // Jump
  BuildMI(*MBB, MI, DL,
          TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
  BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));

  MI.eraseFromParent();
  return MBB;
}

MachineBasicBlock *
PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {
  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT) {
    if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() &&
        MI.getOpcode() == TargetOpcode::PATCHPOINT) {
      // Call lowering should have added an r2 operand to indicate a
      // dependence on the TOC base pointer value. It can't however, because
      // there is no way to mark the dependence as implicit there, and so the
      // stackmap code will confuse it with a regular operand. Instead, add
      // the dependence here.
      setUsesTOCBasePtr(*BB->getParent());
      MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
    }

    return emitPatchPoint(MI, BB);
  }

  if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
      MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
    return emitEHSjLjSetJmp(MI, BB);
  } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
             MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
    return emitEHSjLjLongJmp(MI, BB);
  }

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  // To "insert" these instructions we actually have to insert their
  // control-flow patterns.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  MachineFunction *F = BB->getParent();

  if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
      MI.getOpcode() == PPC::SELECT_CC_I8 ||
      MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8) {
    SmallVector<MachineOperand, 2> Cond;
    if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
        MI.getOpcode() == PPC::SELECT_CC_I8)
      Cond.push_back(MI.getOperand(4));
    else
      Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
    Cond.push_back(MI.getOperand(1));

    DebugLoc dl = MI.getDebugLoc();
    TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
                      MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
  } else if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
             MI.getOpcode() == PPC::SELECT_CC_I8 ||
             MI.getOpcode() == PPC::SELECT_CC_F4 ||
             MI.getOpcode() == PPC::SELECT_CC_F8 ||
             MI.getOpcode() == PPC::SELECT_CC_QFRC ||
             MI.getOpcode() == PPC::SELECT_CC_QSRC ||
             MI.getOpcode() == PPC::SELECT_CC_QBRC ||
             MI.getOpcode() == PPC::SELECT_CC_VRRC ||
             MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
             MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
             MI.getOpcode() == PPC::SELECT_CC_VSRC ||
             MI.getOpcode() == PPC::SELECT_I4 ||
             MI.getOpcode() == PPC::SELECT_I8 ||
             MI.getOpcode() == PPC::SELECT_F4 ||
             MI.getOpcode() == PPC::SELECT_F8 ||
             MI.getOpcode() == PPC::SELECT_QFRC ||
             MI.getOpcode() == PPC::SELECT_QSRC ||
             MI.getOpcode() == PPC::SELECT_QBRC ||
             MI.getOpcode() == PPC::SELECT_VRRC ||
             MI.getOpcode() == PPC::SELECT_VSFRC ||
             MI.getOpcode() == PPC::SELECT_VSSRC ||
             MI.getOpcode() == PPC::SELECT_VSRC) {
    // The incoming instruction knows the destination vreg to set, the
    // condition code register to branch on, the true/false values to
    // select between, and a branch opcode to use.

    // thisMBB:
    // ...
    //  TrueVal = ...
    //  cmpTY ccX, r1, r2
    //  bCC copy1MBB
    //  fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
    DebugLoc dl = MI.getDebugLoc();
    F->insert(It, copy0MBB);
    F->insert(It, sinkMBB);

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    sinkMBB->splice(sinkMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

    // Next, add the true and fallthrough blocks as its successors.
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
        MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
        MI.getOpcode() == PPC::SELECT_QFRC ||
        MI.getOpcode() == PPC::SELECT_QSRC ||
        MI.getOpcode() == PPC::SELECT_QBRC ||
        MI.getOpcode() == PPC::SELECT_VRRC ||
        MI.getOpcode() == PPC::SELECT_VSFRC ||
        MI.getOpcode() == PPC::SELECT_VSSRC ||
        MI.getOpcode() == PPC::SELECT_VSRC) {
      BuildMI(BB, dl, TII->get(PPC::BC))
        .addReg(MI.getOperand(1).getReg())
        .addMBB(sinkMBB);
    } else {
      unsigned SelectPred = MI.getOperand(4).getImm();
      BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(SelectPred)
        .addReg(MI.getOperand(1).getReg())
        .addMBB(sinkMBB);
    }

    // copy0MBB:
    //  %FalseValue = ...
    //  # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    // sinkMBB:
    //  %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(3).getReg())
      .addMBB(copy0MBB)
      .addReg(MI.getOperand(2).getReg())
      .addMBB(thisMBB);
  } else if (MI.getOpcode() == PPC::ReadTB) {
    // To read the 64-bit time-base register on a 32-bit target, we read the
    // two halves. Should the counter have wrapped while it was being read, we
    // need to try again.
    // ...
    // readLoop:
    //   mfspr Rx, TBU  # load from TBU
    //   mfspr Ry, TB   # load from TB
    //   mfspr Rz, TBU  # load from TBU
    //   cmpw crX, Rx, Rz  # check if 'old' = 'new'
    //   bne readLoop   # branch if they're not equal
    // ...
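    // A concrete wrap scenario (illustrative values): if TB holds
    // 0x0000FFFF_FFFFFFFF when the first TBU read happens and ticks over to
    // 0x00010000_00000000 before the TB read, the two TBU reads differ
    // (0x0000FFFF vs. 0x00010000), the cmpw fails, and the loop retries, so
    // a torn HiReg/LoReg pair is never returned.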
    MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
    DebugLoc dl = MI.getDebugLoc();
    F->insert(It, readMBB);
    F->insert(It, sinkMBB);

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    sinkMBB->splice(sinkMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

    BB->addSuccessor(readMBB);
    BB = readMBB;

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
    unsigned LoReg = MI.getOperand(0).getReg();
    unsigned HiReg = MI.getOperand(1).getReg();

    BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
    BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
    BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);

    unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);

    BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
      .addReg(HiReg).addReg(ReadAgainReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB);

    BB->addSuccessor(readMBB);
    BB->addSuccessor(sinkMBB);
  } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);

  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
           MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
           (Subtarget.hasPartwordAtomics() &&
            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
           (Subtarget.hasPartwordAtomics() &&
            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
    bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;

    auto LoadMnemonic = PPC::LDARX;
    auto StoreMnemonic = PPC::STDCX;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Compare and swap of unknown size");
    case PPC::ATOMIC_CMP_SWAP_I8:
      LoadMnemonic = PPC::LBARX;
      StoreMnemonic = PPC::STBCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "No support for partword atomics.");
      break;
    case PPC::ATOMIC_CMP_SWAP_I16:
      LoadMnemonic = PPC::LHARX;
      StoreMnemonic = PPC::STHCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "No support for partword atomics.");
      break;
    case PPC::ATOMIC_CMP_SWAP_I32:
      LoadMnemonic = PPC::LWARX;
      StoreMnemonic = PPC::STWCX;
      break;
    case PPC::ATOMIC_CMP_SWAP_I64:
      LoadMnemonic = PPC::LDARX;
      StoreMnemonic = PPC::STDCX;
      break;
    }
    unsigned dest = MI.getOperand(0).getReg();
    unsigned ptrA = MI.getOperand(1).getReg();
    unsigned ptrB = MI.getOperand(2).getReg();
    unsigned oldval = MI.getOperand(3).getReg();
    unsigned newval = MI.getOperand(4).getReg();
    DebugLoc dl = MI.getDebugLoc();

    MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, loop1MBB);
    F->insert(It, loop2MBB);
    F->insert(It, midMBB);
    F->insert(It, exitMBB);
    exitMBB->splice(exitMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    exitMBB->transferSuccessorsAndUpdatePHIs(BB);

    // thisMBB:
    //  ...
    //  fallthrough --> loop1MBB
    BB->addSuccessor(loop1MBB);

    // loop1MBB:
    //   l[bhwd]arx dest, ptr
    //   cmp[wd] dest, oldval
    //   bne- midMBB
    // loop2MBB:
    //   st[bhwd]cx. newval, ptr
    //   bne- loop1MBB
    //   b exitBB
    // midMBB:
    //   st[bhwd]cx. dest, ptr
    // exitBB:
    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
      .addReg(ptrA).addReg(ptrB);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
      .addReg(oldval).addReg(dest);
    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(midMBB);

    BB = loop2MBB;
    BuildMI(BB, dl, TII->get(StoreMnemonic))
      .addReg(newval).addReg(ptrA).addReg(ptrB);
    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
    BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
    BB->addSuccessor(loop1MBB);
    BB->addSuccessor(exitMBB);

    BB = midMBB;
    BuildMI(BB, dl, TII->get(StoreMnemonic))
      .addReg(dest).addReg(ptrA).addReg(ptrB);
    BB->addSuccessor(exitMBB);

    // exitMBB:
    //  ...
    BB = exitMBB;
  } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
    // We must use 64-bit registers for addresses when targeting 64-bit,
    // since we're actually doing arithmetic on them. Other registers
    // can be 32-bit.
    bool is64bit = Subtarget.isPPC64();
    bool isLittleEndian = Subtarget.isLittleEndian();
    bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;

    unsigned dest = MI.getOperand(0).getReg();
    unsigned ptrA = MI.getOperand(1).getReg();
    unsigned ptrB = MI.getOperand(2).getReg();
    unsigned oldval = MI.getOperand(3).getReg();
    unsigned newval = MI.getOperand(4).getReg();
    DebugLoc dl = MI.getDebugLoc();

    MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, loop1MBB);
    F->insert(It, loop2MBB);
    F->insert(It, midMBB);
    F->insert(It, exitMBB);
    exitMBB->splice(exitMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    exitMBB->transferSuccessorsAndUpdatePHIs(BB);

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass
                                            : &PPC::GPRCRegClass;
    unsigned PtrReg = RegInfo.createVirtualRegister(RC);
    unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
    unsigned ShiftReg =
      isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC);
    unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC);
    unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC);
    unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC);
    unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC);
    unsigned MaskReg = RegInfo.createVirtualRegister(RC);
    unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
    unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
    unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
    unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
    unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
    unsigned Ptr1Reg;
    unsigned TmpReg = RegInfo.createVirtualRegister(RC);
    unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
    // thisMBB:
    //  ...
    //  fallthrough --> loop1MBB
    BB->addSuccessor(loop1MBB);

    // The 4-byte load must be aligned, while a char or short may be
    // anywhere in the word. Hence all this nasty bookkeeping code.
    //   add ptr1, ptrA, ptrB [copy if ptrA == 0]
    //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
    //   xori shift, shift1, 24 [16]
    //   rlwinm ptr, ptr1, 0, 0, 29
    //   slw newval2, newval, shift
    //   slw oldval2, oldval, shift
    //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
    //   slw mask, mask2, shift
    //   and newval3, newval2, mask
    //   and oldval3, oldval2, mask
    // loop1MBB:
    //   lwarx tmpDest, ptr
    //   and tmp, tmpDest, mask
    //   cmpw tmp, oldval3
    //   bne- midMBB
    // loop2MBB:
    //   andc tmp2, tmpDest, mask
    //   or tmp4, tmp2, newval3
    //   stwcx. tmp4, ptr
    //   bne- loop1MBB
    //   b exitBB
    // midMBB:
    //   stwcx. tmpDest, ptr
    // exitBB:
    //   srw dest, tmpDest, shift
    if (ptrA != ZeroReg) {
      Ptr1Reg = RegInfo.createVirtualRegister(RC);
      BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
        .addReg(ptrA).addReg(ptrB);
    } else {
      Ptr1Reg = ptrB;
    }
    BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
      .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
    if (!isLittleEndian)
      BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
        .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
    if (is64bit)
      BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
        .addReg(Ptr1Reg).addImm(0).addImm(61);
    else
      BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
        .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
    BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
      .addReg(newval).addReg(ShiftReg);
    BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
      .addReg(oldval).addReg(ShiftReg);
    if (is8bit)
      BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
    else {
      BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
      BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
        .addReg(Mask3Reg).addImm(65535);
    }
    BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
      .addReg(Mask2Reg).addReg(ShiftReg);
    BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
      .addReg(NewVal2Reg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
      .addReg(OldVal2Reg).addReg(MaskReg);

    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
      .addReg(ZeroReg).addReg(PtrReg);
    BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
      .addReg(TmpDestReg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
      .addReg(TmpReg).addReg(OldVal3Reg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(midMBB);

    BB = loop2MBB;
    BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
      .addReg(TmpDestReg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
      .addReg(Tmp2Reg).addReg(NewVal3Reg);
    BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg)
      .addReg(ZeroReg).addReg(PtrReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
    BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
    BB->addSuccessor(loop1MBB);
    BB->addSuccessor(exitMBB);

    BB = midMBB;
    BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg)
      .addReg(ZeroReg).addReg(PtrReg);
    BB->addSuccessor(exitMBB);

    // exitMBB:
    //  ...
    BB = exitMBB;
    BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpReg)
      .addReg(ShiftReg);
  } else if (MI.getOpcode() == PPC::FADDrtz) {
    // This pseudo performs an FADD with rounding mode temporarily forced
    // to round-to-zero. We emit this via custom inserter since the FPSCR
    // is not modeled at the SelectionDAG level.
    unsigned Dest = MI.getOperand(0).getReg();
    unsigned Src1 = MI.getOperand(1).getReg();
    unsigned Src2 = MI.getOperand(2).getReg();
    DebugLoc dl = MI.getDebugLoc();

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);

    // Save FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);

    // Set rounding mode to round-to-zero.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);
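    // Why these two bit operations give round-to-zero: the FPSCR rounding
    // control is the RN field in bits 30:31, with 0b00 = round-to-nearest
    // and 0b01 = round-toward-zero. Setting bit 31 and clearing bit 30
    // therefore leaves RN = 0b01 regardless of the previous mode.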
    // Perform addition.
    BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);

    // Restore FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
  } else if (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT ||
             MI.getOpcode() == PPC::ANDIo_1_GT_BIT ||
             MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
             MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) {
    unsigned Opcode = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
                       MI.getOpcode() == PPC::ANDIo_1_GT_BIT8)
                          ? PPC::ANDIo8
                          : PPC::ANDIo;
    bool isEQ = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT ||
                 MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8);

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ?
                                                  &PPC::GPRCRegClass :
                                                  &PPC::G8RCRegClass);

    DebugLoc dl = MI.getDebugLoc();
    BuildMI(*BB, MI, dl, TII->get(Opcode), Dest)
      .addReg(MI.getOperand(1).getReg())
      .addImm(1);
    BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY),
            MI.getOperand(0).getReg())
      .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT);
  } else if (MI.getOpcode() == PPC::TCHECK_RET) {
    DebugLoc Dl = MI.getDebugLoc();
    MachineRegisterInfo &RegInfo = F->getRegInfo();
    unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
    BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
    return BB;
  } else {
    llvm_unreachable("Unexpected instr type to insert");
  }

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
  // For the estimates, convergence is quadratic, so we essentially double the
  // number of correct bits after every iteration. For both FRE and FRSQRTE,
  // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
  // this is 2^-14. An IEEE float has 23 fraction bits and a double has 52.
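  // Worked through: starting from 5 accurate bits, three refinement steps
  // give 5 -> 10 -> 20 -> 40 bits, enough for the 24-bit f32 significand,
  // and one extra step gives 80 bits for the 53-bit f64 significand. With
  // hasRecipPrec() the estimate already provides 14 bits, so one step
  // (28 bits) suffices for f32 and two steps (56 bits) for f64.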
  int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
  if (VT.getScalarType() == MVT::f64)
    RefinementSteps++;
  return RefinementSteps;
}

SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                           int Enabled, int &RefinementSteps,
                                           bool &UseOneConstNR,
                                           bool Reciprocal) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
      (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
      (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
      (VT == MVT::v4f64 && Subtarget.hasQPX())) {
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);

    UseOneConstNR = true;
    return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
                                            int Enabled,
                                            int &RefinementSteps) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
      (VT == MVT::f64 && Subtarget.hasFRE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
      (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
      (VT == MVT::v4f64 && Subtarget.hasQPX())) {
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
    return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
  // Note: This functionality is used only when unsafe-fp-math is enabled, and
  // on cores with reciprocal estimates (which are used when unsafe-fp-math is
  // enabled for division), this functionality is redundant with the default
  // combiner logic (once the division -> reciprocal/multiply transformation
  // has taken place). As a result, this matters more for older cores than for
  // newer ones.

  // Combine multiple FDIVs with the same divisor into multiple FMULs by the
  // reciprocal if there are two or more FDIVs (for embedded cores with only
  // one FP pipeline) or three or more FDIVs (for generic OOO cores).
  switch (Subtarget.getDarwinDirective()) {
  default:
    return 3;
  case PPC::DIR_440:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
    return 2;
  }
}
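
// As a sketch of the transformation this threshold gates (illustrative IR,
// performed by the generic combiner, not by this function): with a
// threshold of 2, the combiner turns
//   %q1 = fdiv float %a, %d
//   %q2 = fdiv float %b, %d
// into
//   %r  = fdiv float 1.0, %d
//   %q1 = fmul float %a, %r
//   %q2 = fmul float %b, %r
// trading the second divide for cheaper multiplies.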

// isConsecutiveLSLoc needs to work even if all adds have not yet been
// collapsed, and so we need to look through chains of them.
static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
                                      int64_t &Offset, SelectionDAG &DAG) {
  if (DAG.isBaseWithConstantOffset(Loc)) {
    Base = Loc.getOperand(0);
    Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();

    // The base might itself be a base plus an offset, and if so, accumulate
    // that as well.
    getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
  }
}

static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
                               unsigned Bytes, int Dist,
                               SelectionDAG &DAG) {
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDValue BaseLoc = Base->getBasePtr();
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS = MFI.getObjectSize(FI);
    int BFS = MFI.getObjectSize(BFI);
    if (FS != BFS || FS != (int)Bytes) return false;
    return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
  }

  SDValue Base1 = Loc, Base2 = BaseLoc;
  int64_t Offset1 = 0, Offset2 = 0;
  getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
  getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
  if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
    return true;

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const GlobalValue *GV1 = nullptr;
  const GlobalValue *GV2 = nullptr;
  Offset1 = 0;
  Offset2 = 0;
  bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
  bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist*Bytes);
  return false;
}
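
// A worked example of the check above (illustrative values): for a 4-byte
// base access at (X + 4) and a candidate location (X + 8) + 4, the recursion
// accumulates Base1 = Base2 = X with Offset1 = 12 and Offset2 = 4, so the
// candidate is consecutive at Dist = 2 because 12 == 4 + 2 * 4.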

// Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
// not enforce equality of the chain operands.
static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
                            unsigned Bytes, int Dist,
                            SelectionDAG &DAG) {
  if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
    EVT VT = LS->getMemoryVT();
    SDValue Loc = LS->getBasePtr();
    return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
  }

  if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
    EVT VT;
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    default: return false;
    case Intrinsic::ppc_qpx_qvlfd:
    case Intrinsic::ppc_qpx_qvlfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfs:
    case Intrinsic::ppc_qpx_qvlfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcd:
    case Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcs:
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    case Intrinsic::ppc_qpx_qvlfiwa:
    case Intrinsic::ppc_qpx_qvlfiwz:
    case Intrinsic::ppc_altivec_lvx:
    case Intrinsic::ppc_altivec_lvxl:
    case Intrinsic::ppc_vsx_lxvw4x:
    case Intrinsic::ppc_vsx_lxvw4x_be:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
    case Intrinsic::ppc_vsx_lxvd2x_be:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    }

    return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
  }

  if (N->getOpcode() == ISD::INTRINSIC_VOID) {
    EVT VT;
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    default: return false;
    case Intrinsic::ppc_qpx_qvstfd:
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    case Intrinsic::ppc_qpx_qvstfiw:
    case Intrinsic::ppc_qpx_qvstfiwa:
    case Intrinsic::ppc_altivec_stvx:
    case Intrinsic::ppc_altivec_stvxl:
    case Intrinsic::ppc_vsx_stxvw4x:
    case Intrinsic::ppc_vsx_stxvw4x_be:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
    case Intrinsic::ppc_vsx_stxvd2x_be:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    }

    return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
  }

  return false;
}

// Return true if there is a nearby consecutive load to the one provided
// (regardless of alignment). We search up and down the chain, looking through
// token factors and other loads (but nothing else). As a result, a true result
// indicates that it is safe to create a new consecutive load adjacent to the
// load provided.
static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
  SDValue Chain = LD->getChain();
  EVT VT = LD->getMemoryVT();

  SmallSet<SDNode *, 16> LoadRoots;
  SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
  SmallSet<SDNode *, 16> Visited;

  // First, search up the chain, branching to follow all token-factor
  // operands. If we find a consecutive load, then we're done; otherwise,
  // record all nodes just above the top-level loads and token factors.
  while (!Queue.empty()) {
    SDNode *ChainNext = Queue.pop_back_val();
    if (!Visited.insert(ChainNext).second)
      continue;

    if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
      if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
        return true;

      if (!Visited.count(ChainLD->getChain().getNode()))
        Queue.push_back(ChainLD->getChain().getNode());
    } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
      for (const SDUse &O : ChainNext->ops())
        if (!Visited.count(O.getNode()))
          Queue.push_back(O.getNode());
    } else
      LoadRoots.insert(ChainNext);
  }

  // Second, search down the chain, starting from the top-level nodes recorded
  // in the first phase. These top-level nodes are the nodes just above all
  // loads and token factors. Starting with their uses, recursively look
  // through all loads (just the chain uses) and token factors to find a
  // consecutive load.
  Visited.clear();
  Queue.clear();

  for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
       IE = LoadRoots.end(); I != IE; ++I) {
    Queue.push_back(*I);

    while (!Queue.empty()) {
      SDNode *LoadRoot = Queue.pop_back_val();
      if (!Visited.insert(LoadRoot).second)
        continue;

      if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
        if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
          return true;

      for (SDNode::use_iterator UI = LoadRoot->use_begin(),
           UE = LoadRoot->use_end(); UI != UE; ++UI)
        if (((isa<MemSDNode>(*UI) &&
              cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
          Queue.push_back(*UI);
    }
  }

  return false;
}

/// This function is called when we have proved that a SETCC node can be
/// replaced by subtraction (and other supporting instructions) so that the
/// result of the comparison is kept in a GPR instead of a CR field. This
/// function is purely for codegen purposes and has some flags to guide the
/// codegen process.
static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
                                     bool Swap, SDLoc &DL, SelectionDAG &DAG) {
  assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");

  // Zero extend the operands to the largest legal integer. Originally, they
  // must be of a strictly smaller size.
  auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
                         DAG.getConstant(Size, DL, MVT::i32));
  auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
                         DAG.getConstant(Size, DL, MVT::i32));

  // Swap the operands if needed, depending on the condition code.
  if (Swap)
    std::swap(Op0, Op1);

  // Subtract the extended integers.
  auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);

  // Move the sign bit to the least significant position and zero out the
  // rest. Now the least significant bit carries the result of the original
  // comparison.
  auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
                             DAG.getConstant(Size - 1, DL, MVT::i32));
  auto Final = Shifted;

  // Complement the result if needed, based on the condition code.
  if (Complement)
    Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
                        DAG.getConstant(1, DL, MVT::i64));

  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
}
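
// A worked instance of the trick above (illustrative values): for
// "setult i32 %a, %b" with Size = 64, %a = 1, and %b = 2, the zero-extended
// subtraction 1 - 2 produces 0xFFFFFFFFFFFFFFFF; shifting right by
// Size - 1 = 63 leaves exactly the sign bit, i.e. 1, which is the ult
// result. The other unsigned predicates reuse the same subtraction with the
// operands swapped and/or the final bit complemented.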
  if (!DCI.isAfterLegalizeVectorOps())
    return SDValue();

  // If all users of SETCC extend its value to a legal integer type, then we
  // replace SETCC with a subtraction.
  for (SDNode::use_iterator UI = N->use_begin(),
       UE = N->use_end(); UI != UE; ++UI) {
    if (UI->getOpcode() != ISD::ZERO_EXTEND)
      return SDValue();
  }

  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  auto OpSize = N->getOperand(0).getValueSizeInBits();

  unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();

  if (OpSize < Size) {
    switch (CC) {
    default: break;
    case ISD::SETULT:
      return generateEquivalentSub(N, Size, false, false, DL, DAG);
    case ISD::SETULE:
      return generateEquivalentSub(N, Size, true, true, DL, DAG);
    case ISD::SETUGT:
      return generateEquivalentSub(N, Size, false, true, DL, DAG);
    case ISD::SETUGE:
      return generateEquivalentSub(N, Size, true, false, DL, DAG);
    }
  }

  return SDValue();
}

SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
  // If we're tracking CR bits, we need to be careful that we don't have:
  //   trunc(binary-ops(zext(x), zext(y)))
  // or
  //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
  // such that we're unnecessarily moving things into GPRs when it would be
  // better to keep them in CR bits.

  // Note that trunc here can be an actual i1 trunc, or can be the effective
  // truncation that comes from a setcc or select_cc.
  if (N->getOpcode() == ISD::TRUNCATE &&
      N->getValueType(0) != MVT::i1)
    return SDValue();

  if (N->getOperand(0).getValueType() != MVT::i32 &&
      N->getOperand(0).getValueType() != MVT::i64)
    return SDValue();

  if (N->getOpcode() == ISD::SETCC ||
      N->getOpcode() == ISD::SELECT_CC) {
    // If we're looking at a comparison, then we need to make sure that the
    // high bits (all except for the first) don't affect the result.
    ISD::CondCode CC =
      cast<CondCodeSDNode>(N->getOperand(
        N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
    unsigned OpBits = N->getOperand(0).getValueSizeInBits();

    if (ISD::isSignedIntSetCC(CC)) {
      if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
          DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
        return SDValue();
    } else if (ISD::isUnsignedIntSetCC(CC)) {
      if (!DAG.MaskedValueIsZero(N->getOperand(0),
                                 APInt::getHighBitsSet(OpBits, OpBits-1)) ||
          !DAG.MaskedValueIsZero(N->getOperand(1),
                                 APInt::getHighBitsSet(OpBits, OpBits-1)))
        return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
                                             : SDValue());
    } else {
      // This is neither a signed nor an unsigned comparison, just make sure
      // that the high bits are equal.
      KnownBits Op1Known, Op2Known;
      DAG.computeKnownBits(N->getOperand(0), Op1Known);
      DAG.computeKnownBits(N->getOperand(1), Op2Known);

      // We don't really care about what is known about the first bit (if
      // anything), so clear it in all masks prior to comparing them.
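      // (Only equality-style predicates reach this point, so bit 0 is the
      // value actually being compared; the higher bits merely have to agree
      // on both sides for the i1 comparison to be equivalent.)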
      Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0);
      Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0);

      if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One)
        return SDValue();
    }
  }

  // We now know that the higher-order bits are irrelevant; we just need to
  // make sure that all of the intermediate operations are bit operations,
  // and all inputs are extensions.
  if (N->getOperand(0).getOpcode() != ISD::AND &&
      N->getOperand(0).getOpcode() != ISD::OR  &&
      N->getOperand(0).getOpcode() != ISD::XOR &&
      N->getOperand(0).getOpcode() != ISD::SELECT &&
      N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
      N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
      N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
      N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
      N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
    return SDValue();

  if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
      N->getOperand(1).getOpcode() != ISD::AND &&
      N->getOperand(1).getOpcode() != ISD::OR  &&
      N->getOperand(1).getOpcode() != ISD::XOR &&
      N->getOperand(1).getOpcode() != ISD::SELECT &&
      N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
      N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
      N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
      N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
      N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
    return SDValue();

  SmallVector<SDValue, 4> Inputs;
  SmallVector<SDValue, 8> BinOps, PromOps;
  SmallPtrSet<SDNode *, 16> Visited;

  for (unsigned i = 0; i < 2; ++i) {
    if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
          N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
          N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
         N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
        isa<ConstantSDNode>(N->getOperand(i)))
      Inputs.push_back(N->getOperand(i));
    else
      BinOps.push_back(N->getOperand(i));

    if (N->getOpcode() == ISD::TRUNCATE)
      break;
  }

  // Visit all inputs, collect all binary operations (and, or, xor and
  // select) that are all fed by extensions.
  while (!BinOps.empty()) {
    SDValue BinOp = BinOps.back();
    BinOps.pop_back();

    if (!Visited.insert(BinOp.getNode()).second)
      continue;

    PromOps.push_back(BinOp);

    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
      // The condition of the select is not promoted.
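      // (For SELECT, operand 0 is the i1 condition; for SELECT_CC, operands
      // 0 and 1 are the values being compared, so only the true/false values
      // in operands 2 and 3 are candidates for promotion.)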
      if (BinOp.getOpcode() == ISD::SELECT && i == 0)
        continue;
      if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
        continue;

      if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
            BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
            BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
           BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
          isa<ConstantSDNode>(BinOp.getOperand(i))) {
        Inputs.push_back(BinOp.getOperand(i));
      } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
                 BinOp.getOperand(i).getOpcode() == ISD::OR ||
                 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
                 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
                 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
                 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
                 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
        BinOps.push_back(BinOp.getOperand(i));
      } else {
        // We have an input that is not an extension or another binary
        // operation; we'll abort this transformation.
        return SDValue();
      }
    }
  }

  // Make sure that this is a self-contained cluster of operations (which
  // is not quite the same thing as saying that everything has only one
  // use).
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
         UE = Inputs[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // Make sure that we're not going to promote the non-output-value
      // operand(s) of SELECT or SELECT_CC.
      // FIXME: Although we could sometimes handle this, and it does occur in
      // practice that one of the condition inputs to the select is also one
      // of the outputs, we currently can't deal with this.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == Inputs[i])
          return SDValue();
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == Inputs[i] ||
            User->getOperand(1) == Inputs[i])
          return SDValue();
      }
    }
  }

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
    for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
         UE = PromOps[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // Make sure that we're not going to promote the non-output-value
      // operand(s) of SELECT or SELECT_CC.
      // FIXME: Although we could sometimes handle this, and it does occur in
      // practice that one of the condition inputs to the select is also one
      // of the outputs, we currently can't deal with this.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == PromOps[i])
          return SDValue();
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == PromOps[i] ||
            User->getOperand(1) == PromOps[i])
          return SDValue();
      }
    }
  }

  // Replace all inputs with the extension operand.
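  // (That is, zext(x:i1) is replaced by x itself; the i1 value then feeds
  // the soon-to-be-i1 operations directly.)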
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    // Constants may have users outside the cluster of to-be-promoted nodes,
    // and so we need to replace those as we do the promotions.
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;
    else
      DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
  }

  std::list<HandleSDNode> PromOpHandles;
  for (auto &PromOp : PromOps)
    PromOpHandles.emplace_back(PromOp);

  // Replace all operations (these are all the same, but have a different
  // (i1) return type). DAG.getNode will validate that the types of
  // a binary operator match, so go through the list in reverse so that
  // we've likely promoted both operands first. Any intermediate truncations
  // or extensions disappear.
  while (!PromOpHandles.empty()) {
    SDValue PromOp = PromOpHandles.back().getValue();
    PromOpHandles.pop_back();

    if (PromOp.getOpcode() == ISD::TRUNCATE ||
        PromOp.getOpcode() == ISD::SIGN_EXTEND ||
        PromOp.getOpcode() == ISD::ZERO_EXTEND ||
        PromOp.getOpcode() == ISD::ANY_EXTEND) {
      if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
          PromOp.getOperand(0).getValueType() != MVT::i1) {
        // The operand is not yet ready (see comment below).
        PromOpHandles.emplace_front(PromOp);
        continue;
      }

      SDValue RepValue = PromOp.getOperand(0);
      if (isa<ConstantSDNode>(RepValue))
        RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);

      DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
      continue;
    }

    unsigned C;
    switch (PromOp.getOpcode()) {
    default:             C = 0; break;
    case ISD::SELECT:    C = 1; break;
    case ISD::SELECT_CC: C = 2; break;
    }

    if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
         PromOp.getOperand(C).getValueType() != MVT::i1) ||
        (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
         PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
      // The to-be-promoted operands of this node have not yet been
      // promoted (this should be rare because we're going through the
      // list backward, but if one of the operands has several users in
      // this cluster of to-be-promoted nodes, it is possible).
      PromOpHandles.emplace_front(PromOp);
      continue;
    }

    SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
                                PromOp.getNode()->op_end());

    // If there are any constant inputs, make sure they're replaced now.
    for (unsigned i = 0; i < 2; ++i)
      if (isa<ConstantSDNode>(Ops[C+i]))
        Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);

    DAG.ReplaceAllUsesOfValueWith(PromOp,
      DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
  }

  // Now we're left with the initial truncation itself.
  if (N->getOpcode() == ISD::TRUNCATE)
    return N->getOperand(0);

  // Otherwise, this is a comparison. The operands to be compared have just
  // changed type (to i1), but everything else is the same.
  return SDValue(N, 0);
}

SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  // If we're tracking CR bits, we need to be careful that we don't have:
  //   zext(binary-ops(trunc(x), trunc(y)))
  // or
  //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
  // such that we're unnecessarily moving things into CR bits that can more
  // efficiently stay in GPRs. Note that if we're not certain that the high
  // bits are set as required by the final extension, we still may need to do
  // some masking to get the proper behavior.

  // This same functionality is important on PPC64 when dealing with
  // 32-to-64-bit extensions; these occur often when 32-bit values are used
  // as the return values of functions. Because it is so similar, it is
  // handled here as well.

  if (N->getValueType(0) != MVT::i32 &&
      N->getValueType(0) != MVT::i64)
    return SDValue();

  if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
        (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
    return SDValue();

  if (N->getOperand(0).getOpcode() != ISD::AND &&
      N->getOperand(0).getOpcode() != ISD::OR  &&
      N->getOperand(0).getOpcode() != ISD::XOR &&
      N->getOperand(0).getOpcode() != ISD::SELECT &&
      N->getOperand(0).getOpcode() != ISD::SELECT_CC)
    return SDValue();

  SmallVector<SDValue, 4> Inputs;
  SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
  SmallPtrSet<SDNode *, 16> Visited;

  // Visit all inputs, collect all binary operations (and, or, xor and
  // select) that are all fed by truncations.
  while (!BinOps.empty()) {
    SDValue BinOp = BinOps.back();
    BinOps.pop_back();

    if (!Visited.insert(BinOp.getNode()).second)
      continue;

    PromOps.push_back(BinOp);

    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
      // The condition of the select is not promoted.
      if (BinOp.getOpcode() == ISD::SELECT && i == 0)
        continue;
      if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
        continue;

      if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
          isa<ConstantSDNode>(BinOp.getOperand(i))) {
        Inputs.push_back(BinOp.getOperand(i));
      } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
                 BinOp.getOperand(i).getOpcode() == ISD::OR ||
                 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
        BinOps.push_back(BinOp.getOperand(i));
      } else {
        // We have an input that is not a truncation or another binary
        // operation; we'll abort this transformation.
        return SDValue();
      }
    }
  }

  // The operands of a select that must be truncated when the select is
  // promoted because the operand is actually part of the to-be-promoted set.
  DenseMap<SDNode *, EVT> SelectTruncOp[2];

  // Make sure that this is a self-contained cluster of operations (which
  // is not quite the same thing as saying that everything has only one
  // use).
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
         UE = Inputs[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // If we're going to promote the non-output-value operand(s) of SELECT
      // or SELECT_CC, record them for truncation.
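      // (Unlike in DAGCombineTruncBoolExt above, a condition input inside
      // the cluster is not fatal here; we record its original type and
      // re-truncate it when the select is rebuilt.)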
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == Inputs[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == Inputs[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
        if (User->getOperand(1) == Inputs[i])
          SelectTruncOp[1].insert(std::make_pair(User,
                                  User->getOperand(1).getValueType()));
      }
    }
  }

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
    for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
         UE = PromOps[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // If we're going to promote the non-output-value operand(s) of SELECT
      // or SELECT_CC, record them for truncation.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == PromOps[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == PromOps[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
        if (User->getOperand(1) == PromOps[i])
          SelectTruncOp[1].insert(std::make_pair(User,
                                  User->getOperand(1).getValueType()));
      }
    }
  }

  unsigned PromBits = N->getOperand(0).getValueSizeInBits();
  bool ReallyNeedsExt = false;
  if (N->getOpcode() != ISD::ANY_EXTEND) {
    // If the inputs are not all already sign/zero-extended appropriately,
    // then we'll still need to do that at the end.
    for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
      if (isa<ConstantSDNode>(Inputs[i]))
        continue;

      unsigned OpBits =
        Inputs[i].getOperand(0).getValueSizeInBits();
      assert(PromBits < OpBits && "Truncation not to a smaller bit count?");

      if ((N->getOpcode() == ISD::ZERO_EXTEND &&
           !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
                                  APInt::getHighBitsSet(OpBits,
                                                        OpBits-PromBits))) ||
          (N->getOpcode() == ISD::SIGN_EXTEND &&
           DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
             (OpBits-(PromBits-1)))) {
        ReallyNeedsExt = true;
        break;
      }
    }
  }

  // Replace all inputs, either with the truncation operand, or a
  // truncation or extension to the final output type.
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    // Constant inputs need to be replaced with the to-be-promoted nodes that
    // use them because they might have users outside of the cluster of
    // promoted nodes.
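    // (Constant inputs are instead extended or truncated in place when the
    // operation that uses them is rebuilt below.)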
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    SDValue InSrc = Inputs[i].getOperand(0);
    if (Inputs[i].getValueType() == N->getValueType(0))
      DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
    else if (N->getOpcode() == ISD::SIGN_EXTEND)
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
        DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
    else if (N->getOpcode() == ISD::ZERO_EXTEND)
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
        DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
    else
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
        DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
  }

  std::list<HandleSDNode> PromOpHandles;
  for (auto &PromOp : PromOps)
    PromOpHandles.emplace_back(PromOp);

  // Replace all operations (these are all the same, but have a different
  // (promoted) return type). DAG.getNode will validate that the types of
  // a binary operator match, so go through the list in reverse so that
  // we've likely promoted both operands first.
  while (!PromOpHandles.empty()) {
    SDValue PromOp = PromOpHandles.back().getValue();
    PromOpHandles.pop_back();

    unsigned C;
    switch (PromOp.getOpcode()) {
    default:             C = 0; break;
    case ISD::SELECT:    C = 1; break;
    case ISD::SELECT_CC: C = 2; break;
    }

    if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
         PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
        (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
         PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
      // The to-be-promoted operands of this node have not yet been
      // promoted (this should be rare because we're going through the
      // list backward, but if one of the operands has several users in
      // this cluster of to-be-promoted nodes, it is possible).
      PromOpHandles.emplace_front(PromOp);
      continue;
    }

    // For SELECT and SELECT_CC nodes, we do a similar check for any
    // to-be-promoted comparison inputs.
    if (PromOp.getOpcode() == ISD::SELECT ||
        PromOp.getOpcode() == ISD::SELECT_CC) {
      if ((SelectTruncOp[0].count(PromOp.getNode()) &&
           PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
          (SelectTruncOp[1].count(PromOp.getNode()) &&
           PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
        PromOpHandles.emplace_front(PromOp);
        continue;
      }
    }

    SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
                                PromOp.getNode()->op_end());

    // If this node has constant inputs, then they'll need to be promoted
    // here.
    for (unsigned i = 0; i < 2; ++i) {
      if (!isa<ConstantSDNode>(Ops[C+i]))
        continue;
      if (Ops[C+i].getValueType() == N->getValueType(0))
        continue;

      if (N->getOpcode() == ISD::SIGN_EXTEND)
        Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
      else if (N->getOpcode() == ISD::ZERO_EXTEND)
        Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
      else
        Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
    }

    // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
    // truncate them again to the original value type.
    if (PromOp.getOpcode() == ISD::SELECT ||
        PromOp.getOpcode() == ISD::SELECT_CC) {
      auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
      if (SI0 != SelectTruncOp[0].end())
        Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
      auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
      if (SI1 != SelectTruncOp[1].end())
        Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
    }

    DAG.ReplaceAllUsesOfValueWith(PromOp,
      DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
  }

  // Now we're left with the initial extension itself.
  if (!ReallyNeedsExt)
    return N->getOperand(0);

  // To zero extend, just mask off everything except for the first bit (in
  // the i1 case).
  if (N->getOpcode() == ISD::ZERO_EXTEND)
    return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
                       DAG.getConstant(APInt::getLowBitsSet(
                                         N->getValueSizeInBits(0), PromBits),
                                       dl, N->getValueType(0)));

  assert(N->getOpcode() == ISD::SIGN_EXTEND &&
         "Invalid extension type");
  EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
  SDValue ShiftCst =
    DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
  return DAG.getNode(
      ISD::SRA, dl, N->getValueType(0),
      DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
      ShiftCst);
}

/// \brief Reduces the number of fp-to-int conversions when building a vector.
///
/// If this vector is built out of floating-to-integer conversions,
/// transform it to a vector built out of floating-point values followed by a
/// single floating-to-integer conversion of the vector.
/// Namely  (build_vector (fptosi $A), (fptosi $B), ...)
/// becomes (fptosi (build_vector ($A, $B, ...)))
SDValue PPCTargetLowering::
combineElementTruncationToVectorTruncation(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         "Should be called with a BUILD_VECTOR node");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  SDValue FirstInput = N->getOperand(0);
  assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
         "The input operand must be an fp-to-int conversion.");

  // This combine happens after legalization so the fp_to_[su]i nodes are
  // already converted to PPCISD nodes.
  unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
  if (FirstConversion == PPCISD::FCTIDZ ||
      FirstConversion == PPCISD::FCTIDUZ ||
      FirstConversion == PPCISD::FCTIWZ ||
      FirstConversion == PPCISD::FCTIWUZ) {
    bool IsSplat = true;
    bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
                   FirstConversion == PPCISD::FCTIWUZ;
    EVT SrcVT = FirstInput.getOperand(0).getValueType();
    SmallVector<SDValue, 4> Ops;
    EVT TargetVT = N->getValueType(0);
    for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
      if (N->getOperand(i).getOpcode() != PPCISD::MFVSR)
        return SDValue();
      unsigned NextConversion = N->getOperand(i).getOperand(0).getOpcode();
      if (NextConversion != FirstConversion)
        return SDValue();
      if (N->getOperand(i) != FirstInput)
        IsSplat = false;
    }

    // If this is a splat, we leave it as-is since there will be only a
    // single fp-to-int conversion followed by a splat of the integer.
    // This is better for 32-bit and smaller ints and neutral for 64-bit
    // ints.
    if (IsSplat)
      return SDValue();

    // Now that we know we have the right type of node, get its operands.
    for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
      SDValue In = N->getOperand(i).getOperand(0);
      // For 32-bit values, we need to add an FP_ROUND node.
      if (Is32Bit) {
        if (In.isUndef())
          Ops.push_back(DAG.getUNDEF(SrcVT));
        else {
          SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
                                      MVT::f32, In.getOperand(0),
                                      DAG.getIntPtrConstant(1, dl));
          Ops.push_back(Trunc);
        }
      } else
        Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
    }

    unsigned Opcode;
    if (FirstConversion == PPCISD::FCTIDZ ||
        FirstConversion == PPCISD::FCTIWZ)
      Opcode = ISD::FP_TO_SINT;
    else
      Opcode = ISD::FP_TO_UINT;

    EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
    SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
    return DAG.getNode(Opcode, dl, TargetVT, BV);
  }
  return SDValue();
}

/// \brief Reduce the number of loads when building a vector.
///
/// Building a vector out of multiple loads can be converted to a load
/// of the vector type if the loads are consecutive. If the loads are
/// consecutive but in descending order, a shuffle is added at the end
/// to reorder the vector.
static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         "Should be called with a BUILD_VECTOR node");

  SDLoc dl(N);
  bool InputsAreConsecutiveLoads = true;
  bool InputsAreReverseConsecutive = true;
  unsigned ElemSize = N->getValueType(0).getScalarSizeInBits() / 8;
  SDValue FirstInput = N->getOperand(0);
  bool IsRoundOfExtLoad = false;

  if (FirstInput.getOpcode() == ISD::FP_ROUND &&
      FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
    LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
    IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
  }
  // Not a build vector of (possibly fp_rounded) loads.
  if (!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD)
    return SDValue();

  for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
    // If any inputs are fp_round(extload), they all must be.
    if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
      return SDValue();

    SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
                                           N->getOperand(i);
    if (NextInput.getOpcode() != ISD::LOAD)
      return SDValue();

    SDValue PreviousInput =
      IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
    LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
    LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);

    // If any inputs are fp_round(extload), they all must be.
    if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
      return SDValue();

    if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
      InputsAreConsecutiveLoads = false;
    if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
      InputsAreReverseConsecutive = false;

    // Exit early if the loads are neither consecutive nor reverse
    // consecutive.
    if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
      return SDValue();
  }

  assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
         "The loads cannot be both consecutive and reverse consecutive.");

  SDValue FirstLoadOp =
    IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
  SDValue LastLoadOp =
    IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
                       N->getOperand(N->getNumOperands()-1);

  LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
  LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
  if (InputsAreConsecutiveLoads) {
    assert(LD1 && "Input needs to be a LoadSDNode.");
    return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
                       LD1->getBasePtr(), LD1->getPointerInfo(),
                       LD1->getAlignment());
  }
  if (InputsAreReverseConsecutive) {
    assert(LDL && "Input needs to be a LoadSDNode.");
    SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
                               LDL->getBasePtr(), LDL->getPointerInfo(),
                               LDL->getAlignment());
    SmallVector<int, 16> Ops;
    for (int i = N->getNumOperands() - 1; i >= 0; i--)
      Ops.push_back(i);

    return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
                                DAG.getUNDEF(N->getValueType(0)), Ops);
  }
  return SDValue();
}

SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         "Should be called with a BUILD_VECTOR node");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  if (!Subtarget.hasVSX())
    return SDValue();

  // The target independent DAG combiner will leave a build_vector of
  // float-to-int conversions intact. We can generate MUCH better code for
  // a float-to-int conversion of a vector of floats.
  SDValue FirstInput = N->getOperand(0);
  if (FirstInput.getOpcode() == PPCISD::MFVSR) {
    SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
    if (Reduced)
      return Reduced;
  }

  // If we're building a vector out of consecutive loads, just load that
  // vector type.
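  // For example, (build_vector (load a), (load a+4), (load a+8), (load a+12))
  // with i32 elements becomes a single v4i32 load of a; the same elements in
  // descending address order become that load followed by a reversing
  // shuffle.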
  SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
  if (Reduced)
    return Reduced;

  if (N->getValueType(0) != MVT::v2f64)
    return SDValue();

  // Looking for:
  // (build_vector ([su]int_to_fp (extractelt 0)),
  //               ([su]int_to_fp (extractelt 1)))
  if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
      FirstInput.getOpcode() != ISD::UINT_TO_FP)
    return SDValue();
  if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
      N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
    return SDValue();
  if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
    return SDValue();

  SDValue Ext1 = FirstInput.getOperand(0);
  SDValue Ext2 = N->getOperand(1).getOperand(0);
  if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return SDValue();

  ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
  ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
  if (!Ext1Op || !Ext2Op)
    return SDValue();
  if (Ext1.getValueType() != MVT::i32 ||
      Ext2.getValueType() != MVT::i32)
    if (Ext1.getOperand(0) != Ext2.getOperand(0))
      return SDValue();

  int FirstElem = Ext1Op->getZExtValue();
  int SecondElem = Ext2Op->getZExtValue();
  int SubvecIdx;
  if (FirstElem == 0 && SecondElem == 1)
    SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
  else if (FirstElem == 2 && SecondElem == 3)
    SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
  else
    return SDValue();

  SDValue SrcVec = Ext1.getOperand(0);
  auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
    PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
  return DAG.getNode(NodeType, dl, MVT::v2f64,
                     SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
}

SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::SINT_TO_FP ||
          N->getOpcode() == ISD::UINT_TO_FP) &&
         "Need an int -> FP conversion node here");

  if (useSoftFloat() || !Subtarget.has64BitSupport())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Op(N, 0);

  SDValue FirstOperand(Op.getOperand(0));
  bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
    (FirstOperand.getValueType() == MVT::i8 ||
     FirstOperand.getValueType() == MVT::i16);
  if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
    bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
    bool DstDouble = Op.getValueType() == MVT::f64;
    unsigned ConvOp = Signed ?
      (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
      (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
    SDValue WidthConst =
      DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
                            dl, false);
    LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
    SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
    SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
                                         DAG.getVTList(MVT::f64, MVT::Other),
                                         Ops, MVT::i8, LDN->getMemOperand());

    // For signed conversion, we need to sign-extend the value in the VSR.
    if (Signed) {
      SDValue ExtOps[] = { Ld, WidthConst };
      SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
      return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
    } else
      return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
  }

  // Don't handle ppc_fp128 here or i1 conversions.
  if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
    return SDValue();
  if (Op.getOperand(0).getValueType() == MVT::i1)
    return SDValue();

  // For i32 intermediate values, unfortunately, the conversion functions
  // leave the upper 32 bits of the value undefined. Within the set of
  // scalar instructions, we have no method for zero- or sign-extending the
  // value. Thus, we cannot handle i32 intermediate values here.
  if (Op.getOperand(0).getValueType() == MVT::i32)
    return SDValue();

  assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
         "UINT_TO_FP is supported only with FPCVT");

  // If we have FCFIDS, then use it when converting to single-precision.
  // Otherwise, convert to double-precision and then round.
  unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                       ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
                                                            : PPCISD::FCFIDS)
                       : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
                                                            : PPCISD::FCFID);
  MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                  ? MVT::f32
                  : MVT::f64;

  // If we're converting from a float to an int and back to a float again,
  // then we don't need the store/load pair at all.
  if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
       Subtarget.hasFPCVT()) ||
      (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
    SDValue Src = Op.getOperand(0).getOperand(0);
    if (Src.getValueType() == MVT::f32) {
      Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
      DCI.AddToWorklist(Src.getNode());
    } else if (Src.getValueType() != MVT::f64) {
      // Make sure that we don't pick up a ppc_fp128 source value.
      return SDValue();
    }

    unsigned FCTOp =
      Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
                                                        PPCISD::FCTIDUZ;

    SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
    SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);

    if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
      FP = DAG.getNode(ISD::FP_ROUND, dl,
                       MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
      DCI.AddToWorklist(FP.getNode());
    }

    return FP;
  }

  return SDValue();
}

// expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
// builtins) into loads with swaps.
SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Chain;
  SDValue Base;
  MachineMemOperand *MMO;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode for little endian VSX load");
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    Chain = LD->getChain();
    Base = LD->getBasePtr();
    MMO = LD->getMemOperand();
    // If the MMO suggests this isn't a load of a full vector, leave
    // things alone. For a built-in, we have to make the change for
    // correctness, so if there is a size problem that will be a bug.
    if (MMO->getSize() < 16)
      return SDValue();
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
    Chain = Intrin->getChain();
    // Similarly to the store case below, Intrin->getBasePtr() doesn't get
    // us what we want. Get operand 2 instead.
    Base = Intrin->getOperand(2);
    MMO = Intrin->getMemOperand();
    break;
  }
  }

  MVT VecTy = N->getValueType(0).getSimpleVT();

  // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
  // aligned and the type is a vector with elements up to 4 bytes.
  if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment() % 16) &&
      VecTy.getScalarSizeInBits() <= 32) {
    return SDValue();
  }

  SDValue LoadOps[] = { Chain, Base };
  SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
                                         DAG.getVTList(MVT::v2f64, MVT::Other),
                                         LoadOps, MVT::v2f64, MMO);

  DCI.AddToWorklist(Load.getNode());
  Chain = Load.getValue(1);
  SDValue Swap = DAG.getNode(
      PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
  DCI.AddToWorklist(Swap.getNode());

  // Add a bitcast if the resulting load type doesn't match v2f64.
  if (VecTy != MVT::v2f64) {
    SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
    DCI.AddToWorklist(N.getNode());
    // Package {bitcast value, swap's chain} to match Load's shape.
    return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
                       N, Swap.getValue(1));
  }

  return Swap;
}

// expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
// builtins) into stores with swaps.
SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Chain;
  SDValue Base;
  unsigned SrcOpnd;
  MachineMemOperand *MMO;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode for little endian VSX store");
  case ISD::STORE: {
    StoreSDNode *ST = cast<StoreSDNode>(N);
    Chain = ST->getChain();
    Base = ST->getBasePtr();
    MMO = ST->getMemOperand();
    SrcOpnd = 1;
    // If the MMO suggests this isn't a store of a full vector, leave
    // things alone. For a built-in, we have to make the change for
    // correctness, so if there is a size problem that will be a bug.
    if (MMO->getSize() < 16)
      return SDValue();
    break;
  }
  case ISD::INTRINSIC_VOID: {
    MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
    Chain = Intrin->getChain();
    // Intrin->getBasePtr() oddly does not get what we want.
    Base = Intrin->getOperand(3);
    MMO = Intrin->getMemOperand();
    SrcOpnd = 2;
    break;
  }
  }

  SDValue Src = N->getOperand(SrcOpnd);
  MVT VecTy = Src.getValueType().getSimpleVT();

  // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
  // aligned and the type is a vector with elements up to 4 bytes.
  if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment() % 16) &&
      VecTy.getScalarSizeInBits() <= 32) {
    return SDValue();
  }

  // All stores are done as v2f64 with a possible bitcast.
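  // (PPCISD::STXVD2X stores the register as two big-endian doublewords, so
  // the single XXSWAPD below produces the correct memory image for any of
  // the element types handled here.)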
  if (VecTy != MVT::v2f64) {
    Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
    DCI.AddToWorklist(Src.getNode());
  }

  SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
                             DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
  DCI.AddToWorklist(Swap.getNode());
  Chain = Swap.getValue(1);
  SDValue StoreOps[] = { Chain, Swap, Base };
  SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
                                          DAG.getVTList(MVT::Other),
                                          StoreOps, VecTy, MMO);
  DCI.AddToWorklist(Store.getNode());
  return Store;
}

SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case ISD::SHL:
    return combineSHL(N, DCI);
  case ISD::SRA:
    return combineSRA(N, DCI);
  case ISD::SRL:
    return combineSRL(N, DCI);
  case PPCISD::SHL:
    if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
      return N->getOperand(0);
    break;
  case PPCISD::SRL:
    if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
      return N->getOperand(0);
    break;
  case PPCISD::SRA:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->isNullValue() ||   //  0 >>s V -> 0.
          C->isAllOnesValue())  // -1 >>s V -> -1.
        return N->getOperand(0);
    }
    break;
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    return DAGCombineExtBoolTrunc(N, DCI);
  case ISD::TRUNCATE:
  case ISD::SETCC:
  case ISD::SELECT_CC:
    return DAGCombineTruncBoolExt(N, DCI);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return combineFPToIntToFP(N, DCI);
  case ISD::STORE: {
    EVT Op1VT = N->getOperand(1).getValueType();
    bool ValidTypeForStoreFltAsInt = (Op1VT == MVT::i32) ||
      (Subtarget.hasP9Vector() && (Op1VT == MVT::i8 || Op1VT == MVT::i16));

    // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
    if (Subtarget.hasSTFIWX() && !cast<StoreSDNode>(N)->isTruncatingStore() &&
        N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
        ValidTypeForStoreFltAsInt &&
        N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
      SDValue Val = N->getOperand(1).getOperand(0);
      if (Val.getValueType() == MVT::f32) {
        Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
        DCI.AddToWorklist(Val.getNode());
      }
      Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val);
      DCI.AddToWorklist(Val.getNode());

      if (Op1VT == MVT::i32) {
        SDValue Ops[] = {
          N->getOperand(0), Val, N->getOperand(2),
          DAG.getValueType(N->getOperand(1).getValueType())
        };

        Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
                                      DAG.getVTList(MVT::Other), Ops,
                                      cast<StoreSDNode>(N)->getMemoryVT(),
                                      cast<StoreSDNode>(N)->getMemOperand());
      } else {
        unsigned WidthInBytes =
          N->getOperand(1).getValueType() == MVT::i8 ? 1 : 2;
        SDValue WidthConst = DAG.getIntPtrConstant(WidthInBytes, dl, false);

        SDValue Ops[] = {
          N->getOperand(0), Val, N->getOperand(2), WidthConst,
          DAG.getValueType(N->getOperand(1).getValueType())
        };
        Val = DAG.getMemIntrinsicNode(PPCISD::STXSIX, dl,
                                      DAG.getVTList(MVT::Other), Ops,
                                      cast<StoreSDNode>(N)->getMemoryVT(),
                                      cast<StoreSDNode>(N)->getMemOperand());
      }

      DCI.AddToWorklist(Val.getNode());
      return Val;
    }

    // Turn STORE (BSWAP) -> sthbrx/stwbrx.
    if (cast<StoreSDNode>(N)->isUnindexed() &&
        N->getOperand(1).getOpcode() == ISD::BSWAP &&
        N->getOperand(1).getNode()->hasOneUse() &&
        (N->getOperand(1).getValueType() == MVT::i32 ||
         N->getOperand(1).getValueType() == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
          N->getOperand(1).getValueType() == MVT::i64))) {
      SDValue BSwapOp = N->getOperand(1).getOperand(0);
      // Do an any-extend to 32-bits if this is a half-word input.
      if (BSwapOp.getValueType() == MVT::i16)
        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);

      // If the type of the BSWAP operand is wider than the stored memory
      // width, it needs to be shifted right before STBRX.
      EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
      if (Op1VT.bitsGT(mVT)) {
        int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
        BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
                              DAG.getConstant(Shift, dl, MVT::i32));
        // Need to truncate if this is a bswap of i64 stored as i32/i16.
        if (Op1VT == MVT::i64)
          BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
      }

      SDValue Ops[] = {
        N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
      };
      return
        DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
                                Ops, cast<StoreSDNode>(N)->getMemoryVT(),
                                cast<StoreSDNode>(N)->getMemOperand());
    }

    // For little endian, VSX stores require generating xxswapd/stxvd2x.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
    EVT VT = N->getOperand(1).getValueType();
    if (VT.isSimple()) {
      MVT StoreVT = VT.getSimpleVT();
      if (Subtarget.needsSwapsForVSXMemOps() &&
          (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
           StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
        return expandVSXStoreForLE(N, DCI);
    }
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    EVT VT = LD->getValueType(0);

    // For little endian, VSX loads require generating lxvd2x/xxswapd.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
    if (VT.isSimple()) {
      MVT LoadVT = VT.getSimpleVT();
      if (Subtarget.needsSwapsForVSXMemOps() &&
          (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
           LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
        return expandVSXLoadForLE(N, DCI);
    }

    // We sometimes end up with a 64-bit integer load, from which we extract
    // two single-precision floating-point numbers. This happens with
    // std::complex<float>, and other similar structures, because of the way
    // we canonicalize structure copies. However, if we lack direct moves,
    // then the final bitcasts from the extracted integer values to the
    // floating-point numbers turn into store/load pairs.
    // Even with direct moves, just loading the two floating-point numbers
    // is likely better.
    auto ReplaceTwoFloatLoad = [&]() {
      if (VT != MVT::i64)
        return false;

      if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
          LD->isVolatile())
        return false;

      // We're looking for a sequence like this:
      // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
      // t16: i64 = srl t13, Constant:i32<32>
      // t17: i32 = truncate t16
      // t18: f32 = bitcast t17
      // t19: i32 = truncate t13
      // t20: f32 = bitcast t19

      if (!LD->hasNUsesOfValue(2, 0))
        return false;

      auto UI = LD->use_begin();
      while (UI.getUse().getResNo() != 0) ++UI;
      SDNode *Trunc = *UI++;
      while (UI.getUse().getResNo() != 0) ++UI;
      SDNode *RightShift = *UI;
      if (Trunc->getOpcode() != ISD::TRUNCATE)
        std::swap(Trunc, RightShift);

      if (Trunc->getOpcode() != ISD::TRUNCATE ||
          Trunc->getValueType(0) != MVT::i32 ||
          !Trunc->hasOneUse())
        return false;
      if (RightShift->getOpcode() != ISD::SRL ||
          !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
          RightShift->getConstantOperandVal(1) != 32 ||
          !RightShift->hasOneUse())
        return false;

      SDNode *Trunc2 = *RightShift->use_begin();
      if (Trunc2->getOpcode() != ISD::TRUNCATE ||
          Trunc2->getValueType(0) != MVT::i32 ||
          !Trunc2->hasOneUse())
        return false;

      SDNode *Bitcast = *Trunc->use_begin();
      SDNode *Bitcast2 = *Trunc2->use_begin();

      if (Bitcast->getOpcode() != ISD::BITCAST ||
          Bitcast->getValueType(0) != MVT::f32)
        return false;
      if (Bitcast2->getOpcode() != ISD::BITCAST ||
          Bitcast2->getValueType(0) != MVT::f32)
        return false;

      if (Subtarget.isLittleEndian())
        std::swap(Bitcast, Bitcast2);

      // Bitcast has the second float (in memory-layout order) and Bitcast2
      // has the first one.

      SDValue BasePtr = LD->getBasePtr();
      if (LD->isIndexed()) {
        assert(LD->getAddressingMode() == ISD::PRE_INC &&
               "Non-pre-inc AM on PPC?");
        BasePtr =
          DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                      LD->getOffset());
      }

      auto MMOFlags =
        LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
      SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
                                      LD->getPointerInfo(), LD->getAlignment(),
                                      MMOFlags, LD->getAAInfo());
      SDValue AddPtr =
        DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
                    BasePtr, DAG.getIntPtrConstant(4, dl));
      SDValue FloatLoad2 = DAG.getLoad(
          MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
          LD->getPointerInfo().getWithOffset(4),
          MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());

      if (LD->isIndexed()) {
        // Note that DAGCombine should re-form any pre-increment load(s) from
        // what is produced here if that makes sense.
        DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
      }

      DCI.CombineTo(Bitcast2, FloatLoad);
      DCI.CombineTo(Bitcast, FloatLoad2);

      DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
                                    SDValue(FloatLoad2.getNode(), 1));
      return true;
    };

    if (ReplaceTwoFloatLoad())
      return SDValue(N, 0);

    EVT MemVT = LD->getMemoryVT();
    Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
    unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
    Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
    unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy);
    if (LD->isUnindexed() && VT.isVector() &&
        ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
          // P8 and later hardware should just use LOAD.
          !Subtarget.hasP8Vector() &&
          (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
           VT == MVT::v4f32)) ||
         (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) &&
          LD->getAlignment() >= ScalarABIAlignment)) &&
        LD->getAlignment() < ABIAlignment) {
      // This is a type-legal unaligned Altivec or QPX load.
      SDValue Chain = LD->getChain();
      SDValue Ptr = LD->getBasePtr();
      bool isLittleEndian = Subtarget.isLittleEndian();

      // This implements the loading of unaligned vectors as described in
      // the venerable Apple Velocity Engine overview. Specifically:
      // https://developer.apple.com/hardwaredrivers/ve/alignment.html
      // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
      //
      // The general idea is to expand a sequence of one or more unaligned
      // loads into an alignment-based permutation-control instruction (lvsl
      // or lvsr), a series of regular vector loads (which always truncate
      // their input address to an aligned address), and a series of
      // permutations. The results of these permutations are the requested
      // loaded values. The trick is that the last "extra" load is not taken
      // from the address you might suspect (sizeof(vector) bytes after the
      // last requested load), but rather sizeof(vector) - 1 bytes after the
      // last requested vector. The point of this is to avoid a page fault if
      // the base address happened to be aligned. This works because if the
      // base address is aligned, then adding less than a full vector length
      // will cause the last vector in the sequence to be (re)loaded.
      // Otherwise, the next vector will be fetched from where you would
      // expect.

      // We might be able to reuse the permutation generation from
      // a different base address offset from this one by an aligned amount.
      // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
      // optimization later.
      Intrinsic::ID Intr, IntrLD, IntrPerm;
      MVT PermCntlTy, PermTy, LDTy;
      if (Subtarget.hasAltivec()) {
        Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr :
                                Intrinsic::ppc_altivec_lvsl;
        IntrLD = Intrinsic::ppc_altivec_lvx;
        IntrPerm = Intrinsic::ppc_altivec_vperm;
        PermCntlTy = MVT::v16i8;
        PermTy = MVT::v4i32;
        LDTy = MVT::v4i32;
      } else {
        Intr =   MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld :
                                       Intrinsic::ppc_qpx_qvlpcls;
        IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd :
                                       Intrinsic::ppc_qpx_qvlfs;
        IntrPerm = Intrinsic::ppc_qpx_qvfperm;
        PermCntlTy = MVT::v4f64;
        PermTy = MVT::v4f64;
        LDTy = MemVT.getSimpleVT();
      }

      SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);

      // Create the new MMO for the new base load.
      // It is like the original MMO, but represents an area in memory almost
      // twice the vector size centered on the original address. If the
      // address is unaligned, we might start reading up to
      // (sizeof(vector)-1) bytes below the address of the original unaligned
      // load.
      MachineFunction &MF = DAG.getMachineFunction();
      MachineMemOperand *BaseMMO =
        MF.getMachineMemOperand(LD->getMemOperand(),
                                -(long)MemVT.getStoreSize()+1,
                                2*MemVT.getStoreSize()-1);

      // Create the new base load.
      SDValue LDXIntID =
        DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
      SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
      SDValue BaseLoad =
        DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
                                DAG.getVTList(PermTy, MVT::Other),
                                BaseLoadOps, LDTy, BaseMMO);

      // Note that the value of IncOffset (which is provided to the next
      // load's pointer info offset value, and thus used to calculate the
      // alignment), and the value of IncValue (which is actually used to
      // increment the pointer value) are different! This is because we
      // require the next load to appear to be aligned, even though it
      // is actually offset from the base pointer by a lesser amount.
      int IncOffset = VT.getSizeInBits() / 8;
      int IncValue = IncOffset;

      // Walk (both up and down) the chain looking for another load at the
      // real (aligned) offset (the alignment of the other load does not
      // matter in this case). If found, then do not use the offset
      // reduction trick, as that will prevent the loads from being later
      // combined (as they would otherwise be duplicates).
      if (!findConsecutiveLoad(LD, DAG))
        --IncValue;

      SDValue Increment =
        DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);

      MachineMemOperand *ExtraMMO =
        MF.getMachineMemOperand(LD->getMemOperand(),
                                1, 2*MemVT.getStoreSize()-1);
      SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
      SDValue ExtraLoad =
        DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
                                DAG.getVTList(PermTy, MVT::Other),
                                ExtraLoadOps, LDTy, ExtraMMO);

      SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                               BaseLoad.getValue(1), ExtraLoad.getValue(1));

      // Because vperm has a big-endian bias, we must reverse the order
      // of the input vectors and complement the permute control vector
      // when generating little endian code. We have already handled the
      // latter by using lvsr instead of lvsl, so just reverse BaseLoad
      // and ExtraLoad here.
      SDValue Perm;
      if (isLittleEndian)
        Perm = BuildIntrinsicOp(IntrPerm,
                                ExtraLoad, BaseLoad, PermCntl, DAG, dl);
      else
        Perm = BuildIntrinsicOp(IntrPerm,
                                BaseLoad, ExtraLoad, PermCntl, DAG, dl);

      if (VT != PermTy)
        Perm = Subtarget.hasAltivec() ?
                 DAG.getNode(ISD::BITCAST, dl, VT, Perm) :
                 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX
                             DAG.getTargetConstant(1, dl, MVT::i64));
                             // second argument is 1 because this rounding
                             // is always exact.

      // The output of the permutation is our loaded result, the TokenFactor
      // is our new chain.
      DCI.CombineTo(N, Perm, TF);
      return SDValue(N, 0);
    }
    }
    break;
  case ISD::INTRINSIC_WO_CHAIN: {
    bool isLittleEndian = Subtarget.isLittleEndian();
    unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
                                         : Intrinsic::ppc_altivec_lvsl);
    if ((IID == Intr ||
         IID == Intrinsic::ppc_qpx_qvlpcld ||
         IID == Intrinsic::ppc_qpx_qvlpcls) &&
        N->getOperand(1)->getOpcode() == ISD::ADD) {
      SDValue Add = N->getOperand(1);

      int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ?
                 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */;

      if (DAG.MaskedValueIsZero(Add->getOperand(1),
                                APInt::getAllOnesValue(Bits /* alignment */)
                                    .zext(Add.getScalarValueSizeInBits()))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode::use_iterator UI = BasePtr->use_begin(),
                                  UE = BasePtr->use_end();
             UI != UE; ++UI) {
          if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
              cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) {
            // We've found another LVSL/LVSR, and this address is an aligned
            // multiple of that one. The results will be the same, so use the
            // one we've just found instead.
            return SDValue(*UI, 0);
          }
        }
      }

      if (isa<ConstantSDNode>(Add->getOperand(1))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode::use_iterator UI = BasePtr->use_begin(),
             UE = BasePtr->use_end(); UI != UE; ++UI) {
          if (UI->getOpcode() == ISD::ADD &&
              isa<ConstantSDNode>(UI->getOperand(1)) &&
              (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
               cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
              (1ULL << Bits) == 0) {
            SDNode *OtherAdd = *UI;
            for (SDNode::use_iterator VI = OtherAdd->use_begin(),
                 VE = OtherAdd->use_end(); VI != VE; ++VI) {
              if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
                  cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() ==
                    IID) {
                return SDValue(*VI, 0);
              }
            }
          }
        }
      }
    }
  }

  break;
  case ISD::INTRINSIC_W_CHAIN:
    // For little endian, VSX loads require generating lxvd2x/xxswapd.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
    if (Subtarget.needsSwapsForVSXMemOps()) {
      switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
      default:
        break;
      case Intrinsic::ppc_vsx_lxvw4x:
      case Intrinsic::ppc_vsx_lxvd2x:
        return expandVSXLoadForLE(N, DCI);
      }
    }
    break;
  case ISD::INTRINSIC_VOID:
    // For little endian, VSX stores require generating xxswapd/stxvd2x.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
    if (Subtarget.needsSwapsForVSXMemOps()) {
      switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
      default:
        break;
      case Intrinsic::ppc_vsx_stxvw4x:
      case Intrinsic::ppc_vsx_stxvd2x:
        return expandVSXStoreForLE(N, DCI);
      }
    }
    break;
  case ISD::BSWAP:
    // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
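    // For example, (i32 (bswap (i32 (load ptr)))) is rewritten into a single
    // PPCISD::LBRX node, which selects to lwbrx, rather than a load followed
    // by explicit byte-reversal code. (Illustrative; the i16 case uses lhbrx
    // with a truncate inserted below.)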
    if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
        N->getOperand(0).hasOneUse() &&
        (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
          N->getValueType(0) == MVT::i64))) {
      SDValue Load = N->getOperand(0);
      LoadSDNode *LD = cast<LoadSDNode>(Load);
      // Create the byte-swapping load.
      SDValue Ops[] = {
        LD->getChain(),                       // Chain
        LD->getBasePtr(),                     // Ptr
        DAG.getValueType(N->getValueType(0))  // VT
      };
      SDValue BSLoad =
        DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
                                DAG.getVTList(N->getValueType(0) == MVT::i64 ?
                                              MVT::i64 : MVT::i32, MVT::Other),
                                Ops, LD->getMemoryVT(), LD->getMemOperand());

      // If this is an i16 load, insert the truncate.
      SDValue ResVal = BSLoad;
      if (N->getValueType(0) == MVT::i16)
        ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);

      // First, combine the bswap away. This makes the value produced by the
      // load dead.
      DCI.CombineTo(N, ResVal);

      // Next, combine the load away; we give it a bogus result value but a
      // real chain result. The result value is dead because the bswap is
      // dead.
      DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));

      // Return N so it doesn't get rechecked!
      return SDValue(N, 0);
    }
    break;
  case PPCISD::VCMP:
    // If a VCMPo node already exists with exactly the same operands as this
    // node, use its result instead of this node (VCMPo computes both a CR6
    // and a normal output).
    if (!N->getOperand(0).hasOneUse() &&
        !N->getOperand(1).hasOneUse() &&
        !N->getOperand(2).hasOneUse()) {

      // Scan all of the users of the LHS, looking for VCMPo's that match.
      SDNode *VCMPoNode = nullptr;

      SDNode *LHSN = N->getOperand(0).getNode();
      for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
           UI != E; ++UI)
        if (UI->getOpcode() == PPCISD::VCMPo &&
            UI->getOperand(1) == N->getOperand(1) &&
            UI->getOperand(2) == N->getOperand(2) &&
            UI->getOperand(0) == N->getOperand(0)) {
          VCMPoNode = *UI;
          break;
        }

      // If there is no VCMPo node, or if the flag value has a single use,
      // don't transform this.
      if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
        break;

      // Look at the (necessarily single) use of the flag value. If it has a
      // chain, this transformation is more complex. Note that multiple things
      // could use the value result, which we should ignore.
      SDNode *FlagUser = nullptr;
      for (SDNode::use_iterator UI = VCMPoNode->use_begin();
           FlagUser == nullptr; ++UI) {
        assert(UI != VCMPoNode->use_end() && "Didn't find user!");
        SDNode *User = *UI;
        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
          if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
            FlagUser = User;
            break;
          }
        }
      }

      // If the user is a MFOCRF instruction, we know this is safe.
      // Otherwise we give up for right now.
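      // (Reasoning sketch: MFOCRF has no chain operand and only copies the
      // CR field defined by the VCMPo, so substituting the VCMPo's value
      // result for this VCMP cannot perturb chain ordering.)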
      if (FlagUser->getOpcode() == PPCISD::MFOCRF)
        return SDValue(VCMPoNode, 0);
    }
    break;
  case ISD::BRCOND: {
    SDValue Cond = N->getOperand(1);
    SDValue Target = N->getOperand(2);

    if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
          Intrinsic::ppc_is_decremented_ctr_nonzero) {

      // We now need to make the intrinsic dead (it cannot be instruction
      // selected).
      DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
      assert(Cond.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
                         N->getOperand(0), Target);
    }
  }
  break;
  case ISD::BR_CC: {
    // If this is a branch on an altivec predicate comparison, lower this so
    // that we don't have to do a MFOCRF: instead, branch directly on CR6.
    // This lowering is done pre-legalize, because the legalizer lowers the
    // predicate compare down to code that is difficult to reassemble.
    ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);

    // Sometimes the promoted value of the intrinsic is ANDed by some non-zero
    // value. If so, pass through the AND to get to the intrinsic.
    if (LHS.getOpcode() == ISD::AND &&
        LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue()
          == Intrinsic::ppc_is_decremented_ctr_nonzero &&
        isa<ConstantSDNode>(LHS.getOperand(1)) &&
        !isNullConstant(LHS.getOperand(1)))
      LHS = LHS.getOperand(0);

    if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
          Intrinsic::ppc_is_decremented_ctr_nonzero &&
        isa<ConstantSDNode>(RHS)) {
      assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
             "Counter decrement comparison is not EQ or NE");

      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      bool isBDNZ = (CC == ISD::SETEQ && Val) ||
                    (CC == ISD::SETNE && !Val);

      // We now need to make the intrinsic dead (it cannot be instruction
      // selected).
      DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
      assert(LHS.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
                         N->getOperand(0), N->getOperand(4));
    }

    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
      assert(isDot && "Can't compare against a vector result!");

      // If this is a comparison against something other than 0/1, then we
      // know that the condition is never/always true.
      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ)      // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, dl, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

      // Create the PPCISD altivec 'dot' comparison node.
      SDValue Ops[] = {
        LHS.getOperand(2),  // LHS of compare
        LHS.getOperand(3),  // RHS of compare
        DAG.getConstant(CompareOpc, dl, MVT::i32)
      };
      EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
      SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);

      // Unpack the result based on how the target uses it.
      PPC::Predicate CompOpc;
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
                         DAG.getConstant(CompOpc, dl, MVT::i32),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  case ISD::BUILD_VECTOR:
    return DAGCombineBuildVector(N, DCI);
  }

  return SDValue();
}

SDValue
PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 std::vector<SDNode *> *Created) const {
  // fold (sdiv X, pow2)
  EVT VT = N->getValueType(0);
  if (VT == MVT::i64 && !Subtarget.isPPC64())
    return SDValue();
  if ((VT != MVT::i32 && VT != MVT::i64) ||
      !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
    return SDValue();

  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);

  bool IsNegPow2 = (-Divisor).isPowerOf2();
  unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
  SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);

  SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
  if (Created)
    Created->push_back(Op.getNode());

  if (IsNegPow2) {
    Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
    if (Created)
      Created->push_back(Op.getNode());
  }

  return Op;
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  Known.resetAll();
  switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
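    // (An i16 byte-swapping load has at most 16 significant result bits, so
    // bits 16-31 of the i32 result are always zero; that is what the
    // 0xFFFF0000 mask below records.)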
    if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
      Known.Zero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpequd_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtsd_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
    case Intrinsic::ppc_altivec_vcmpgtud_p:
      Known.Zero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}

unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9: {
    if (!ML)
      break;

    const PPCInstrInfo *TII = Subtarget.getInstrInfo();

    // For small loops (between 5 and 8 instructions), align to a 32-byte
    // boundary so that the entire loop fits in one instruction-cache line.
    uint64_t LoopSize = 0;
    for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
      for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
        LoopSize += TII->getInstSizeInBytes(*J);
        if (LoopSize > 32)
          break;
      }

    if (LoopSize > 16 && LoopSize <= 32)
      return 5; // The alignment is expressed in log2 form: 2^5 == 32 bytes.

    break;
  }
  }

  return TargetLowering::getPrefLoopAlignment(ML);
}

/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'd':
    case 'v':
    case 'y':
      return C_RegisterClass;
    case 'Z':
      // FIXME: While Z does indicate a memory constraint, it specifically
      // indicates an r+r address (used in conjunction with the 'y' modifier
      // in the replacement string). Currently, we're forcing the base
      // register to be r0 in the asm printer (which is interpreted as zero)
      // and forming the complete address in the second register. This is
      // suboptimal.
      return C_Memory;
    }
  } else if (Constraint == "wc") { // individual CR bits.
    return C_RegisterClass;
  } else if (Constraint == "wa" || Constraint == "wd" ||
             Constraint == "wf" || Constraint == "ws") {
    return C_RegisterClass; // VSX registers.
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
PPCTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();

  // Look at the constraint type.
  if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
    return CW_Register; // an individual CR bit.
  else if ((StringRef(constraint) == "wa" ||
            StringRef(constraint) == "wd" ||
            StringRef(constraint) == "wf") &&
           type->isVectorTy())
    return CW_Register;
  else if (StringRef(constraint) == "ws" && type->isDoubleTy())
    return CW_Register;

  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'b':
    if (type->isIntegerTy())
      weight = CW_Register;
    break;
  case 'f':
    if (type->isFloatTy())
      weight = CW_Register;
    break;
  case 'd':
    if (type->isDoubleTy())
      weight = CW_Register;
    break;
  case 'v':
    if (type->isVectorTy())
      weight = CW_Register;
    break;
  case 'y':
    weight = CW_Register;
    break;
  case 'Z':
    weight = CW_Memory;
    break;
  }
  return weight;
}

std::pair<unsigned, const TargetRegisterClass *>
PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
      return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
    case 'r':   // R0-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RCRegClass);
      return std::make_pair(0U, &PPC::GPRCRegClass);
    // 'd' and 'f' constraints are both defined to be "the floating point
    // registers", where one is for 32-bit and the other for 64-bit. We don't
    // really care overly much here so just give them all the same reg classes.
    case 'd':
    case 'f':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &PPC::F4RCRegClass);
      if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &PPC::F8RCRegClass);
      if (VT == MVT::v4f64 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QFRCRegClass);
      if (VT == MVT::v4f32 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QSRCRegClass);
      break;
    case 'v':
      if (VT == MVT::v4f64 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QFRCRegClass);
      if (VT == MVT::v4f32 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QSRCRegClass);
      if (Subtarget.hasAltivec())
        return std::make_pair(0U, &PPC::VRRCRegClass);
      break;
    case 'y':   // crrc
      return std::make_pair(0U, &PPC::CRRCRegClass);
    }
  } else if (Constraint == "wc" && Subtarget.useCRBits()) {
    // An individual CR bit.
    return std::make_pair(0U, &PPC::CRBITRCRegClass);
  } else if ((Constraint == "wa" || Constraint == "wd" ||
              Constraint == "wf") && Subtarget.hasVSX()) {
    return std::make_pair(0U, &PPC::VSRCRegClass);
  } else if (Constraint == "ws" && Subtarget.hasVSX()) {
    if (VT == MVT::f32 && Subtarget.hasP8Vector())
      return std::make_pair(0U, &PPC::VSSRCRegClass);
    else
      return std::make_pair(0U, &PPC::VSFRCRegClass);
  }

  std::pair<unsigned, const TargetRegisterClass *> R =
    TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
  // (which we call X[0-9]+). If a 64-bit value has been requested, and a
  // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
  // register.
  // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
  // the AsmName field from *RegisterInfo.td, then this would not be necessary.
  if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
      PPC::GPRCRegClass.contains(R.first))
    return std::make_pair(TRI->getMatchingSuperReg(R.first,
                            PPC::sub_32, &PPC::G8RCRegClass),
                          &PPC::G8RCRegClass);

  // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
  if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
    R.first = PPC::CR0;
    R.second = &PPC::CRRCRegClass;
  }

  return R;
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints.
  if (Constraint.length() > 1)
    return;

  char Letter = Constraint[0];
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
    if (!CST) return; // Must be an immediate to match.
    SDLoc dl(Op);
    int64_t Value = CST->getSExtValue();
    EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
                         // numbers are printed as such.
    switch (Letter) {
    default: llvm_unreachable("Unknown constraint letter!");
    case 'I':  // "I" is a signed 16-bit constant.
      if (isInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
      if (isShiftedUInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
      if (isShiftedInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
      if (isUInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'M':  // "M" is a constant that is greater than 31.
      if (Value > 31)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'N':  // "N" is a positive constant that is an exact power of two.
      if (Value > 0 && isPowerOf2_64(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'O':  // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      if (isInt<16>(-Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS) const {
  // PPC does not allow r+i addressing modes for vectors!
  if (Ty->isVectorTy() && AM.BaseOffs != 0)
    return false;

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r addressing:
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = Subtarget.isPPC64();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
        DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(),
                        dl, isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Just load the return address off the stack.
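  // (For Depth == 0, setLRStoreRequired() above guarantees that the link
  // register actually gets spilled, and getReturnAddrFrameIndex supplies a
  // frame index for that save slot.)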
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
                     MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;

  // Naked functions never have a frame pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned FrameReg;
  if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
    FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
  else
    FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;

  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
                                         PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo());
  return FrameAddr;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
unsigned PPCTargetLowering::getRegisterByName(const char *RegName, EVT VT,
                                              SelectionDAG &DAG) const {
  bool isPPC64 = Subtarget.isPPC64();
  bool isDarwinABI = Subtarget.isDarwinABI();

  if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) ||
      (!isPPC64 && VT != MVT::i32))
    report_fatal_error("Invalid register global variable type");

  bool is64Bit = isPPC64 && VT == MVT::i64;
  unsigned Reg = StringSwitch<unsigned>(RegName)
                   .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
                   .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2)
                   .Case("r13", (!isPPC64 && isDarwinABI) ? 0 :
                                (is64Bit ? PPC::X13 : PPC::R13))
                   .Default(0);

  if (Reg)
    return Reg;
  report_fatal_error("Invalid register name global variable");
}

bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
  return false;
}

bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::ppc_qpx_qvlfd:
  case Intrinsic::ppc_qpx_qvlfs:
  case Intrinsic::ppc_qpx_qvlfcd:
  case Intrinsic::ppc_qpx_qvlfcs:
  case Intrinsic::ppc_qpx_qvlfiwa:
  case Intrinsic::ppc_qpx_qvlfiwz:
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = 1;
    Info.vol = false;
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::ppc_qpx_qvlfda:
  case Intrinsic::ppc_qpx_qvlfsa:
  case Intrinsic::ppc_qpx_qvlfcda:
  case Intrinsic::ppc_qpx_qvlfcsa:
  case Intrinsic::ppc_qpx_qvlfiwaa:
  case Intrinsic::ppc_qpx_qvlfiwza: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvlfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = 1;
    Info.vol = false;
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfd:
  case Intrinsic::ppc_qpx_qvstfs:
  case Intrinsic::ppc_qpx_qvstfcd:
  case Intrinsic::ppc_qpx_qvstfcs:
  case Intrinsic::ppc_qpx_qvstfiw:
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = 1;
    Info.vol = false;
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfda:
  case Intrinsic::ppc_qpx_qvstfsa:
  case Intrinsic::ppc_qpx_qvstfcda:
  case Intrinsic::ppc_qpx_qvstfcsa:
  case Intrinsic::ppc_qpx_qvstfiwa: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = 1;
    Info.vol = false;
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  default:
    break;
  }

  return false;
}

/// getOptimalMemOpType - Returns the target-specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove lowering.
/// If DstAlign is zero, that means it's safe for the destination alignment to
/// satisfy any constraint. Similarly, if SrcAlign is zero, it means there
/// isn't a need to check it against the alignment requirement, probably
/// because the source does not need to be loaded. If 'IsMemset' is true, that
/// means it's expanding a memset. If 'ZeroMemset' is true, that means it's a
/// memset of zero. 'MemcpyStrSrc' indicates whether the memcpy source is
/// constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
                                           unsigned DstAlign, unsigned SrcAlign,
                                           bool IsMemset, bool ZeroMemset,
                                           bool MemcpyStrSrc,
                                           MachineFunction &MF) const {
  if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
    const Function *F = MF.getFunction();
    // When expanding a memset, require at least two QPX instructions to cover
    // the cost of loading the value to be stored from the constant pool.
    if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
        (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
        !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
      return MVT::v4f64;
    }

    // We should use Altivec/VSX loads and stores when available. For unaligned
    // addresses, unaligned VSX loads are only fast starting with the P8.
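    // For example, a 32-byte memcpy between 16-byte-aligned buffers on an
    // Altivec target is then expanded as two v4i32 load/store pairs instead
    // of four i64 (or eight i32) operations. (An illustrative sketch; the
    // exact expansion is decided by the generic memcpy lowering.)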
    if (Subtarget.hasAltivec() && Size >= 16 &&
        (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
         ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
      return MVT::v4i32;
  }

  if (Subtarget.isPPC64()) {
    return MVT::i64;
  }

  return MVT::i32;
}

/// \brief Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  return !(BitSize == 0 || BitSize > 64);
}

bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Generally speaking, zexts are not free, but they are free when they can
  // be folded with other operations.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  // FIXME: Add other cases...
  //  - 32-bit shifts with a zext to i64
  //  - zext after ctlz, bswap, etc.
  //  - zext after and by a constant mask

  return TargetLowering::isZExtFree(Val, VT2);
}

bool PPCTargetLowering::isFPExtFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return true;
}

bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                       unsigned,
                                                       unsigned,
                                                       bool *Fast) const {
  if (DisablePPCUnaligned)
    return false;

  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual expansion,
  // and generally only traps for software emulation when crossing page
  // boundaries.
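  // (Concretely, returning true here lets, e.g., a misaligned i32 load stay
  // a single lwz rather than being expanded by generic legalization into
  // byte loads and shifts.)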

  if (!VT.isSimple())
    return false;

  if (VT.getSimpleVT().isVector()) {
    if (Subtarget.hasVSX()) {
      if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
          VT != MVT::v4f32 && VT != MVT::v4i32)
        return false;
    } else {
      return false;
    }
  }

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  default:
    break;
  }

  return false;
}

const MCPhysReg *
PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
  // LR is a callee-save register, but we must treat it as clobbered by any
  // call site. Hence we include LR in the scratch registers, which are in
  // turn added as implicit-defs for stackmaps and patchpoints. The same
  // reasoning applies to CTR, which is used by any indirect call.
  static const MCPhysReg ScratchRegs[] = {
    PPC::X12, PPC::LR8, PPC::CTR8, 0
  };

  return ScratchRegs;
}

unsigned PPCTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
}

unsigned PPCTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
}

bool
PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
    EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves

  if (Subtarget.hasVSX() || Subtarget.hasQPX())
    return true;

  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref || Subtarget.enableMachineScheduler())
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}

// Create a fast isel object.
FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) const {
  return PPC::createFastISel(FuncInfo, LibInfo);
}

void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  if (Subtarget.isDarwinABI()) return;
  if (!Subtarget.isPPC64()) return;

  // Update IsSplitCSR in PPCFunctionInfo.
  PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>();
  PFI->setIsSplitCSR(true);
}

void PPCTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (PPC::G8RCRegClass.contains(*I))
      RC = &PPC::G8RCRegClass;
    else if (PPC::F8RCRegClass.contains(*I))
      RC = &PPC::F8RCRegClass;
    else if (PPC::CRRCRegClass.contains(*I))
      RC = &PPC::CRRCRegClass;
    else if (PPC::VRRCRegClass.contains(*I))
      RC = &PPC::VRRCRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    unsigned NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions; it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should
    // be nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction()->hasFnAttribute(
             Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
      .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
        .addReg(NewVR);
  }
}

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool PPCTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}

bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  if (!VT.isSimple() || !Subtarget.hasVSX())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  default:
    // For FP types that are currently not supported by the PPC backend,
    // return false. Examples: f16, f80.
    return false;
  case MVT::f32:
  case MVT::f64:
  case MVT::ppcf128:
    return Imm.isPosZero();
  }
}

// For vector shift operation op, fold
// (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
                                  SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarSizeInBits();
  unsigned Opcode = N->getOpcode();
  unsigned TargetOpcode;

  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected shift operation");
  case ISD::SHL:
    TargetOpcode = PPCISD::SHL;
    break;
  case ISD::SRL:
    TargetOpcode = PPCISD::SRL;
    break;
  case ISD::SRA:
    TargetOpcode = PPCISD::SRA;
    break;
  }

  if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
      N1->getOpcode() == ISD::AND)
    if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
      if (Mask->getZExtValue() == OpSizeInBits - 1)
        return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));

  return SDValue();
}

SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}
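// Illustrative example of the stripModuloOnShift fold above: for a legal
// v4i32 shift, (shl x, (and y, 31)) masks each shift amount with
// numbits(i32)-1, so it becomes (PPCISD::SHL x, y); the vector shift
// instructions already interpret each element's shift amount modulo the
// element width, making the AND redundant.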