//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCallingConv.h"
#include "PPCCCState.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCISelLowering.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <utility>
#include <vector>

using namespace llvm;
#define DEBUG_TYPE "ppc-lowering"

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!useSoftFloat()) {
    addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
    addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
  }

  // PowerPC has an i16 SEXTLOAD, but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-increment loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // This is used in the ppcf128->int sequence. Note it has different semantics
  // from FP_ROUND: that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Legal);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::CTTZ, MVT::i32, Legal);
    setOperationAction(ISD::CTTZ, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i32, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have SELECT.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires a SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuation, user-level threading, etc. As a result, no
  // other SjLj exception interfaces are implemented; please don't build your
  // own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  if (Subtarget.isSVR4ABI() && !isPPC64)
    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  // Comparisons that require checking two conditions.
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // Vector instructions introduced in P8.
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9.
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType(ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FPOWI, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
      setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Vector operation legalization checks the result type of
      // SIGN_EXTEND_INREG, overall legalization checks the inner type.
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f64, Custom);
    setOperationAction(ISD::STORE, MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_ROUND_INREG, MVT::v4f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG, MVT::v4f64, Legal);
    setOperationAction(ISD::FABS, MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f32, Custom);
    setOperationAction(ISD::STORE, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOWI, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND, MVT::v4i1, Legal);
    setOperationAction(ISD::OR, MVT::v4i1, Legal);
    setOperationAction(ISD::XOR, MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
    setOperationAction(ISD::STORE, MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(2);
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(4);

  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
    setPrefFunctionAlignment(4);
    setPrefLoopAlignment(4);
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary. Everything
  // else is passed on an 8-byte boundary on PPC64 and a 4-byte boundary on
  // PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
  return Align;
}
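// Worked example (illustrative, not part of the original file; it follows
// directly from the logic above): on a 64-bit non-Darwin subtarget with
// Altivec, a by-value struct containing a 128-bit vector member is placed on
// a 16-byte boundary by getMaxByValAlign, while an aggregate of scalars keeps
// the default 8-byte (PPC64) or 4-byte (PPC32) alignment.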
"PPCISD::BCTRL_LOAD_TOC"; 1112 case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG"; 1113 case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE"; 1114 case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP"; 1115 case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP"; 1116 case PPCISD::MFOCRF: return "PPCISD::MFOCRF"; 1117 case PPCISD::MFVSR: return "PPCISD::MFVSR"; 1118 case PPCISD::MTVSRA: return "PPCISD::MTVSRA"; 1119 case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ"; 1120 case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP"; 1121 case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP"; 1122 case PPCISD::ANDIo_1_EQ_BIT: return "PPCISD::ANDIo_1_EQ_BIT"; 1123 case PPCISD::ANDIo_1_GT_BIT: return "PPCISD::ANDIo_1_GT_BIT"; 1124 case PPCISD::VCMP: return "PPCISD::VCMP"; 1125 case PPCISD::VCMPo: return "PPCISD::VCMPo"; 1126 case PPCISD::LBRX: return "PPCISD::LBRX"; 1127 case PPCISD::STBRX: return "PPCISD::STBRX"; 1128 case PPCISD::LFIWAX: return "PPCISD::LFIWAX"; 1129 case PPCISD::LFIWZX: return "PPCISD::LFIWZX"; 1130 case PPCISD::LXSIZX: return "PPCISD::LXSIZX"; 1131 case PPCISD::STXSIX: return "PPCISD::STXSIX"; 1132 case PPCISD::VEXTS: return "PPCISD::VEXTS"; 1133 case PPCISD::LXVD2X: return "PPCISD::LXVD2X"; 1134 case PPCISD::STXVD2X: return "PPCISD::STXVD2X"; 1135 case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH"; 1136 case PPCISD::BDNZ: return "PPCISD::BDNZ"; 1137 case PPCISD::BDZ: return "PPCISD::BDZ"; 1138 case PPCISD::MFFS: return "PPCISD::MFFS"; 1139 case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ"; 1140 case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN"; 1141 case PPCISD::CR6SET: return "PPCISD::CR6SET"; 1142 case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET"; 1143 case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT"; 1144 case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT"; 1145 case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA"; 1146 case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L"; 1147 case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS"; 1148 case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA"; 1149 case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L"; 1150 case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR"; 1151 case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR"; 1152 case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA"; 1153 case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L"; 1154 case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR"; 1155 case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR"; 1156 case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA"; 1157 case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L"; 1158 case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT"; 1159 case PPCISD::SC: return "PPCISD::SC"; 1160 case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB"; 1161 case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE"; 1162 case PPCISD::RFEBB: return "PPCISD::RFEBB"; 1163 case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD"; 1164 case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN"; 1165 case PPCISD::QVFPERM: return "PPCISD::QVFPERM"; 1166 case PPCISD::QVGPCI: return "PPCISD::QVGPCI"; 1167 case PPCISD::QVALIGNI: return "PPCISD::QVALIGNI"; 1168 case PPCISD::QVESPLATI: return "PPCISD::QVESPLATI"; 1169 case PPCISD::QBFLT: return "PPCISD::QBFLT"; 1170 case PPCISD::QVLFSb: return "PPCISD::QVLFSb"; 1171 } 1172 return nullptr; 1173 } 1174 1175 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C, 1176 EVT VT) const { 1177 if 
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

  if (Subtarget.hasQPX())
    return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());

  return VT.changeVectorElementTypeToInteger();
}

bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}
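// Worked example (illustrative, derived from the checks above): for
// ShuffleKind 0 (big-endian, two distinct inputs), isVPKUHUMShuffleMask
// accepts exactly the mask that keeps the odd (low-order) byte of each
// halfword of the 32-byte concatenated input, i.e.
//   <1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31>
// with undef elements tolerated at any position.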
/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
        return false;
  }
  return true;
}

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
/// current subtarget.
///
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  const PPCSubtarget& Subtarget =
      static_cast<const PPCSubtarget&>(DAG.getSubtarget());
  if (!Subtarget.hasP8Vector())
    return false;

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}
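// Worked example (illustrative, derived from the loops above): for
// ShuffleKind 0 (big-endian, two distinct inputs), isVPKUWUMShuffleMask
// accepts the mask that keeps the low halfword of each word of the
// concatenated inputs:
//   <2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31>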
/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units.
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit.
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}
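// Worked example (illustrative, traceable through isVMerge above): on a
// big-endian target, isVMRGLShuffleMask(N, 1, 0, DAG) -- a byte-sized VMRGLB
// with two distinct inputs -- calls isVMerge(N, 1, 8, 24) and therefore
// matches the mask that interleaves the low halves of both vectors:
//   <8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31>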
1406 * - Little Endian: 1407 * - Use offset of 0 to check for odd elements 1408 * - Use offset of 4 to check for even elements 1409 * - Big Endian: 1410 * - Use offset of 0 to check for even elements 1411 * - Use offset of 4 to check for odd elements 1412 * A detailed description of the vector element ordering for little endian and 1413 * big endian can be found at 1414 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html 1415 * Targeting your applications - what little endian and big endian IBM XL C/C++ 1416 * compiler differences mean to you 1417 * 1418 * The mask to the shuffle vector instruction specifies the indices of the 1419 * elements from the two input vectors to place in the result. The elements are 1420 * numbered in array-access order, starting with the first vector. These vectors 1421 * are always of type v16i8, thus each vector will contain 16 elements of size 1422 * 8. More info on the shuffle vector can be found in the 1423 * http://llvm.org/docs/LangRef.html#shufflevector-instruction 1424 * Language Reference. 1425 * 1426 * The RHSStartValue indicates whether the same input vectors are used (unary) 1427 * or two different input vectors are used, based on the following: 1428 * - If the instruction uses the same vector for both inputs, the range of the 1429 * indices will be 0 to 15. In this case, the RHSStart value passed should 1430 * be 0. 1431 * - If the instruction has two different vectors then the range of the 1432 * indices will be 0 to 31. In this case, the RHSStart value passed should 1433 * be 16 (indices 0-15 specify elements in the first vector while indices 16 1434 * to 31 specify elements in the second vector). 1435 * 1436 * \param[in] N The shuffle vector SD Node to analyze 1437 * \param[in] IndexOffset Specifies whether to look for even or odd elements 1438 * \param[in] RHSStartValue Specifies the starting index for the righthand input 1439 * vector to the shuffle_vector instruction 1440 * \return true iff this shuffle vector represents an even or odd word merge 1441 */ 1442 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset, 1443 unsigned RHSStartValue) { 1444 if (N->getValueType(0) != MVT::v16i8) 1445 return false; 1446 1447 for (unsigned i = 0; i < 2; ++i) 1448 for (unsigned j = 0; j < 4; ++j) 1449 if (!isConstantOrUndef(N->getMaskElt(i*4+j), 1450 i*RHSStartValue+j+IndexOffset) || 1451 !isConstantOrUndef(N->getMaskElt(i*4+j+8), 1452 i*RHSStartValue+j+IndexOffset+8)) 1453 return false; 1454 return true; 1455 } 1456 1457 /** 1458 * \brief Determine if the specified shuffle mask is suitable for the vmrgew or 1459 * vmrgow instructions. 1460 * 1461 * \param[in] N The shuffle vector SD Node to analyze 1462 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false) 1463 * \param[in] ShuffleKind Identify the type of merge: 1464 * - 0 = big-endian merge with two different inputs; 1465 * - 1 = either-endian merge with two identical inputs; 1466 * - 2 = little-endian merge with two different inputs (inputs are swapped for 1467 * little-endian merges). 1468 * \param[in] DAG The current SelectionDAG 1469 * \return true iff this shuffle mask 1470 */ 1471 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven, 1472 unsigned ShuffleKind, SelectionDAG &DAG) { 1473 if (DAG.getDataLayout().isLittleEndian()) { 1474 unsigned indexOffset = CheckEven ? 
4 : 0; 1475 if (ShuffleKind == 1) // Unary 1476 return isVMerge(N, indexOffset, 0); 1477 else if (ShuffleKind == 2) // swapped 1478 return isVMerge(N, indexOffset, 16); 1479 else 1480 return false; 1481 } 1482 else { 1483 unsigned indexOffset = CheckEven ? 0 : 4; 1484 if (ShuffleKind == 1) // Unary 1485 return isVMerge(N, indexOffset, 0); 1486 else if (ShuffleKind == 0) // Normal 1487 return isVMerge(N, indexOffset, 16); 1488 else 1489 return false; 1490 } 1491 return false; 1492 } 1493 1494 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift 1495 /// amount, otherwise return -1. 1496 /// The ShuffleKind distinguishes between big-endian operations with two 1497 /// different inputs (0), either-endian operations with two identical inputs 1498 /// (1), and little-endian operations with two different inputs (2). For the 1499 /// latter, the input operands are swapped (see PPCInstrAltivec.td). 1500 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind, 1501 SelectionDAG &DAG) { 1502 if (N->getValueType(0) != MVT::v16i8) 1503 return -1; 1504 1505 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 1506 1507 // Find the first non-undef value in the shuffle mask. 1508 unsigned i; 1509 for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i) 1510 /*search*/; 1511 1512 if (i == 16) return -1; // all undef. 1513 1514 // Otherwise, check to see if the rest of the elements are consecutively 1515 // numbered from this value. 1516 unsigned ShiftAmt = SVOp->getMaskElt(i); 1517 if (ShiftAmt < i) return -1; 1518 1519 ShiftAmt -= i; 1520 bool isLE = DAG.getDataLayout().isLittleEndian(); 1521 1522 if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) { 1523 // Check the rest of the elements to see if they are consecutive. 1524 for (++i; i != 16; ++i) 1525 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 1526 return -1; 1527 } else if (ShuffleKind == 1) { 1528 // Check the rest of the elements to see if they are consecutive. 1529 for (++i; i != 16; ++i) 1530 if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15)) 1531 return -1; 1532 } else 1533 return -1; 1534 1535 if (isLE) 1536 ShiftAmt = 16 - ShiftAmt; 1537 1538 return ShiftAmt; 1539 } 1540 1541 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand 1542 /// specifies a splat of a single element that is suitable for input to 1543 /// VSPLTB/VSPLTH/VSPLTW. 1544 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) { 1545 assert(N->getValueType(0) == MVT::v16i8 && 1546 (EltSize == 1 || EltSize == 2 || EltSize == 4)); 1547 1548 // The consecutive indices need to specify an element, not part of two 1549 // different elements. So abandon ship early if this isn't the case. 1550 if (N->getMaskElt(0) % EltSize != 0) 1551 return false; 1552 1553 // This is a splat operation if each element of the permute is the same, and 1554 // if the value doesn't reference the second vector. 1555 unsigned ElementBase = N->getMaskElt(0); 1556 1557 // FIXME: Handle UNDEF elements too! 1558 if (ElementBase >= 16) 1559 return false; 1560 1561 // Check that the indices are consecutive, in the case of a multi-byte element 1562 // splatted with a v16i8 mask. 
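  // For example (illustration only): with EltSize == 2, splatting halfword 3
  // of the first input is the v16i8 mask
  // <6,7, 6,7, 6,7, 6,7, 6,7, 6,7, 6,7, 6,7> -- the first EltSize entries are
  // consecutive and every following chunk repeats them.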
1563 for (unsigned i = 1; i != EltSize; ++i) 1564 if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase)) 1565 return false; 1566 1567 for (unsigned i = EltSize, e = 16; i != e; i += EltSize) { 1568 if (N->getMaskElt(i) < 0) continue; 1569 for (unsigned j = 0; j != EltSize; ++j) 1570 if (N->getMaskElt(i+j) != N->getMaskElt(j)) 1571 return false; 1572 } 1573 return true; 1574 } 1575 1576 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, 1577 unsigned &InsertAtByte, bool &Swap, bool IsLE) { 1578 // Check that the mask is shuffling words 1579 for (unsigned i = 0; i < 4; ++i) { 1580 unsigned B0 = N->getMaskElt(i*4); 1581 unsigned B1 = N->getMaskElt(i*4+1); 1582 unsigned B2 = N->getMaskElt(i*4+2); 1583 unsigned B3 = N->getMaskElt(i*4+3); 1584 if (B0 % 4) 1585 return false; 1586 if (B1 != B0+1 || B2 != B1+1 || B3 != B2+1) 1587 return false; 1588 } 1589 1590 // Now we look at mask elements 0,4,8,12 1591 unsigned M0 = N->getMaskElt(0) / 4; 1592 unsigned M1 = N->getMaskElt(4) / 4; 1593 unsigned M2 = N->getMaskElt(8) / 4; 1594 unsigned M3 = N->getMaskElt(12) / 4; 1595 unsigned LittleEndianShifts[] = { 2, 1, 0, 3 }; 1596 unsigned BigEndianShifts[] = { 3, 0, 1, 2 }; 1597 1598 // Below, let H and L be arbitrary elements of the shuffle mask 1599 // where H is in the range [4,7] and L is in the range [0,3]. 1600 // H, 1, 2, 3 or L, 5, 6, 7 1601 if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) || 1602 (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) { 1603 ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3]; 1604 InsertAtByte = IsLE ? 12 : 0; 1605 Swap = M0 < 4; 1606 return true; 1607 } 1608 // 0, H, 2, 3 or 4, L, 6, 7 1609 if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) || 1610 (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) { 1611 ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3]; 1612 InsertAtByte = IsLE ? 8 : 4; 1613 Swap = M1 < 4; 1614 return true; 1615 } 1616 // 0, 1, H, 3 or 4, 5, L, 7 1617 if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) || 1618 (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) { 1619 ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3]; 1620 InsertAtByte = IsLE ? 4 : 8; 1621 Swap = M2 < 4; 1622 return true; 1623 } 1624 // 0, 1, 2, H or 4, 5, 6, L 1625 if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) || 1626 (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) { 1627 ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3]; 1628 InsertAtByte = IsLE ? 0 : 12; 1629 Swap = M3 < 4; 1630 return true; 1631 } 1632 1633 // If both vector operands for the shuffle are the same vector, the mask will 1634 // contain only elements from the first one and the second one will be undef. 1635 if (N->getOperand(1).isUndef()) { 1636 ShiftElts = 0; 1637 Swap = true; 1638 unsigned XXINSERTWSrcElem = IsLE ? 2 : 1; 1639 if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) { 1640 InsertAtByte = IsLE ? 12 : 0; 1641 return true; 1642 } 1643 if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) { 1644 InsertAtByte = IsLE ? 8 : 4; 1645 return true; 1646 } 1647 if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) { 1648 InsertAtByte = IsLE ? 4 : 8; 1649 return true; 1650 } 1651 if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) { 1652 InsertAtByte = IsLE ? 0 : 12; 1653 return true; 1654 } 1655 } 1656 1657 return false; 1658 } 1659 1660 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the 1661 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask. 
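/// For the halfword splat example above (mask bytes <6,7, 6,7, ...>,
/// EltSize == 2), this returns 3 on a big-endian target and
/// (16/2) - 1 - 3 == 4 on a little-endian target, where element numbering is
/// reversed. (Worked example added for illustration.)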
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
                                SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  if (DAG.getDataLayout().isLittleEndian())
    return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  else
    return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(nullptr, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across chunks.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).isUndef()) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.

      LeadingZero &= isNullConstant(UniquedVals[i]);
      LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16)                                  // 0,0,0,4 -> vspltisw(4)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }
    if (LeadingOnes) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16)                           // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
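  // For example (illustration only), a v8i16 build_vector of eight 5's has
  // OpVal = 5 after the loop below and matches a ByteSize == 2 query as
  // "vspltish 5"; a splat of 37 does not fit the 5-bit signed immediate and
  // returns an empty SDValue.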
1735 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 1736 if (N->getOperand(i).isUndef()) continue; 1737 if (!OpVal.getNode()) 1738 OpVal = N->getOperand(i); 1739 else if (OpVal != N->getOperand(i)) 1740 return SDValue(); 1741 } 1742 1743 if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def. 1744 1745 unsigned ValSizeInBytes = EltSize; 1746 uint64_t Value = 0; 1747 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { 1748 Value = CN->getZExtValue(); 1749 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { 1750 assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!"); 1751 Value = FloatToBits(CN->getValueAPF().convertToFloat()); 1752 } 1753 1754 // If the splat value is larger than the element value, then we can never do 1755 // this splat. The only case that we could fit the replicated bits into our 1756 // immediate field for would be zero, and we prefer to use vxor for it. 1757 if (ValSizeInBytes < ByteSize) return SDValue(); 1758 1759 // If the element value is larger than the splat value, check if it consists 1760 // of a repeated bit pattern of size ByteSize. 1761 if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8)) 1762 return SDValue(); 1763 1764 // Properly sign extend the value. 1765 int MaskVal = SignExtend32(Value, ByteSize * 8); 1766 1767 // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros. 1768 if (MaskVal == 0) return SDValue(); 1769 1770 // Finally, if this value fits in a 5 bit sext field, return it 1771 if (SignExtend32<5>(MaskVal) == MaskVal) 1772 return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32); 1773 return SDValue(); 1774 } 1775 1776 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift 1777 /// amount, otherwise return -1. 1778 int PPC::isQVALIGNIShuffleMask(SDNode *N) { 1779 EVT VT = N->getValueType(0); 1780 if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1) 1781 return -1; 1782 1783 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 1784 1785 // Find the first non-undef value in the shuffle mask. 1786 unsigned i; 1787 for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i) 1788 /*search*/; 1789 1790 if (i == 4) return -1; // all undef. 1791 1792 // Otherwise, check to see if the rest of the elements are consecutively 1793 // numbered from this value. 1794 unsigned ShiftAmt = SVOp->getMaskElt(i); 1795 if (ShiftAmt < i) return -1; 1796 ShiftAmt -= i; 1797 1798 // Check the rest of the elements to see if they are consecutive. 1799 for (++i; i != 4; ++i) 1800 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 1801 return -1; 1802 1803 return ShiftAmt; 1804 } 1805 1806 //===----------------------------------------------------------------------===// 1807 // Addressing Mode Selection 1808 //===----------------------------------------------------------------------===// 1809 1810 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit 1811 /// or 64-bit immediate, and if the value can be accurately represented as a 1812 /// sign extension from a 16-bit value. If so, this returns true and the 1813 /// immediate. 
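/// For example (illustration only), constants 0x7FFF and -0x8000 are
/// accepted, while 0x8000 is rejected: truncating it to a signed 16-bit
/// value yields -32768, which no longer compares equal to the original.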
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
static bool isIntS16Immediate(SDValue Op, short &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}

/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index,
                                            SelectionDAG &DAG) const {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i; let the [r+imm] form fold it.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
    KnownBits LHSKnown, RHSKnown;
    DAG.computeKnownBits(N.getOperand(0), LHSKnown);

    if (LHSKnown.Zero.getBoolValue()) {
      DAG.computeKnownBits(N.getOperand(1), RHSKnown);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

// If we happen to be doing an i64 load or store into a stack slot that has
// less than a 4-byte alignment, then the frame-index elimination may need to
// use an indexed load or store instruction (because the offset may not be a
// multiple of 4). The extra register needed to hold the offset comes from the
// register scavenger, and it is possible that the scavenger will need to use
// an emergency spill slot. As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
  // FIXME: This does not handle the LWA case.
  if (VT != MVT::i64)
    return;

  // NOTE: We'll exclude negative FIs here, which come from argument
  // lowering, because there are no known test cases triggering this problem
  // using packed structures (or similar). We can remove this exclusion if
  // we find such a test case. The reason why this is so test-case driven is
  // because this entire 'fixup' is only to prevent crashes (from the
  // register scavenger) on not-really-valid inputs. For example, if we have:
  //   %a = alloca i1
  //   %b = bitcast i1* %a to i64*
  //   store i64 0, i64* %b
  // then the store should really be marked as 'align 1', but is not. If it
  // were marked as 'align 1' then the indexed form would have been
  // instruction-selected initially, and the problem this 'fixup' is preventing
  // won't happen regardless.
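  // A rough sketch of the failure mode this guards against (assumed, for
  // exposition): an i64 store to a 2-byte-aligned slot cannot use the DS-form
  //   std rS, offset(r1)
  // when the offset is not a multiple of 4, so frame-index elimination falls
  // back to an X-form stdx with the offset in a scavenged register, and the
  // scavenger may in turn need the emergency spill slot whose reservation is
  // driven by the setHasNonRISpills() flag set below.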
1895 if (FrameIdx < 0) 1896 return; 1897 1898 MachineFunction &MF = DAG.getMachineFunction(); 1899 MachineFrameInfo &MFI = MF.getFrameInfo(); 1900 1901 unsigned Align = MFI.getObjectAlignment(FrameIdx); 1902 if (Align >= 4) 1903 return; 1904 1905 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 1906 FuncInfo->setHasNonRISpills(); 1907 } 1908 1909 /// Returns true if the address N can be represented by a base register plus 1910 /// a signed 16-bit displacement [r+imm], and if it is not better 1911 /// represented as reg+reg. If Aligned is true, only accept displacements 1912 /// suitable for STD and friends, i.e. multiples of 4. 1913 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp, 1914 SDValue &Base, 1915 SelectionDAG &DAG, 1916 bool Aligned) const { 1917 // FIXME dl should come from parent load or store, not from address 1918 SDLoc dl(N); 1919 // If this can be more profitably realized as r+r, fail. 1920 if (SelectAddressRegReg(N, Disp, Base, DAG)) 1921 return false; 1922 1923 if (N.getOpcode() == ISD::ADD) { 1924 short imm = 0; 1925 if (isIntS16Immediate(N.getOperand(1), imm) && 1926 (!Aligned || (imm & 3) == 0)) { 1927 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 1928 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 1929 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1930 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 1931 } else { 1932 Base = N.getOperand(0); 1933 } 1934 return true; // [r+i] 1935 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 1936 // Match LOAD (ADD (X, Lo(G))). 1937 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() 1938 && "Cannot handle constant offsets yet!"); 1939 Disp = N.getOperand(1).getOperand(0); // The global address. 1940 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 1941 Disp.getOpcode() == ISD::TargetGlobalTLSAddress || 1942 Disp.getOpcode() == ISD::TargetConstantPool || 1943 Disp.getOpcode() == ISD::TargetJumpTable); 1944 Base = N.getOperand(0); 1945 return true; // [&g+r] 1946 } 1947 } else if (N.getOpcode() == ISD::OR) { 1948 short imm = 0; 1949 if (isIntS16Immediate(N.getOperand(1), imm) && 1950 (!Aligned || (imm & 3) == 0)) { 1951 // If this is an or of disjoint bitfields, we can codegen this as an add 1952 // (for better address arithmetic) if the LHS and RHS of the OR are 1953 // provably disjoint. 1954 KnownBits LHSKnown; 1955 DAG.computeKnownBits(N.getOperand(0), LHSKnown); 1956 1957 if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 1958 // If all of the bits are known zero on the LHS or RHS, the add won't 1959 // carry. 1960 if (FrameIndexSDNode *FI = 1961 dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 1962 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1963 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 1964 } else { 1965 Base = N.getOperand(0); 1966 } 1967 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 1968 return true; 1969 } 1970 } 1971 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 1972 // Loading from a constant address. 1973 1974 // If this address fits entirely in a 16-bit sext immediate field, codegen 1975 // this as "d, 0" 1976 short Imm; 1977 if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) { 1978 Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0)); 1979 Base = DAG.getRegister(Subtarget.isPPC64() ? 
PPC::ZERO8 : PPC::ZERO,
                             CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if ((CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
        (!Aligned || (CN->getZExtValue() & 3) == 0)) {
      int Addr = (int)CN->getZExtValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
                                   MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
    fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
  } else
    Base = N;
  return true;      // [r+0]
}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address.  This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the operand is an addition, always emit this as [r+r], since this is
  // better (for code size, and execution, as the memop does the add for free)
  // than emitting an explicit add.
  if (N.getOpcode() == ISD::ADD) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Index = N;
  return true;
}

/// getPreIndexedAddressParts - returns true by value, and the base pointer,
/// offset pointer, and addressing mode by reference, if the node's address
/// can be legally represented as a pre-indexed load/store address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  unsigned Alignment;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlignment();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlignment();
    isLoad = false;
  } else
    return false;

  // PowerPC doesn't have preinc load/store instructions for vectors (except
  // for QPX, which does have preinc r+r forms).
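  // Illustrative scalar case (not from the original source): a pre-increment
  // i32 load selected from the ISD::PRE_INC forms below becomes
  //   lwzu r4, 4(r3)
  // which loads from r3+4 and writes the updated address back into r3.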
2064 if (VT.isVector()) { 2065 if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) { 2066 return false; 2067 } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) { 2068 AM = ISD::PRE_INC; 2069 return true; 2070 } 2071 } 2072 2073 if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) { 2074 // Common code will reject creating a pre-inc form if the base pointer 2075 // is a frame index, or if N is a store and the base pointer is either 2076 // the same as or a predecessor of the value being stored. Check for 2077 // those situations here, and try with swapped Base/Offset instead. 2078 bool Swap = false; 2079 2080 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base)) 2081 Swap = true; 2082 else if (!isLoad) { 2083 SDValue Val = cast<StoreSDNode>(N)->getValue(); 2084 if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode())) 2085 Swap = true; 2086 } 2087 2088 if (Swap) 2089 std::swap(Base, Offset); 2090 2091 AM = ISD::PRE_INC; 2092 return true; 2093 } 2094 2095 // LDU/STU can only handle immediates that are a multiple of 4. 2096 if (VT != MVT::i64) { 2097 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, false)) 2098 return false; 2099 } else { 2100 // LDU/STU need an address with at least 4-byte alignment. 2101 if (Alignment < 4) 2102 return false; 2103 2104 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, true)) 2105 return false; 2106 } 2107 2108 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 2109 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 2110 // sext i32 to i64 when addr mode is r+i. 2111 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 && 2112 LD->getExtensionType() == ISD::SEXTLOAD && 2113 isa<ConstantSDNode>(Offset)) 2114 return false; 2115 } 2116 2117 AM = ISD::PRE_INC; 2118 return true; 2119 } 2120 2121 //===----------------------------------------------------------------------===// 2122 // LowerOperation implementation 2123 //===----------------------------------------------------------------------===// 2124 2125 /// Return true if we should reference labels using a PICBase, set the HiOpFlags 2126 /// and LoOpFlags to the target MO flags. 2127 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget, 2128 unsigned &HiOpFlags, unsigned &LoOpFlags, 2129 const GlobalValue *GV = nullptr) { 2130 HiOpFlags = PPCII::MO_HA; 2131 LoOpFlags = PPCII::MO_LO; 2132 2133 // Don't use the pic base if not in PIC relocation model. 2134 if (IsPIC) { 2135 HiOpFlags |= PPCII::MO_PIC_FLAG; 2136 LoOpFlags |= PPCII::MO_PIC_FLAG; 2137 } 2138 2139 // If this is a reference to a global value that requires a non-lazy-ptr, make 2140 // sure that instruction lowering adds it. 2141 if (GV && Subtarget.hasLazyResolverStub(GV)) { 2142 HiOpFlags |= PPCII::MO_NLP_FLAG; 2143 LoOpFlags |= PPCII::MO_NLP_FLAG; 2144 2145 if (GV->hasHiddenVisibility()) { 2146 HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 2147 LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 2148 } 2149 } 2150 } 2151 2152 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, 2153 SelectionDAG &DAG) { 2154 SDLoc DL(HiPart); 2155 EVT PtrVT = HiPart.getValueType(); 2156 SDValue Zero = DAG.getConstant(0, DL, PtrVT); 2157 2158 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); 2159 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); 2160 2161 // With PIC, the first instruction is actually "GR+hi(&G)". 
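  // A rough sketch of the 32-bit code this produces (assembly syntax and
  // registers are approximate):
  //   addis rT, rPIC, G@ha   ; Hi part, relative to the PIC base when isPIC
  //   addi  rT, rT,   G@l    ; add the Lo part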
2162 if (isPIC) 2163 Hi = DAG.getNode(ISD::ADD, DL, PtrVT, 2164 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); 2165 2166 // Generate non-pic code that has direct accesses to the constant pool. 2167 // The address of the global is just (hi(&g)+lo(&g)). 2168 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); 2169 } 2170 2171 static void setUsesTOCBasePtr(MachineFunction &MF) { 2172 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2173 FuncInfo->setUsesTOCBasePtr(); 2174 } 2175 2176 static void setUsesTOCBasePtr(SelectionDAG &DAG) { 2177 setUsesTOCBasePtr(DAG.getMachineFunction()); 2178 } 2179 2180 static SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, bool Is64Bit, 2181 SDValue GA) { 2182 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 2183 SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) : 2184 DAG.getNode(PPCISD::GlobalBaseReg, dl, VT); 2185 2186 SDValue Ops[] = { GA, Reg }; 2187 return DAG.getMemIntrinsicNode( 2188 PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT, 2189 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0, false, true, 2190 false, 0); 2191 } 2192 2193 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 2194 SelectionDAG &DAG) const { 2195 EVT PtrVT = Op.getValueType(); 2196 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 2197 const Constant *C = CP->getConstVal(); 2198 2199 // 64-bit SVR4 ABI code is always position-independent. 2200 // The actual address of the GlobalValue is stored in the TOC. 2201 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2202 setUsesTOCBasePtr(DAG); 2203 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0); 2204 return getTOCEntry(DAG, SDLoc(CP), true, GA); 2205 } 2206 2207 unsigned MOHiFlag, MOLoFlag; 2208 bool IsPIC = isPositionIndependent(); 2209 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2210 2211 if (IsPIC && Subtarget.isSVR4ABI()) { 2212 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 2213 PPCII::MO_PIC_FLAG); 2214 return getTOCEntry(DAG, SDLoc(CP), false, GA); 2215 } 2216 2217 SDValue CPIHi = 2218 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag); 2219 SDValue CPILo = 2220 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag); 2221 return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG); 2222 } 2223 2224 // For 64-bit PowerPC, prefer the more compact relative encodings. 2225 // This trades 32 bits per jump table entry for one or two instructions 2226 // on the jump site. 
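// (Explanatory note, added:) with EK_LabelDifference32 each entry holds a
// 32-bit difference from a label rather than a full 64-bit pointer, so the
// dispatch sequence must re-add the base address before branching; see
// MachineJumpTableInfo::JTEntryKind for the available encodings.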
2227 unsigned PPCTargetLowering::getJumpTableEncoding() const { 2228 if (isJumpTableRelative()) 2229 return MachineJumpTableInfo::EK_LabelDifference32; 2230 2231 return TargetLowering::getJumpTableEncoding(); 2232 } 2233 2234 bool PPCTargetLowering::isJumpTableRelative() const { 2235 if (Subtarget.isPPC64()) 2236 return true; 2237 return TargetLowering::isJumpTableRelative(); 2238 } 2239 2240 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table, 2241 SelectionDAG &DAG) const { 2242 if (!Subtarget.isPPC64()) 2243 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 2244 2245 switch (getTargetMachine().getCodeModel()) { 2246 case CodeModel::Default: 2247 case CodeModel::Small: 2248 case CodeModel::Medium: 2249 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 2250 default: 2251 return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(), 2252 getPointerTy(DAG.getDataLayout())); 2253 } 2254 } 2255 2256 const MCExpr * 2257 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF, 2258 unsigned JTI, 2259 MCContext &Ctx) const { 2260 if (!Subtarget.isPPC64()) 2261 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 2262 2263 switch (getTargetMachine().getCodeModel()) { 2264 case CodeModel::Default: 2265 case CodeModel::Small: 2266 case CodeModel::Medium: 2267 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 2268 default: 2269 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx); 2270 } 2271 } 2272 2273 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 2274 EVT PtrVT = Op.getValueType(); 2275 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 2276 2277 // 64-bit SVR4 ABI code is always position-independent. 2278 // The actual address of the GlobalValue is stored in the TOC. 2279 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2280 setUsesTOCBasePtr(DAG); 2281 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 2282 return getTOCEntry(DAG, SDLoc(JT), true, GA); 2283 } 2284 2285 unsigned MOHiFlag, MOLoFlag; 2286 bool IsPIC = isPositionIndependent(); 2287 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2288 2289 if (IsPIC && Subtarget.isSVR4ABI()) { 2290 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 2291 PPCII::MO_PIC_FLAG); 2292 return getTOCEntry(DAG, SDLoc(GA), false, GA); 2293 } 2294 2295 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 2296 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 2297 return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG); 2298 } 2299 2300 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 2301 SelectionDAG &DAG) const { 2302 EVT PtrVT = Op.getValueType(); 2303 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); 2304 const BlockAddress *BA = BASDN->getBlockAddress(); 2305 2306 // 64-bit SVR4 ABI code is always position-independent. 2307 // The actual BlockAddress is stored in the TOC. 
2308 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2309 setUsesTOCBasePtr(DAG); 2310 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); 2311 return getTOCEntry(DAG, SDLoc(BASDN), true, GA); 2312 } 2313 2314 unsigned MOHiFlag, MOLoFlag; 2315 bool IsPIC = isPositionIndependent(); 2316 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2317 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 2318 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 2319 return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG); 2320 } 2321 2322 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 2323 SelectionDAG &DAG) const { 2324 // FIXME: TLS addresses currently use medium model code sequences, 2325 // which is the most useful form. Eventually support for small and 2326 // large models could be added if users need it, at the cost of 2327 // additional complexity. 2328 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2329 if (DAG.getTarget().Options.EmulatedTLS) 2330 return LowerToTLSEmulatedModel(GA, DAG); 2331 2332 SDLoc dl(GA); 2333 const GlobalValue *GV = GA->getGlobal(); 2334 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2335 bool is64bit = Subtarget.isPPC64(); 2336 const Module *M = DAG.getMachineFunction().getFunction()->getParent(); 2337 PICLevel::Level picLevel = M->getPICLevel(); 2338 2339 TLSModel::Model Model = getTargetMachine().getTLSModel(GV); 2340 2341 if (Model == TLSModel::LocalExec) { 2342 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2343 PPCII::MO_TPREL_HA); 2344 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2345 PPCII::MO_TPREL_LO); 2346 SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2, 2347 is64bit ? MVT::i64 : MVT::i32); 2348 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 2349 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 2350 } 2351 2352 if (Model == TLSModel::InitialExec) { 2353 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2354 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2355 PPCII::MO_TLS); 2356 SDValue GOTPtr; 2357 if (is64bit) { 2358 setUsesTOCBasePtr(DAG); 2359 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2360 GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, 2361 PtrVT, GOTReg, TGA); 2362 } else 2363 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); 2364 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, 2365 PtrVT, TGA, GOTPtr); 2366 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 2367 } 2368 2369 if (Model == TLSModel::GeneralDynamic) { 2370 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2371 SDValue GOTPtr; 2372 if (is64bit) { 2373 setUsesTOCBasePtr(DAG); 2374 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2375 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 2376 GOTReg, TGA); 2377 } else { 2378 if (picLevel == PICLevel::SmallPIC) 2379 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2380 else 2381 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2382 } 2383 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT, 2384 GOTPtr, TGA, TGA); 2385 } 2386 2387 if (Model == TLSModel::LocalDynamic) { 2388 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2389 SDValue GOTPtr; 2390 if (is64bit) { 2391 setUsesTOCBasePtr(DAG); 2392 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2393 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 2394 GOTReg, TGA); 2395 } else { 2396 if (picLevel == 
PICLevel::SmallPIC) 2397 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2398 else 2399 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2400 } 2401 SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl, 2402 PtrVT, GOTPtr, TGA, TGA); 2403 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, 2404 PtrVT, TLSAddr, TGA); 2405 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA); 2406 } 2407 2408 llvm_unreachable("Unknown TLS model!"); 2409 } 2410 2411 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 2412 SelectionDAG &DAG) const { 2413 EVT PtrVT = Op.getValueType(); 2414 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 2415 SDLoc DL(GSDN); 2416 const GlobalValue *GV = GSDN->getGlobal(); 2417 2418 // 64-bit SVR4 ABI code is always position-independent. 2419 // The actual address of the GlobalValue is stored in the TOC. 2420 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2421 setUsesTOCBasePtr(DAG); 2422 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); 2423 return getTOCEntry(DAG, DL, true, GA); 2424 } 2425 2426 unsigned MOHiFlag, MOLoFlag; 2427 bool IsPIC = isPositionIndependent(); 2428 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV); 2429 2430 if (IsPIC && Subtarget.isSVR4ABI()) { 2431 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 2432 GSDN->getOffset(), 2433 PPCII::MO_PIC_FLAG); 2434 return getTOCEntry(DAG, DL, false, GA); 2435 } 2436 2437 SDValue GAHi = 2438 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); 2439 SDValue GALo = 2440 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); 2441 2442 SDValue Ptr = LowerLabelRef(GAHi, GALo, IsPIC, DAG); 2443 2444 // If the global reference is actually to a non-lazy-pointer, we have to do an 2445 // extra load to get the address of the global. 2446 if (MOHiFlag & PPCII::MO_NLP_FLAG) 2447 Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo()); 2448 return Ptr; 2449 } 2450 2451 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 2452 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2453 SDLoc dl(Op); 2454 2455 if (Op.getValueType() == MVT::v2i64) { 2456 // When the operands themselves are v2i64 values, we need to do something 2457 // special because VSX has no underlying comparison operations for these. 2458 if (Op.getOperand(0).getValueType() == MVT::v2i64) { 2459 // Equality can be handled by casting to the legal type for Altivec 2460 // comparisons, everything else needs to be expanded. 2461 if (CC == ISD::SETEQ || CC == ISD::SETNE) { 2462 return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 2463 DAG.getSetCC(dl, MVT::v4i32, 2464 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)), 2465 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)), 2466 CC)); 2467 } 2468 2469 return SDValue(); 2470 } 2471 2472 // We handle most of these in the usual way. 2473 return Op; 2474 } 2475 2476 // If we're comparing for equality to zero, expose the fact that this is 2477 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can 2478 // fold the new nodes. 2479 if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG)) 2480 return V; 2481 2482 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2483 // Leave comparisons against 0 and -1 alone for now, since they're usually 2484 // optimized. FIXME: revisit this when we can custom lower all setcc 2485 // optimizations. 
2486 if (C->isAllOnesValue() || C->isNullValue()) 2487 return SDValue(); 2488 } 2489 2490 // If we have an integer seteq/setne, turn it into a compare against zero 2491 // by xor'ing the rhs with the lhs, which is faster than setting a 2492 // condition register, reading it back out, and masking the correct bit. The 2493 // normal approach here uses sub to do this instead of xor. Using xor exposes 2494 // the result to other bit-twiddling opportunities. 2495 EVT LHSVT = Op.getOperand(0).getValueType(); 2496 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 2497 EVT VT = Op.getValueType(); 2498 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0), 2499 Op.getOperand(1)); 2500 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC); 2501 } 2502 return SDValue(); 2503 } 2504 2505 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { 2506 SDNode *Node = Op.getNode(); 2507 EVT VT = Node->getValueType(0); 2508 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2509 SDValue InChain = Node->getOperand(0); 2510 SDValue VAListPtr = Node->getOperand(1); 2511 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 2512 SDLoc dl(Node); 2513 2514 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only"); 2515 2516 // gpr_index 2517 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 2518 VAListPtr, MachinePointerInfo(SV), MVT::i8); 2519 InChain = GprIndex.getValue(1); 2520 2521 if (VT == MVT::i64) { 2522 // Check if GprIndex is even 2523 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex, 2524 DAG.getConstant(1, dl, MVT::i32)); 2525 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd, 2526 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE); 2527 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex, 2528 DAG.getConstant(1, dl, MVT::i32)); 2529 // Align GprIndex to be even if it isn't 2530 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne, 2531 GprIndex); 2532 } 2533 2534 // fpr index is 1 byte after gpr 2535 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2536 DAG.getConstant(1, dl, MVT::i32)); 2537 2538 // fpr 2539 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 2540 FprPtr, MachinePointerInfo(SV), MVT::i8); 2541 InChain = FprIndex.getValue(1); 2542 2543 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2544 DAG.getConstant(8, dl, MVT::i32)); 2545 2546 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2547 DAG.getConstant(4, dl, MVT::i32)); 2548 2549 // areas 2550 SDValue OverflowArea = 2551 DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo()); 2552 InChain = OverflowArea.getValue(1); 2553 2554 SDValue RegSaveArea = 2555 DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo()); 2556 InChain = RegSaveArea.getValue(1); 2557 2558 // select overflow_area if index > 8 2559 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex, 2560 DAG.getConstant(8, dl, MVT::i32), ISD::SETLT); 2561 2562 // adjustment constant gpr_index * 4/8 2563 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32, 2564 VT.isInteger() ? GprIndex : FprIndex, 2565 DAG.getConstant(VT.isInteger() ? 
4 : 8, dl, 2566 MVT::i32)); 2567 2568 // OurReg = RegSaveArea + RegConstant 2569 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea, 2570 RegConstant); 2571 2572 // Floating types are 32 bytes into RegSaveArea 2573 if (VT.isFloatingPoint()) 2574 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg, 2575 DAG.getConstant(32, dl, MVT::i32)); 2576 2577 // increase {f,g}pr_index by 1 (or 2 if VT is i64) 2578 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32, 2579 VT.isInteger() ? GprIndex : FprIndex, 2580 DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl, 2581 MVT::i32)); 2582 2583 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1, 2584 VT.isInteger() ? VAListPtr : FprPtr, 2585 MachinePointerInfo(SV), MVT::i8); 2586 2587 // determine if we should load from reg_save_area or overflow_area 2588 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea); 2589 2590 // increase overflow_area by 4/8 if gpr/fpr > 8 2591 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea, 2592 DAG.getConstant(VT.isInteger() ? 4 : 8, 2593 dl, MVT::i32)); 2594 2595 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea, 2596 OverflowAreaPlusN); 2597 2598 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr, 2599 MachinePointerInfo(), MVT::i32); 2600 2601 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo()); 2602 } 2603 2604 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { 2605 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only"); 2606 2607 // We have to copy the entire va_list struct: 2608 // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte 2609 return DAG.getMemcpy(Op.getOperand(0), Op, 2610 Op.getOperand(1), Op.getOperand(2), 2611 DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true, 2612 false, MachinePointerInfo(), MachinePointerInfo()); 2613 } 2614 2615 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 2616 SelectionDAG &DAG) const { 2617 return Op.getOperand(0); 2618 } 2619 2620 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 2621 SelectionDAG &DAG) const { 2622 SDValue Chain = Op.getOperand(0); 2623 SDValue Trmp = Op.getOperand(1); // trampoline 2624 SDValue FPtr = Op.getOperand(2); // nested function 2625 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 2626 SDLoc dl(Op); 2627 2628 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2629 bool isPPC64 = (PtrVT == MVT::i64); 2630 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext()); 2631 2632 TargetLowering::ArgListTy Args; 2633 TargetLowering::ArgListEntry Entry; 2634 2635 Entry.Ty = IntPtrTy; 2636 Entry.Node = Trmp; Args.push_back(Entry); 2637 2638 // TrampSize == (isPPC64 ? 48 : 40); 2639 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl, 2640 isPPC64 ? 
MVT::i64 : MVT::i32); 2641 Args.push_back(Entry); 2642 2643 Entry.Node = FPtr; Args.push_back(Entry); 2644 Entry.Node = Nest; Args.push_back(Entry); 2645 2646 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 2647 TargetLowering::CallLoweringInfo CLI(DAG); 2648 CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( 2649 CallingConv::C, Type::getVoidTy(*DAG.getContext()), 2650 DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args)); 2651 2652 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 2653 return CallResult.second; 2654 } 2655 2656 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 2657 MachineFunction &MF = DAG.getMachineFunction(); 2658 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2659 EVT PtrVT = getPointerTy(MF.getDataLayout()); 2660 2661 SDLoc dl(Op); 2662 2663 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) { 2664 // vastart just stores the address of the VarArgsFrameIndex slot into the 2665 // memory location argument. 2666 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2667 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2668 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2669 MachinePointerInfo(SV)); 2670 } 2671 2672 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 2673 // We suppose the given va_list is already allocated. 2674 // 2675 // typedef struct { 2676 // char gpr; /* index into the array of 8 GPRs 2677 // * stored in the register save area 2678 // * gpr=0 corresponds to r3, 2679 // * gpr=1 to r4, etc. 2680 // */ 2681 // char fpr; /* index into the array of 8 FPRs 2682 // * stored in the register save area 2683 // * fpr=0 corresponds to f1, 2684 // * fpr=1 to f2, etc. 
2685 // */ 2686 // char *overflow_arg_area; 2687 // /* location on stack that holds 2688 // * the next overflow argument 2689 // */ 2690 // char *reg_save_area; 2691 // /* where r3:r10 and f1:f8 (if saved) 2692 // * are stored 2693 // */ 2694 // } va_list[1]; 2695 2696 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32); 2697 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32); 2698 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 2699 PtrVT); 2700 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 2701 PtrVT); 2702 2703 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 2704 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT); 2705 2706 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 2707 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT); 2708 2709 uint64_t FPROffset = 1; 2710 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT); 2711 2712 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2713 2714 // Store first byte : number of int regs 2715 SDValue firstStore = 2716 DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1), 2717 MachinePointerInfo(SV), MVT::i8); 2718 uint64_t nextOffset = FPROffset; 2719 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 2720 ConstFPROffset); 2721 2722 // Store second byte : number of float regs 2723 SDValue secondStore = 2724 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 2725 MachinePointerInfo(SV, nextOffset), MVT::i8); 2726 nextOffset += StackOffset; 2727 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 2728 2729 // Store second word : arguments given on stack 2730 SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 2731 MachinePointerInfo(SV, nextOffset)); 2732 nextOffset += FrameOffset; 2733 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 2734 2735 // Store third word : arguments given in registers 2736 return DAG.getStore(thirdStore, dl, FR, nextPtr, 2737 MachinePointerInfo(SV, nextOffset)); 2738 } 2739 2740 #include "PPCGenCallingConv.inc" 2741 2742 // Function whose sole purpose is to kill compiler warnings 2743 // stemming from unused functions included from PPCGenCallingConv.inc. 2744 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const { 2745 return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS; 2746 } 2747 2748 bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 2749 CCValAssign::LocInfo &LocInfo, 2750 ISD::ArgFlagsTy &ArgFlags, 2751 CCState &State) { 2752 return true; 2753 } 2754 2755 bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, 2756 MVT &LocVT, 2757 CCValAssign::LocInfo &LocInfo, 2758 ISD::ArgFlagsTy &ArgFlags, 2759 CCState &State) { 2760 static const MCPhysReg ArgRegs[] = { 2761 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2762 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2763 }; 2764 const unsigned NumArgRegs = array_lengthof(ArgRegs); 2765 2766 unsigned RegNum = State.getFirstUnallocated(ArgRegs); 2767 2768 // Skip one register if the first unallocated register has an even register 2769 // number and there are still argument registers available which have not been 2770 // allocated yet. RegNum is actually an index into ArgRegs, which means we 2771 // need to skip a register if RegNum is odd. 
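  // For example (illustration only): after a single i32 argument lands in R3,
  // the first unallocated register is R4 (index 1, odd), so R4 is skipped
  // here and a following i64 argument is passed in the aligned pair R5:R6.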
  if (RegNum != NumArgRegs && RegNum % 2 == 1) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the first
  // unallocated register has an odd register number and does not actually
  // allocate a register for the current argument.
  return false;
}

bool
llvm::CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128(unsigned &ValNo, MVT &ValVT,
                                                  MVT &LocVT,
                                                  CCValAssign::LocInfo &LocInfo,
                                                  ISD::ArgFlagsTy &ArgFlags,
                                                  CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);
  int RegsLeft = NumArgRegs - RegNum;

  // Skip the remaining argument registers if there are not enough left for a
  // long double (4 GPRs in soft-float mode), and put the long double argument
  // on the stack.
  if (RegNum != NumArgRegs && RegsLeft < 4) {
    for (int i = 0; i < RegsLeft; i++) {
      State.AllocateReg(ArgRegs[RegNum + i]);
    }
  }

  return false;
}

bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
                                               MVT &LocVT,
                                               CCValAssign::LocInfo &LocInfo,
                                               ISD::ArgFlagsTy &ArgFlags,
                                               CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8
  };

  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // If there is only one floating-point register left we need to put both f64
  // values of a split ppc_fp128 value on the stack.
  if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the two
  // f64 values a ppc_fp128 value is split into are both passed in registers
  // or both passed on the stack and does not actually allocate a register for
  // the current argument.
  return false;
}

/// FPR - The set of FP registers that should be allocated for arguments,
/// on Darwin.
static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5,
                                PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10,
                                PPC::F11, PPC::F12, PPC::F13};

/// QFPR - The set of QPX registers that should be allocated for arguments.
static const MCPhysReg QFPR[] = {
    PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7,
    PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};

/// CalculateStackSlotSize - Calculates the size reserved for this argument on
/// the stack.
static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
                                       unsigned PtrByteSize) {
  unsigned ArgSize = ArgVT.getStoreSize();
  if (Flags.isByVal())
    ArgSize = Flags.getByValSize();

  // Round up to multiples of the pointer size, except for array members,
  // which are always packed.
  if (!Flags.isInConsecutiveRegs())
    ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

  return ArgSize;
}

/// CalculateStackSlotAlignment - Calculates the alignment of this argument
/// on the stack.
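/// For example (illustration only), with PtrByteSize == 8 an i64 argument
/// keeps the default 8-byte alignment, a v4i32 Altivec argument is padded to
/// a 16-byte boundary, and a QPX v4f64 argument to a 32-byte boundary.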
2864 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, 2865 ISD::ArgFlagsTy Flags, 2866 unsigned PtrByteSize) { 2867 unsigned Align = PtrByteSize; 2868 2869 // Altivec parameters are padded to a 16 byte boundary. 2870 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 2871 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 2872 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 2873 ArgVT == MVT::v1i128) 2874 Align = 16; 2875 // QPX vector types stored in double-precision are padded to a 32 byte 2876 // boundary. 2877 else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1) 2878 Align = 32; 2879 2880 // ByVal parameters are aligned as requested. 2881 if (Flags.isByVal()) { 2882 unsigned BVAlign = Flags.getByValAlign(); 2883 if (BVAlign > PtrByteSize) { 2884 if (BVAlign % PtrByteSize != 0) 2885 llvm_unreachable( 2886 "ByVal alignment is not a multiple of the pointer size"); 2887 2888 Align = BVAlign; 2889 } 2890 } 2891 2892 // Array members are always packed to their original alignment. 2893 if (Flags.isInConsecutiveRegs()) { 2894 // If the array member was split into multiple registers, the first 2895 // needs to be aligned to the size of the full type. (Except for 2896 // ppcf128, which is only aligned as its f64 components.) 2897 if (Flags.isSplit() && OrigVT != MVT::ppcf128) 2898 Align = OrigVT.getStoreSize(); 2899 else 2900 Align = ArgVT.getStoreSize(); 2901 } 2902 2903 return Align; 2904 } 2905 2906 /// CalculateStackSlotUsed - Return whether this argument will use its 2907 /// stack slot (instead of being passed in registers). ArgOffset, 2908 /// AvailableFPRs, and AvailableVRs must hold the current argument 2909 /// position, and will be updated to account for this argument. 2910 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, 2911 ISD::ArgFlagsTy Flags, 2912 unsigned PtrByteSize, 2913 unsigned LinkageSize, 2914 unsigned ParamAreaSize, 2915 unsigned &ArgOffset, 2916 unsigned &AvailableFPRs, 2917 unsigned &AvailableVRs, bool HasQPX) { 2918 bool UseMemory = false; 2919 2920 // Respect alignment of argument on the stack. 2921 unsigned Align = 2922 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 2923 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 2924 // If there's no space left in the argument save area, we must 2925 // use memory (this check also catches zero-sized arguments). 2926 if (ArgOffset >= LinkageSize + ParamAreaSize) 2927 UseMemory = true; 2928 2929 // Allocate argument on the stack. 2930 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 2931 if (Flags.isInConsecutiveRegsLast()) 2932 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2933 // If we overran the argument save area, we must use memory 2934 // (this check catches arguments passed partially in memory) 2935 if (ArgOffset > LinkageSize + ParamAreaSize) 2936 UseMemory = true; 2937 2938 // However, if the argument is actually passed in an FPR or a VR, 2939 // we don't use memory after all. 2940 if (!Flags.isByVal()) { 2941 if (ArgVT == MVT::f32 || ArgVT == MVT::f64 || 2942 // QPX registers overlap with the scalar FP registers. 
2943 (HasQPX && (ArgVT == MVT::v4f32 || 2944 ArgVT == MVT::v4f64 || 2945 ArgVT == MVT::v4i1))) 2946 if (AvailableFPRs > 0) { 2947 --AvailableFPRs; 2948 return false; 2949 } 2950 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 2951 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 2952 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 2953 ArgVT == MVT::v1i128) 2954 if (AvailableVRs > 0) { 2955 --AvailableVRs; 2956 return false; 2957 } 2958 } 2959 2960 return UseMemory; 2961 } 2962 2963 /// EnsureStackAlignment - Round stack frame size up from NumBytes to 2964 /// ensure minimum alignment required for target. 2965 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, 2966 unsigned NumBytes) { 2967 unsigned TargetAlign = Lowering->getStackAlignment(); 2968 unsigned AlignMask = TargetAlign - 1; 2969 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 2970 return NumBytes; 2971 } 2972 2973 SDValue PPCTargetLowering::LowerFormalArguments( 2974 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 2975 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 2976 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 2977 if (Subtarget.isSVR4ABI()) { 2978 if (Subtarget.isPPC64()) 2979 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, 2980 dl, DAG, InVals); 2981 else 2982 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, 2983 dl, DAG, InVals); 2984 } else { 2985 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, 2986 dl, DAG, InVals); 2987 } 2988 } 2989 2990 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4( 2991 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 2992 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 2993 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 2994 2995 // 32-bit SVR4 ABI Stack Frame Layout: 2996 // +-----------------------------------+ 2997 // +--> | Back chain | 2998 // | +-----------------------------------+ 2999 // | | Floating-point register save area | 3000 // | +-----------------------------------+ 3001 // | | General register save area | 3002 // | +-----------------------------------+ 3003 // | | CR save word | 3004 // | +-----------------------------------+ 3005 // | | VRSAVE save word | 3006 // | +-----------------------------------+ 3007 // | | Alignment padding | 3008 // | +-----------------------------------+ 3009 // | | Vector register save area | 3010 // | +-----------------------------------+ 3011 // | | Local variable space | 3012 // | +-----------------------------------+ 3013 // | | Parameter list area | 3014 // | +-----------------------------------+ 3015 // | | LR save word | 3016 // | +-----------------------------------+ 3017 // SP--> +--- | Back chain | 3018 // +-----------------------------------+ 3019 // 3020 // Specifications: 3021 // System V Application Binary Interface PowerPC Processor Supplement 3022 // AltiVec Technology Programming Interface Manual 3023 3024 MachineFunction &MF = DAG.getMachineFunction(); 3025 MachineFrameInfo &MFI = MF.getFrameInfo(); 3026 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3027 3028 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3029 // Potential tail calls could cause overwriting of argument stack slots. 3030 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3031 (CallConv == CallingConv::Fast)); 3032 unsigned PtrByteSize = 4; 3033 3034 // Assign locations to all of the incoming arguments. 
3035 SmallVector<CCValAssign, 16> ArgLocs; 3036 PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 3037 *DAG.getContext()); 3038 3039 // Reserve space for the linkage area on the stack. 3040 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3041 CCInfo.AllocateStack(LinkageSize, PtrByteSize); 3042 if (useSoftFloat()) 3043 CCInfo.PreAnalyzeFormalArguments(Ins); 3044 3045 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 3046 CCInfo.clearWasPPCF128(); 3047 3048 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 3049 CCValAssign &VA = ArgLocs[i]; 3050 3051 // Arguments stored in registers. 3052 if (VA.isRegLoc()) { 3053 const TargetRegisterClass *RC; 3054 EVT ValVT = VA.getValVT(); 3055 3056 switch (ValVT.getSimpleVT().SimpleTy) { 3057 default: 3058 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 3059 case MVT::i1: 3060 case MVT::i32: 3061 RC = &PPC::GPRCRegClass; 3062 break; 3063 case MVT::f32: 3064 if (Subtarget.hasP8Vector()) 3065 RC = &PPC::VSSRCRegClass; 3066 else 3067 RC = &PPC::F4RCRegClass; 3068 break; 3069 case MVT::f64: 3070 if (Subtarget.hasVSX()) 3071 RC = &PPC::VSFRCRegClass; 3072 else 3073 RC = &PPC::F8RCRegClass; 3074 break; 3075 case MVT::v16i8: 3076 case MVT::v8i16: 3077 case MVT::v4i32: 3078 RC = &PPC::VRRCRegClass; 3079 break; 3080 case MVT::v4f32: 3081 RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass; 3082 break; 3083 case MVT::v2f64: 3084 case MVT::v2i64: 3085 RC = &PPC::VRRCRegClass; 3086 break; 3087 case MVT::v4f64: 3088 RC = &PPC::QFRCRegClass; 3089 break; 3090 case MVT::v4i1: 3091 RC = &PPC::QBRCRegClass; 3092 break; 3093 } 3094 3095 // Transform the arguments stored in physical registers into virtual ones. 3096 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 3097 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, 3098 ValVT == MVT::i1 ? MVT::i32 : ValVT); 3099 3100 if (ValVT == MVT::i1) 3101 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); 3102 3103 InVals.push_back(ArgValue); 3104 } else { 3105 // Argument stored in memory. 3106 assert(VA.isMemLoc()); 3107 3108 unsigned ArgSize = VA.getLocVT().getStoreSize(); 3109 int FI = MFI.CreateFixedObject(ArgSize, VA.getLocMemOffset(), 3110 isImmutable); 3111 3112 // Create load nodes to retrieve arguments from the stack. 3113 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3114 InVals.push_back( 3115 DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo())); 3116 } 3117 } 3118 3119 // Assign locations to all of the incoming aggregate by value arguments. 3120 // Aggregates passed by value are stored in the local variable space of the 3121 // caller's stack frame, right above the parameter list area. 3122 SmallVector<CCValAssign, 16> ByValArgLocs; 3123 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 3124 ByValArgLocs, *DAG.getContext()); 3125 3126 // Reserve stack space for the allocations in CCInfo. 3127 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 3128 3129 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 3130 3131 // Area that is at least reserved in the caller of this function. 3132 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 3133 MinReservedArea = std::max(MinReservedArea, LinkageSize); 3134 3135 // Set the size that is at least reserved in caller of this function. Tail 3136 // call optimized function's reserved stack space needs to be aligned so that 3137 // taking the difference between two stack areas will result in an aligned 3138 // stack. 
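  // For example, with a 16-byte target stack alignment, a 40-byte reserved
  // area is rounded up by EnsureStackAlignment below to 48 bytes:
  // (40 + 15) & ~15 == 48.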
3139 MinReservedArea = 3140 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 3141 FuncInfo->setMinReservedArea(MinReservedArea); 3142 3143 SmallVector<SDValue, 8> MemOps; 3144 3145 // If the function takes variable number of arguments, make a frame index for 3146 // the start of the first vararg value... for expansion of llvm.va_start. 3147 if (isVarArg) { 3148 static const MCPhysReg GPArgRegs[] = { 3149 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3150 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3151 }; 3152 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); 3153 3154 static const MCPhysReg FPArgRegs[] = { 3155 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 3156 PPC::F8 3157 }; 3158 unsigned NumFPArgRegs = array_lengthof(FPArgRegs); 3159 3160 if (useSoftFloat()) 3161 NumFPArgRegs = 0; 3162 3163 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs)); 3164 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs)); 3165 3166 // Make room for NumGPArgRegs and NumFPArgRegs. 3167 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + 3168 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8; 3169 3170 FuncInfo->setVarArgsStackOffset( 3171 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8, 3172 CCInfo.getNextStackOffset(), true)); 3173 3174 FuncInfo->setVarArgsFrameIndex(MFI.CreateStackObject(Depth, 8, false)); 3175 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3176 3177 // The fixed integer arguments of a variadic function are stored to the 3178 // VarArgsFrameIndex on the stack so that they may be loaded by 3179 // dereferencing the result of va_next. 3180 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) { 3181 // Get an existing live-in vreg, or add a new one. 3182 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]); 3183 if (!VReg) 3184 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); 3185 3186 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3187 SDValue Store = 3188 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3189 MemOps.push_back(Store); 3190 // Increment the address by four for the next argument to store 3191 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); 3192 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3193 } 3194 3195 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 3196 // is set. 3197 // The double arguments are stored to the VarArgsFrameIndex 3198 // on the stack. 3199 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) { 3200 // Get an existing live-in vreg, or add a new one. 3201 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); 3202 if (!VReg) 3203 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 3204 3205 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 3206 SDValue Store = 3207 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3208 MemOps.push_back(Store); 3209 // Increment the address by eight for the next argument to store 3210 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl, 3211 PtrVT); 3212 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3213 } 3214 } 3215 3216 if (!MemOps.empty()) 3217 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3218 3219 return Chain; 3220 } 3221 3222 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3223 // value to MVT::i64 and then truncate to the correct register size. 
3224 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, 3225 EVT ObjectVT, SelectionDAG &DAG, 3226 SDValue ArgVal, 3227 const SDLoc &dl) const { 3228 if (Flags.isSExt()) 3229 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 3230 DAG.getValueType(ObjectVT)); 3231 else if (Flags.isZExt()) 3232 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 3233 DAG.getValueType(ObjectVT)); 3234 3235 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal); 3236 } 3237 3238 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4( 3239 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3240 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3241 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3242 // TODO: add description of PPC stack frame format, or at least some docs. 3243 // 3244 bool isELFv2ABI = Subtarget.isELFv2ABI(); 3245 bool isLittleEndian = Subtarget.isLittleEndian(); 3246 MachineFunction &MF = DAG.getMachineFunction(); 3247 MachineFrameInfo &MFI = MF.getFrameInfo(); 3248 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3249 3250 assert(!(CallConv == CallingConv::Fast && isVarArg) && 3251 "fastcc not supported on varargs functions"); 3252 3253 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3254 // Potential tail calls could cause overwriting of argument stack slots. 3255 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3256 (CallConv == CallingConv::Fast)); 3257 unsigned PtrByteSize = 8; 3258 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3259 3260 static const MCPhysReg GPR[] = { 3261 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3262 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3263 }; 3264 static const MCPhysReg VR[] = { 3265 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3266 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3267 }; 3268 3269 const unsigned Num_GPR_Regs = array_lengthof(GPR); 3270 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13; 3271 const unsigned Num_VR_Regs = array_lengthof(VR); 3272 const unsigned Num_QFPR_Regs = Num_FPR_Regs; 3273 3274 // Do a first pass over the arguments to determine whether the ABI 3275 // guarantees that our caller has allocated the parameter save area 3276 // on its stack frame. In the ELFv1 ABI, this is always the case; 3277 // in the ELFv2 ABI, it is true if this is a vararg function or if 3278 // any parameter is located in a stack slot. 3279 3280 bool HasParameterArea = !isELFv2ABI || isVarArg; 3281 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize; 3282 unsigned NumBytes = LinkageSize; 3283 unsigned AvailableFPRs = Num_FPR_Regs; 3284 unsigned AvailableVRs = Num_VR_Regs; 3285 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 3286 if (Ins[i].Flags.isNest()) 3287 continue; 3288 3289 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags, 3290 PtrByteSize, LinkageSize, ParamAreaSize, 3291 NumBytes, AvailableFPRs, AvailableVRs, 3292 Subtarget.hasQPX())) 3293 HasParameterArea = true; 3294 } 3295 3296 // Add DAG nodes to load the arguments or copy them out of registers. On 3297 // entry to a function on PPC, the arguments start after the linkage area, 3298 // although the first ones are often in registers. 
3299 3300 unsigned ArgOffset = LinkageSize; 3301 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3302 unsigned &QFPR_idx = FPR_idx; 3303 SmallVector<SDValue, 8> MemOps; 3304 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 3305 unsigned CurArgIdx = 0; 3306 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3307 SDValue ArgVal; 3308 bool needsLoad = false; 3309 EVT ObjectVT = Ins[ArgNo].VT; 3310 EVT OrigVT = Ins[ArgNo].ArgVT; 3311 unsigned ObjSize = ObjectVT.getStoreSize(); 3312 unsigned ArgSize = ObjSize; 3313 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3314 if (Ins[ArgNo].isOrigArg()) { 3315 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 3316 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 3317 } 3318 // We re-align the argument offset for each argument, except when using the 3319 // fast calling convention, when we need to make sure we do that only when 3320 // we'll actually use a stack slot. 3321 unsigned CurArgOffset, Align; 3322 auto ComputeArgOffset = [&]() { 3323 /* Respect alignment of argument on the stack. */ 3324 Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize); 3325 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 3326 CurArgOffset = ArgOffset; 3327 }; 3328 3329 if (CallConv != CallingConv::Fast) { 3330 ComputeArgOffset(); 3331 3332 /* Compute GPR index associated with argument offset. */ 3333 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 3334 GPR_idx = std::min(GPR_idx, Num_GPR_Regs); 3335 } 3336 3337 // FIXME the codegen can be much improved in some cases. 3338 // We do not have to keep everything in memory. 3339 if (Flags.isByVal()) { 3340 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 3341 3342 if (CallConv == CallingConv::Fast) 3343 ComputeArgOffset(); 3344 3345 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3346 ObjSize = Flags.getByValSize(); 3347 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3348 // Empty aggregate parameters do not take up registers. Examples: 3349 // struct { } a; 3350 // union { } b; 3351 // int c[0]; 3352 // etc. However, we have to provide a place-holder in InVals, so 3353 // pretend we have an 8-byte item at the current address for that 3354 // purpose. 3355 if (!ObjSize) { 3356 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true); 3357 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3358 InVals.push_back(FIN); 3359 continue; 3360 } 3361 3362 // Create a stack object covering all stack doublewords occupied 3363 // by the argument. If the argument is (fully or partially) on 3364 // the stack, or if the argument is fully in registers but the 3365 // caller has allocated the parameter save anyway, we can refer 3366 // directly to the caller's stack frame. Otherwise, create a 3367 // local copy in our own frame. 3368 int FI; 3369 if (HasParameterArea || 3370 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize) 3371 FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true); 3372 else 3373 FI = MFI.CreateStackObject(ArgSize, Align, false); 3374 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3375 3376 // Handle aggregates smaller than 8 bytes. 3377 if (ObjSize < PtrByteSize) { 3378 // The value of the object is its address, which differs from the 3379 // address of the enclosing doubleword on big-endian systems. 
3380 SDValue Arg = FIN; 3381 if (!isLittleEndian) { 3382 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT); 3383 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff); 3384 } 3385 InVals.push_back(Arg); 3386 3387 if (GPR_idx != Num_GPR_Regs) { 3388 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3389 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3390 SDValue Store; 3391 3392 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 3393 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 3394 (ObjSize == 2 ? MVT::i16 : MVT::i32)); 3395 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg, 3396 MachinePointerInfo(&*FuncArg), ObjType); 3397 } else { 3398 // For sizes that don't fit a truncating store (3, 5, 6, 7), 3399 // store the whole register as-is to the parameter save area 3400 // slot. 3401 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3402 MachinePointerInfo(&*FuncArg)); 3403 } 3404 3405 MemOps.push_back(Store); 3406 } 3407 // Whether we copied from a register or not, advance the offset 3408 // into the parameter save area by a full doubleword. 3409 ArgOffset += PtrByteSize; 3410 continue; 3411 } 3412 3413 // The value of the object is its address, which is the address of 3414 // its first stack doubleword. 3415 InVals.push_back(FIN); 3416 3417 // Store whatever pieces of the object are in registers to memory. 3418 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3419 if (GPR_idx == Num_GPR_Regs) 3420 break; 3421 3422 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3423 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3424 SDValue Addr = FIN; 3425 if (j) { 3426 SDValue Off = DAG.getConstant(j, dl, PtrVT); 3427 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off); 3428 } 3429 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr, 3430 MachinePointerInfo(&*FuncArg, j)); 3431 MemOps.push_back(Store); 3432 ++GPR_idx; 3433 } 3434 ArgOffset += ArgSize; 3435 continue; 3436 } 3437 3438 switch (ObjectVT.getSimpleVT().SimpleTy) { 3439 default: llvm_unreachable("Unhandled argument type!"); 3440 case MVT::i1: 3441 case MVT::i32: 3442 case MVT::i64: 3443 if (Flags.isNest()) { 3444 // The 'nest' parameter, if any, is passed in R11. 3445 unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass); 3446 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3447 3448 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3449 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3450 3451 break; 3452 } 3453 3454 // These can be scalar arguments or elements of an integer array type 3455 // passed directly. Clang may use those instead of "byval" aggregate 3456 // types to avoid forcing arguments to memory unnecessarily. 3457 if (GPR_idx != Num_GPR_Regs) { 3458 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3459 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3460 3461 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3462 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3463 // value to MVT::i64 and then truncate to the correct register size. 
3464 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3465 } else { 3466 if (CallConv == CallingConv::Fast) 3467 ComputeArgOffset(); 3468 3469 needsLoad = true; 3470 ArgSize = PtrByteSize; 3471 } 3472 if (CallConv != CallingConv::Fast || needsLoad) 3473 ArgOffset += 8; 3474 break; 3475 3476 case MVT::f32: 3477 case MVT::f64: 3478 // These can be scalar arguments or elements of a float array type 3479 // passed directly. The latter are used to implement ELFv2 homogenous 3480 // float aggregates. 3481 if (FPR_idx != Num_FPR_Regs) { 3482 unsigned VReg; 3483 3484 if (ObjectVT == MVT::f32) 3485 VReg = MF.addLiveIn(FPR[FPR_idx], 3486 Subtarget.hasP8Vector() 3487 ? &PPC::VSSRCRegClass 3488 : &PPC::F4RCRegClass); 3489 else 3490 VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX() 3491 ? &PPC::VSFRCRegClass 3492 : &PPC::F8RCRegClass); 3493 3494 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3495 ++FPR_idx; 3496 } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) { 3497 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 3498 // once we support fp <-> gpr moves. 3499 3500 // This can only ever happen in the presence of f32 array types, 3501 // since otherwise we never run out of FPRs before running out 3502 // of GPRs. 3503 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3504 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3505 3506 if (ObjectVT == MVT::f32) { 3507 if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0)) 3508 ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal, 3509 DAG.getConstant(32, dl, MVT::i32)); 3510 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); 3511 } 3512 3513 ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal); 3514 } else { 3515 if (CallConv == CallingConv::Fast) 3516 ComputeArgOffset(); 3517 3518 needsLoad = true; 3519 } 3520 3521 // When passing an array of floats, the array occupies consecutive 3522 // space in the argument area; only round up to the next doubleword 3523 // at the end of the array. Otherwise, each float takes 8 bytes. 3524 if (CallConv != CallingConv::Fast || needsLoad) { 3525 ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize; 3526 ArgOffset += ArgSize; 3527 if (Flags.isInConsecutiveRegsLast()) 3528 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3529 } 3530 break; 3531 case MVT::v4f32: 3532 case MVT::v4i32: 3533 case MVT::v8i16: 3534 case MVT::v16i8: 3535 case MVT::v2f64: 3536 case MVT::v2i64: 3537 case MVT::v1i128: 3538 if (!Subtarget.hasQPX()) { 3539 // These can be scalar arguments or elements of a vector array type 3540 // passed directly. The latter are used to implement ELFv2 homogenous 3541 // vector aggregates. 3542 if (VR_idx != Num_VR_Regs) { 3543 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 3544 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3545 ++VR_idx; 3546 } else { 3547 if (CallConv == CallingConv::Fast) 3548 ComputeArgOffset(); 3549 3550 needsLoad = true; 3551 } 3552 if (CallConv != CallingConv::Fast || needsLoad) 3553 ArgOffset += 16; 3554 break; 3555 } // not QPX 3556 3557 assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 && 3558 "Invalid QPX parameter type"); 3559 /* fall through */ 3560 3561 case MVT::v4f64: 3562 case MVT::v4i1: 3563 // QPX vectors are treated like their scalar floating-point subregisters 3564 // (except that they're larger). 3565 unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 
          16 : 32;
      if (QFPR_idx != Num_QFPR_Regs) {
        const TargetRegisterClass *RC;
        switch (ObjectVT.getSimpleVT().SimpleTy) {
        case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
        case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
        default:         RC = &PPC::QBRCRegClass; break;
        }

        unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++QFPR_idx;
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();
        needsLoad = true;
      }
      if (CallConv != CallingConv::Fast || needsLoad)
        ArgOffset += Sz;
      break;
    }

    // We need to load the argument to a virtual register if we determined
    // above that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      if (ObjSize < ArgSize && !isLittleEndian)
        CurArgOffset += ArgSize - ObjSize;
      int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
    }

    InVals.push_back(ArgVal);
  }

  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea;
  if (HasParameterArea)
    MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
  else
    MinReservedArea = LinkageSize;

  // Set the size that is at least reserved in the caller of this function.
  // Tail call optimized functions' reserved stack space needs to be aligned
  // so that taking the difference between two stack areas will result in an
  // aligned stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    int Depth = ArgOffset;

    FuncInfo->setVarArgsFrameIndex(
        MFI.CreateFixedObject(PtrByteSize, Depth, true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
    for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
         GPR_idx < Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by eight bytes (the 64-bit pointer size) for
      // the next argument to store.
      SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  // TODO: add description of PPC stack frame format, or at least some docs.
3651 // 3652 MachineFunction &MF = DAG.getMachineFunction(); 3653 MachineFrameInfo &MFI = MF.getFrameInfo(); 3654 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3655 3656 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3657 bool isPPC64 = PtrVT == MVT::i64; 3658 // Potential tail calls could cause overwriting of argument stack slots. 3659 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3660 (CallConv == CallingConv::Fast)); 3661 unsigned PtrByteSize = isPPC64 ? 8 : 4; 3662 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3663 unsigned ArgOffset = LinkageSize; 3664 // Area that is at least reserved in caller of this function. 3665 unsigned MinReservedArea = ArgOffset; 3666 3667 static const MCPhysReg GPR_32[] = { // 32-bit registers. 3668 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3669 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3670 }; 3671 static const MCPhysReg GPR_64[] = { // 64-bit registers. 3672 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3673 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3674 }; 3675 static const MCPhysReg VR[] = { 3676 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3677 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3678 }; 3679 3680 const unsigned Num_GPR_Regs = array_lengthof(GPR_32); 3681 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13; 3682 const unsigned Num_VR_Regs = array_lengthof( VR); 3683 3684 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3685 3686 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 3687 3688 // In 32-bit non-varargs functions, the stack space for vectors is after the 3689 // stack space for non-vectors. We do not use this space unless we have 3690 // too many vectors to fit in registers, something that only occurs in 3691 // constructed examples:), but we have to walk the arglist to figure 3692 // that out...for the pathological case, compute VecArgOffset as the 3693 // start of the vector parameter area. Computing VecArgOffset is the 3694 // entire point of the following loop. 3695 unsigned VecArgOffset = ArgOffset; 3696 if (!isVarArg && !isPPC64) { 3697 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; 3698 ++ArgNo) { 3699 EVT ObjectVT = Ins[ArgNo].VT; 3700 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3701 3702 if (Flags.isByVal()) { 3703 // ObjSize is the true size, ArgSize rounded up to multiple of regs. 3704 unsigned ObjSize = Flags.getByValSize(); 3705 unsigned ArgSize = 3706 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3707 VecArgOffset += ArgSize; 3708 continue; 3709 } 3710 3711 switch(ObjectVT.getSimpleVT().SimpleTy) { 3712 default: llvm_unreachable("Unhandled argument type!"); 3713 case MVT::i1: 3714 case MVT::i32: 3715 case MVT::f32: 3716 VecArgOffset += 4; 3717 break; 3718 case MVT::i64: // PPC64 3719 case MVT::f64: 3720 // FIXME: We are guaranteed to be !isPPC64 at this point. 3721 // Does MVT::i64 apply? 3722 VecArgOffset += 8; 3723 break; 3724 case MVT::v4f32: 3725 case MVT::v4i32: 3726 case MVT::v8i16: 3727 case MVT::v16i8: 3728 // Nothing to do, we're only looking at Nonvector args here. 3729 break; 3730 } 3731 } 3732 } 3733 // We've found where the vector parameter area in memory is. Skip the 3734 // first 12 parameters; these don't use that memory. 3735 VecArgOffset = ((VecArgOffset+15)/16)*16; 3736 VecArgOffset += 12*16; 3737 3738 // Add DAG nodes to load the arguments or copy them out of registers. On 3739 // entry to a function on PPC, the arguments start after the linkage area, 3740 // although the first ones are often in registers. 
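  // For example, in a 32-bit non-varargs function the first 12 vector
  // arguments travel in V2-V13 and consume no parameter memory; only the
  // 13th and later vector arguments are placed at VecArgOffset and beyond.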
3741 3742 SmallVector<SDValue, 8> MemOps; 3743 unsigned nAltivecParamsAtEnd = 0; 3744 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 3745 unsigned CurArgIdx = 0; 3746 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3747 SDValue ArgVal; 3748 bool needsLoad = false; 3749 EVT ObjectVT = Ins[ArgNo].VT; 3750 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 3751 unsigned ArgSize = ObjSize; 3752 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3753 if (Ins[ArgNo].isOrigArg()) { 3754 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 3755 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 3756 } 3757 unsigned CurArgOffset = ArgOffset; 3758 3759 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 3760 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 3761 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 3762 if (isVarArg || isPPC64) { 3763 MinReservedArea = ((MinReservedArea+15)/16)*16; 3764 MinReservedArea += CalculateStackSlotSize(ObjectVT, 3765 Flags, 3766 PtrByteSize); 3767 } else nAltivecParamsAtEnd++; 3768 } else 3769 // Calculate min reserved area. 3770 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 3771 Flags, 3772 PtrByteSize); 3773 3774 // FIXME the codegen can be much improved in some cases. 3775 // We do not have to keep everything in memory. 3776 if (Flags.isByVal()) { 3777 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 3778 3779 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3780 ObjSize = Flags.getByValSize(); 3781 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3782 // Objects of size 1 and 2 are right justified, everything else is 3783 // left justified. This means the memory address is adjusted forwards. 3784 if (ObjSize==1 || ObjSize==2) { 3785 CurArgOffset = CurArgOffset + (4 - ObjSize); 3786 } 3787 // The value of the object is its address. 3788 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true); 3789 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3790 InVals.push_back(FIN); 3791 if (ObjSize==1 || ObjSize==2) { 3792 if (GPR_idx != Num_GPR_Regs) { 3793 unsigned VReg; 3794 if (isPPC64) 3795 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3796 else 3797 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3798 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3799 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16; 3800 SDValue Store = 3801 DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 3802 MachinePointerInfo(&*FuncArg), ObjType); 3803 MemOps.push_back(Store); 3804 ++GPR_idx; 3805 } 3806 3807 ArgOffset += PtrByteSize; 3808 3809 continue; 3810 } 3811 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3812 // Store whatever pieces of the object are in registers 3813 // to memory. ArgOffset will be the address of the beginning 3814 // of the object. 
3815 if (GPR_idx != Num_GPR_Regs) { 3816 unsigned VReg; 3817 if (isPPC64) 3818 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3819 else 3820 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3821 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true); 3822 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3823 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3824 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3825 MachinePointerInfo(&*FuncArg, j)); 3826 MemOps.push_back(Store); 3827 ++GPR_idx; 3828 ArgOffset += PtrByteSize; 3829 } else { 3830 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 3831 break; 3832 } 3833 } 3834 continue; 3835 } 3836 3837 switch (ObjectVT.getSimpleVT().SimpleTy) { 3838 default: llvm_unreachable("Unhandled argument type!"); 3839 case MVT::i1: 3840 case MVT::i32: 3841 if (!isPPC64) { 3842 if (GPR_idx != Num_GPR_Regs) { 3843 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3844 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 3845 3846 if (ObjectVT == MVT::i1) 3847 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal); 3848 3849 ++GPR_idx; 3850 } else { 3851 needsLoad = true; 3852 ArgSize = PtrByteSize; 3853 } 3854 // All int arguments reserve stack space in the Darwin ABI. 3855 ArgOffset += PtrByteSize; 3856 break; 3857 } 3858 LLVM_FALLTHROUGH; 3859 case MVT::i64: // PPC64 3860 if (GPR_idx != Num_GPR_Regs) { 3861 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3862 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3863 3864 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3865 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3866 // value to MVT::i64 and then truncate to the correct register size. 3867 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3868 3869 ++GPR_idx; 3870 } else { 3871 needsLoad = true; 3872 ArgSize = PtrByteSize; 3873 } 3874 // All int arguments reserve stack space in the Darwin ABI. 3875 ArgOffset += 8; 3876 break; 3877 3878 case MVT::f32: 3879 case MVT::f64: 3880 // Every 4 bytes of argument space consumes one of the GPRs available for 3881 // argument passing. 3882 if (GPR_idx != Num_GPR_Regs) { 3883 ++GPR_idx; 3884 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 3885 ++GPR_idx; 3886 } 3887 if (FPR_idx != Num_FPR_Regs) { 3888 unsigned VReg; 3889 3890 if (ObjectVT == MVT::f32) 3891 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 3892 else 3893 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 3894 3895 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3896 ++FPR_idx; 3897 } else { 3898 needsLoad = true; 3899 } 3900 3901 // All FP arguments reserve stack space in the Darwin ABI. 3902 ArgOffset += isPPC64 ? 8 : ObjSize; 3903 break; 3904 case MVT::v4f32: 3905 case MVT::v4i32: 3906 case MVT::v8i16: 3907 case MVT::v16i8: 3908 // Note that vector arguments in registers don't reserve stack space, 3909 // except in varargs functions. 3910 if (VR_idx != Num_VR_Regs) { 3911 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 3912 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3913 if (isVarArg) { 3914 while ((ArgOffset % 16) != 0) { 3915 ArgOffset += PtrByteSize; 3916 if (GPR_idx != Num_GPR_Regs) 3917 GPR_idx++; 3918 } 3919 ArgOffset += 16; 3920 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 3921 } 3922 ++VR_idx; 3923 } else { 3924 if (!isVarArg && !isPPC64) { 3925 // Vectors go after all the nonvectors. 
          CurArgOffset = VecArgOffset;
          VecArgOffset += 16;
        } else {
          // Vectors are aligned.
          ArgOffset = ((ArgOffset+15)/16)*16;
          CurArgOffset = ArgOffset;
          ArgOffset += 16;
        }
        needsLoad = true;
      }
      break;
    }

    // We need to load the argument to a virtual register if we determined
    // above that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      int FI = MFI.CreateFixedObject(ObjSize,
                                     CurArgOffset + (ArgSize - ObjSize),
                                     isImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
    }

    InVals.push_back(ArgVal);
  }

  // Allow for Altivec parameters at the end, if needed.
  if (nAltivecParamsAtEnd) {
    MinReservedArea = ((MinReservedArea+15)/16)*16;
    MinReservedArea += 16*nAltivecParamsAtEnd;
  }

  // Area that is at least reserved in the caller of this function.
  MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);

  // Set the size that is at least reserved in the caller of this function.
  // Tail call optimized functions' reserved stack space needs to be aligned
  // so that taking the difference between two stack areas will result in an
  // aligned stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    int Depth = ArgOffset;

    FuncInfo->setVarArgsFrameIndex(
        MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
                              Depth, true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
    for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg;

      if (isPPC64)
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      else
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by the pointer size (four or eight bytes) for
      // the next argument to store.
      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
/// adjusted to accommodate the arguments for the tail call.
static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
                                   unsigned ParamSize) {

  if (!isTailCall) return 0;

  PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
  unsigned CallerMinReservedArea = FI->getMinReservedArea();
  int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
  // Remember only if the new adjustment is bigger.
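  // For example, if the caller reserved 112 bytes of argument area but the
  // tail call needs 144 bytes, SPDiff is -32; it is recorded only when it is
  // lower (i.e. a bigger downward adjustment) than the current delta.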
  if (SPDiff < FI->getTailCallSPDelta())
    FI->setTailCallSPDelta(SPDiff);

  return SPDiff;
}

static bool isFunctionGlobalAddress(SDValue Callee);

static bool
resideInSameSection(const Function *Caller, SDValue Callee,
                    const TargetMachine &TM) {
  // If !G, Callee can be an external symbol.
  GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
  if (!G)
    return false;

  const GlobalValue *GV = G->getGlobal();
  if (!GV->isStrongDefinitionForLinker())
    return false;

  // Any explicitly-specified sections and section prefixes must also match.
  // Also, if we're using -ffunction-sections, then each function is always in
  // a different section (the same is true for COMDAT functions).
  if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
      GV->getSection() != Caller->getSection())
    return false;
  if (const auto *F = dyn_cast<Function>(GV)) {
    if (F->getSectionPrefix() != Caller->getSectionPrefix())
      return false;
  }

  // If the callee might be interposed, then we can't assume the ultimate call
  // target will be in the same section. Even in cases where we can assume that
  // interposition won't happen, in any case where the linker might insert a
  // stub to allow for interposition, we must generate code as though
  // interposition might occur. To understand why this matters, consider a
  // situation where: a -> b -> c where the arrows indicate calls. b and c are
  // in the same section, but a is in a different module (i.e. has a different
  // TOC base pointer). If the linker allows for interposition between b and c,
  // then it will generate a stub for the call edge between b and c which will
  // save the TOC pointer into the designated stack slot allocated by b. If we
  // return true here, and therefore allow a tail call between b and c, that
  // stack slot won't exist and the b -> c stub will end up saving b's TOC base
  // pointer into the stack slot allocated by a (where the a -> b stub saved
  // a's TOC base pointer). If we're not considering a tail call, but rather,
  // whether a nop is needed after the call instruction in b, because the
  // linker will insert a stub, it might complain about a missing nop if we
  // omit it (although many don't complain in this case).
  if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
    return false;

  return true;
}

static bool
needStackSlotPassParameters(const PPCSubtarget &Subtarget,
                            const SmallVectorImpl<ISD::OutputArg> &Outs) {
  assert(Subtarget.isSVR4ABI() && Subtarget.isPPC64());

  const unsigned PtrByteSize = 8;
  const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned NumGPRs = array_lengthof(GPR);
  const unsigned NumFPRs = 13;
  const unsigned NumVRs = array_lengthof(VR);
  const unsigned ParamAreaSize = NumGPRs * PtrByteSize;

  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = NumFPRs;
  unsigned AvailableVRs = NumVRs;

  for (const ISD::OutputArg& Param : Outs) {
    if (Param.Flags.isNest()) continue;

    if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
                               PtrByteSize, LinkageSize, ParamAreaSize,
                               NumBytes, AvailableFPRs, AvailableVRs,
                               Subtarget.hasQPX()))
      return true;
  }
  return false;
}

static bool
hasSameArgumentList(const Function *CallerFn, ImmutableCallSite *CS) {
  if (CS->arg_size() != CallerFn->arg_size())
    return false;

  ImmutableCallSite::arg_iterator CalleeArgIter = CS->arg_begin();
  ImmutableCallSite::arg_iterator CalleeArgEnd = CS->arg_end();
  Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();

  for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
    const Value* CalleeArg = *CalleeArgIter;
    const Value* CallerArg = &(*CallerArgIter);
    if (CalleeArg == CallerArg)
      continue;

    // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
    //        tail call @callee([4 x i64] undef, [4 x i64] %b)
    //      }
    // The 1st argument of the callee is undef and has the same type as the
    // caller's corresponding argument.
    if (CalleeArg->getType() == CallerArg->getType() &&
        isa<UndefValue>(CalleeArg))
      continue;

    return false;
  }

  return true;
}

bool
PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
                                    SDValue Callee,
                                    CallingConv::ID CalleeCC,
                                    ImmutableCallSite *CS,
                                    bool isVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SelectionDAG& DAG) const {
  bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;

  if (DisableSCO && !TailCallOpt) return false;

  // Variadic argument functions are not supported.
  if (isVarArg) return false;

  MachineFunction &MF = DAG.getMachineFunction();
  CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();

  // Tail or sibling call optimization (TCO/SCO) requires the callee and
  // caller to have the same calling convention.
  if (CallerCC != CalleeCC) return false;

  // SCO only supports the C and Fast calling conventions.
  if (CalleeCC != CallingConv::Fast && CalleeCC != CallingConv::C)
    return false;

  // A caller with any byval parameter is not supported.
  if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
    return false;

  // A callee with any byval parameter is not supported, either.
  // Note: This is a quick workaround, because in some cases, e.g. when the
  // caller's stack size > the callee's stack size, we are still able to apply
  // sibling call optimization. See: https://reviews.llvm.org/D23441#513574
  if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
    return false;

  // No TCO/SCO on indirect calls, because the caller would have to restore
  // its TOC.
  if (!isFunctionGlobalAddress(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee))
    return false;

  // Check if the callee resides in the same section, because for now the
  // PPC64 SVR4 ABI (ELFv1/ELFv2) doesn't allow tail calls to a symbol that
  // resides in another section.
  // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
  if (!resideInSameSection(MF.getFunction(), Callee, getTargetMachine()))
    return false;

  // TCO allows altering the callee's ABI, so we don't have to check further.
  if (CalleeCC == CallingConv::Fast && TailCallOpt)
    return true;

  if (DisableSCO) return false;

  // If the callee uses the same argument list as the caller, then we can
  // apply SCO in this case. If it does not, we need to check whether the
  // callee needs stack slots for passing arguments.
  if (!hasSameArgumentList(MF.getFunction(), CS) &&
      needStackSlotPassParameters(Subtarget, Outs)) {
    return false;
  }

  return true;
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool
PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                                     CallingConv::ID CalleeCC,
                                                     bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                                     SelectionDAG& DAG) const {
  if (!getTargetMachine().Options.GuaranteedTailCallOpt)
    return false;

  // Variable argument functions are not supported.
  if (isVarArg)
    return false;

  MachineFunction &MF = DAG.getMachineFunction();
  CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
  if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
    // Functions containing byval parameters are not supported.
    for (unsigned i = 0; i != Ins.size(); i++) {
      ISD::ArgFlagsTy Flags = Ins[i].Flags;
      if (Flags.isByVal()) return false;
    }

    // Non-PIC/GOT tail calls are supported.
    if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
      return true;

    // At the moment we can only do local tail calls (in the same module,
    // hidden or protected) if we are generating PIC.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
      return G->getGlobal()->hasHiddenVisibility()
          || G->getGlobal()->hasProtectedVisibility();
  }

  return false;
}

/// isBLACompatibleAddress - Return the immediate to use if the specified
/// 32-bit value is representable in the immediate field of a BxA instruction.
static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
  if (!C) return nullptr;

  int Addr = C->getZExtValue();
  if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
      SignExtend32<26>(Addr) != Addr)
    return nullptr;  // Top 6 bits have to be sext of immediate.
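  // For example, an absolute target of 0x2000 is encoded as 0x800 (the
  // address shifted right by two), while 0x2001 is rejected above because
  // its low two bits are not zero.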
4254 4255 return DAG 4256 .getConstant( 4257 (int)C->getZExtValue() >> 2, SDLoc(Op), 4258 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout())) 4259 .getNode(); 4260 } 4261 4262 namespace { 4263 4264 struct TailCallArgumentInfo { 4265 SDValue Arg; 4266 SDValue FrameIdxOp; 4267 int FrameIdx = 0; 4268 4269 TailCallArgumentInfo() = default; 4270 }; 4271 4272 } // end anonymous namespace 4273 4274 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. 4275 static void StoreTailCallArgumentsToStackSlot( 4276 SelectionDAG &DAG, SDValue Chain, 4277 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs, 4278 SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) { 4279 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 4280 SDValue Arg = TailCallArgs[i].Arg; 4281 SDValue FIN = TailCallArgs[i].FrameIdxOp; 4282 int FI = TailCallArgs[i].FrameIdx; 4283 // Store relative to framepointer. 4284 MemOpChains.push_back(DAG.getStore( 4285 Chain, dl, Arg, FIN, 4286 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI))); 4287 } 4288 } 4289 4290 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 4291 /// the appropriate stack slot for the tail call optimized function call. 4292 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain, 4293 SDValue OldRetAddr, SDValue OldFP, 4294 int SPDiff, const SDLoc &dl) { 4295 if (SPDiff) { 4296 // Calculate the new stack slot for the return address. 4297 MachineFunction &MF = DAG.getMachineFunction(); 4298 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); 4299 const PPCFrameLowering *FL = Subtarget.getFrameLowering(); 4300 bool isPPC64 = Subtarget.isPPC64(); 4301 int SlotSize = isPPC64 ? 8 : 4; 4302 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset(); 4303 int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize, 4304 NewRetAddrLoc, true); 4305 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 4306 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 4307 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx, 4308 MachinePointerInfo::getFixedStack(MF, NewRetAddr)); 4309 4310 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack 4311 // slot as the FP is never overwritten. 4312 if (Subtarget.isDarwinABI()) { 4313 int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset(); 4314 int NewFPIdx = MF.getFrameInfo().CreateFixedObject(SlotSize, NewFPLoc, 4315 true); 4316 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 4317 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx, 4318 MachinePointerInfo::getFixedStack( 4319 DAG.getMachineFunction(), NewFPIdx)); 4320 } 4321 } 4322 return Chain; 4323 } 4324 4325 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate 4326 /// the position of the argument. 4327 static void 4328 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 4329 SDValue Arg, int SPDiff, unsigned ArgOffset, 4330 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) { 4331 int Offset = ArgOffset + SPDiff; 4332 uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8; 4333 int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true); 4334 EVT VT = isPPC64 ? 
                         MVT::i64 : MVT::i32;
  SDValue FIN = DAG.getFrameIndex(FI, VT);
  TailCallArgumentInfo Info;
  Info.Arg = Arg;
  Info.FrameIdxOp = FIN;
  Info.FrameIdx = FI;
  TailCallArguments.push_back(Info);
}

/// EmitTailCallLoadFPAndRetAddr - Emit loads of the return address and frame
/// pointer from their stack slots. Returns the chain as result and the loaded
/// values in LROpOut/FPOpOut. Used when tail calling.
SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
    SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
    SDValue &FPOpOut, const SDLoc &dl) const {
  if (SPDiff) {
    // Load the LR and FP stack slots for later adjusting.
    EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
    LROpOut = getReturnAddrFrameIndex(DAG);
    LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
    Chain = SDValue(LROpOut.getNode(), 1);

    // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
    // slot as the FP is never overwritten.
    if (Subtarget.isDarwinABI()) {
      FPOpOut = getFramePointerFrameIndex(DAG);
      FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo());
      Chain = SDValue(FPOpOut.getNode(), 1);
    }
  }
  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
/// specified by "Src" to address "Dst" of size "Size". Alignment information
/// is specified by the specific parameter attribute. The copy will be passed
/// as a byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part that
/// does not fit in registers.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       false, false, false, MachinePointerInfo(),
                       MachinePointerInfo());
}

/// LowerMemOpCallTo - Store the argument to the stack or remember it in case
/// of tail calls.
static void LowerMemOpCallTo(
    SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
    SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
    bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
    SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  if (!isTailCall) {
    if (isVector) {
      SDValue StackPtr;
      if (isPPC64)
        StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
      else
        StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
                           DAG.getConstant(ArgOffset, dl, PtrVT));
    }
    MemOpChains.push_back(
        DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
  // Calculate and remember argument location.
4403 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 4404 TailCallArguments); 4405 } 4406 4407 static void 4408 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 4409 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp, 4410 SDValue FPOp, 4411 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 4412 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 4413 // might overwrite each other in case of tail call optimization. 4414 SmallVector<SDValue, 8> MemOpChains2; 4415 // Do not flag preceding copytoreg stuff together with the following stuff. 4416 InFlag = SDValue(); 4417 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 4418 MemOpChains2, dl); 4419 if (!MemOpChains2.empty()) 4420 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); 4421 4422 // Store the return address to the appropriate stack slot. 4423 Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl); 4424 4425 // Emit callseq_end just before tailcall node. 4426 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4427 DAG.getIntPtrConstant(0, dl, true), InFlag, dl); 4428 InFlag = Chain.getValue(1); 4429 } 4430 4431 // Is this global address that of a function that can be called by name? (as 4432 // opposed to something that must hold a descriptor for an indirect call). 4433 static bool isFunctionGlobalAddress(SDValue Callee) { 4434 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 4435 if (Callee.getOpcode() == ISD::GlobalTLSAddress || 4436 Callee.getOpcode() == ISD::TargetGlobalTLSAddress) 4437 return false; 4438 4439 return G->getGlobal()->getValueType()->isFunctionTy(); 4440 } 4441 4442 return false; 4443 } 4444 4445 static unsigned 4446 PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain, 4447 SDValue CallSeqStart, const SDLoc &dl, int SPDiff, bool isTailCall, 4448 bool isPatchPoint, bool hasNest, 4449 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, 4450 SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys, 4451 ImmutableCallSite *CS, const PPCSubtarget &Subtarget) { 4452 bool isPPC64 = Subtarget.isPPC64(); 4453 bool isSVR4ABI = Subtarget.isSVR4ABI(); 4454 bool isELFv2ABI = Subtarget.isELFv2ABI(); 4455 4456 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4457 NodeTys.push_back(MVT::Other); // Returns a chain 4458 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. 4459 4460 unsigned CallOpc = PPCISD::CALL; 4461 4462 bool needIndirectCall = true; 4463 if (!isSVR4ABI || !isPPC64) 4464 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { 4465 // If this is an absolute destination address, use the munged value. 4466 Callee = SDValue(Dest, 0); 4467 needIndirectCall = false; 4468 } 4469 4470 // PC-relative references to external symbols should go through $stub, unless 4471 // we're building with the leopard linker or later, which automatically 4472 // synthesizes these stubs. 
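  // On ELF targets the analogous mechanism is the PLT: the MO_PLT operand
  // flag computed below routes calls to symbols that are not known to be
  // DSO-local through the procedure linkage table.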
4473 const TargetMachine &TM = DAG.getTarget(); 4474 const Module *Mod = DAG.getMachineFunction().getFunction()->getParent(); 4475 const GlobalValue *GV = nullptr; 4476 if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) 4477 GV = G->getGlobal(); 4478 bool Local = TM.shouldAssumeDSOLocal(*Mod, GV); 4479 bool UsePlt = !Local && Subtarget.isTargetELF() && !isPPC64; 4480 4481 if (isFunctionGlobalAddress(Callee)) { 4482 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee); 4483 // A call to a TLS address is actually an indirect call to a 4484 // thread-specific pointer. 4485 unsigned OpFlags = 0; 4486 if (UsePlt) 4487 OpFlags = PPCII::MO_PLT; 4488 4489 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, 4490 // every direct call is) turn it into a TargetGlobalAddress / 4491 // TargetExternalSymbol node so that legalize doesn't hack it. 4492 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, 4493 Callee.getValueType(), 0, OpFlags); 4494 needIndirectCall = false; 4495 } 4496 4497 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 4498 unsigned char OpFlags = 0; 4499 4500 if (UsePlt) 4501 OpFlags = PPCII::MO_PLT; 4502 4503 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(), 4504 OpFlags); 4505 needIndirectCall = false; 4506 } 4507 4508 if (isPatchPoint) { 4509 // We'll form an invalid direct call when lowering a patchpoint; the full 4510 // sequence for an indirect call is complicated, and many of the 4511 // instructions introduced might have side effects (and, thus, can't be 4512 // removed later). The call itself will be removed as soon as the 4513 // argument/return lowering is complete, so the fact that it has the wrong 4514 // kind of operands should not really matter. 4515 needIndirectCall = false; 4516 } 4517 4518 if (needIndirectCall) { 4519 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 4520 // to do the call, we can't use PPCISD::CALL. 4521 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 4522 4523 if (isSVR4ABI && isPPC64 && !isELFv2ABI) { 4524 // Function pointers in the 64-bit SVR4 ABI do not point to the function 4525 // entry point, but to the function descriptor (the function entry point 4526 // address is part of the function descriptor though). 4527 // The function descriptor is a three doubleword structure with the 4528 // following fields: function entry point, TOC base address and 4529 // environment pointer. 4530 // Thus for a call through a function pointer, the following actions need 4531 // to be performed: 4532 // 1. Save the TOC of the caller in the TOC save area of its stack 4533 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). 4534 // 2. Load the address of the function entry point from the function 4535 // descriptor. 4536 // 3. Load the TOC of the callee from the function descriptor into r2. 4537 // 4. Load the environment pointer from the function descriptor into 4538 // r11. 4539 // 5. Branch to the function entry point address. 4540 // 6. On return of the callee, the TOC of the caller needs to be 4541 // restored (this is done in FinishCall()). 4542 // 4543 // The loads are scheduled at the beginning of the call sequence, and the 4544 // register copies are flagged together to ensure that no other 4545 // operations can be scheduled in between. E.g. 
without flagging the 4546 // copies together, a TOC access in the caller could be scheduled between 4547 // the assignment of the callee TOC and the branch to the callee, which 4548 // results in the TOC access going through the TOC of the callee instead 4549 // of going through the TOC of the caller, which leads to incorrect code. 4550 4551 // Load the address of the function entry point from the function 4552 // descriptor. 4553 SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1); 4554 if (LDChain.getValueType() == MVT::Glue) 4555 LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2); 4556 4557 auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors() 4558 ? (MachineMemOperand::MODereferenceable | 4559 MachineMemOperand::MOInvariant) 4560 : MachineMemOperand::MONone; 4561 4562 MachinePointerInfo MPI(CS ? CS->getCalledValue() : nullptr); 4563 SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI, 4564 /* Alignment = */ 8, MMOFlags); 4565 4566 // Load environment pointer into r11. 4567 SDValue PtrOff = DAG.getIntPtrConstant(16, dl); 4568 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 4569 SDValue LoadEnvPtr = 4570 DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, MPI.getWithOffset(16), 4571 /* Alignment = */ 8, MMOFlags); 4572 4573 SDValue TOCOff = DAG.getIntPtrConstant(8, dl); 4574 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff); 4575 SDValue TOCPtr = 4576 DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, MPI.getWithOffset(8), 4577 /* Alignment = */ 8, MMOFlags); 4578 4579 setUsesTOCBasePtr(DAG); 4580 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr, 4581 InFlag); 4582 Chain = TOCVal.getValue(0); 4583 InFlag = TOCVal.getValue(1); 4584 4585 // If the function call has an explicit 'nest' parameter, it takes the 4586 // place of the environment pointer. 4587 if (!hasNest) { 4588 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 4589 InFlag); 4590 4591 Chain = EnvVal.getValue(0); 4592 InFlag = EnvVal.getValue(1); 4593 } 4594 4595 MTCTROps[0] = Chain; 4596 MTCTROps[1] = LoadFuncPtr; 4597 MTCTROps[2] = InFlag; 4598 } 4599 4600 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, 4601 makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2)); 4602 InFlag = Chain.getValue(1); 4603 4604 NodeTys.clear(); 4605 NodeTys.push_back(MVT::Other); 4606 NodeTys.push_back(MVT::Glue); 4607 Ops.push_back(Chain); 4608 CallOpc = PPCISD::BCTRL; 4609 Callee.setNode(nullptr); 4610 // Add use of X11 (holding environment pointer) 4611 if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest) 4612 Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); 4613 // Add CTR register as callee so a bctr can be emitted later. 4614 if (isTailCall) 4615 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 4616 } 4617 4618 // If this is a direct call, pass the chain and the callee. 4619 if (Callee.getNode()) { 4620 Ops.push_back(Chain); 4621 Ops.push_back(Callee); 4622 } 4623 // If this is a tail call add stack pointer delta. 4624 if (isTailCall) 4625 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32)); 4626 4627 // Add argument registers to the end of the list so that they are known live 4628 // into the call. 4629 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 4630 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 4631 RegsToPass[i].second.getValueType())); 4632 4633 // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live 4634 // into the call. 
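
  // For reference, the ELFv1 function descriptor dereferenced by the indirect
  // call sequence above can be pictured as the following structure (a sketch
  // only; the lowering addresses the fields by byte offset rather than
  // through a C type):
  //
  //   struct FunctionDescriptor {
  //     uint64_t EntryPoint;  // +0:  loaded and moved into CTR
  //     uint64_t TOCBase;     // +8:  copied into r2
  //     uint64_t EnvPointer;  // +16: copied into r11 (unless 'nest' is used)
  //   };
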
4635 if (isSVR4ABI && isPPC64 && !isPatchPoint) { 4636 setUsesTOCBasePtr(DAG); 4637 Ops.push_back(DAG.getRegister(PPC::X2, PtrVT)); 4638 } 4639 4640 return CallOpc; 4641 } 4642 4643 SDValue PPCTargetLowering::LowerCallResult( 4644 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, 4645 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 4646 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 4647 SmallVector<CCValAssign, 16> RVLocs; 4648 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 4649 *DAG.getContext()); 4650 CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC); 4651 4652 // Copy all of the result registers out of their specified physreg. 4653 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 4654 CCValAssign &VA = RVLocs[i]; 4655 assert(VA.isRegLoc() && "Can only return in registers!"); 4656 4657 SDValue Val = DAG.getCopyFromReg(Chain, dl, 4658 VA.getLocReg(), VA.getLocVT(), InFlag); 4659 Chain = Val.getValue(1); 4660 InFlag = Val.getValue(2); 4661 4662 switch (VA.getLocInfo()) { 4663 default: llvm_unreachable("Unknown loc info!"); 4664 case CCValAssign::Full: break; 4665 case CCValAssign::AExt: 4666 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4667 break; 4668 case CCValAssign::ZExt: 4669 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val, 4670 DAG.getValueType(VA.getValVT())); 4671 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4672 break; 4673 case CCValAssign::SExt: 4674 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val, 4675 DAG.getValueType(VA.getValVT())); 4676 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4677 break; 4678 } 4679 4680 InVals.push_back(Val); 4681 } 4682 4683 return Chain; 4684 } 4685 4686 SDValue PPCTargetLowering::FinishCall( 4687 CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg, 4688 bool isPatchPoint, bool hasNest, SelectionDAG &DAG, 4689 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue InFlag, 4690 SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff, 4691 unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins, 4692 SmallVectorImpl<SDValue> &InVals, ImmutableCallSite *CS) const { 4693 std::vector<EVT> NodeTys; 4694 SmallVector<SDValue, 8> Ops; 4695 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl, 4696 SPDiff, isTailCall, isPatchPoint, hasNest, 4697 RegsToPass, Ops, NodeTys, CS, Subtarget); 4698 4699 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls 4700 if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64()) 4701 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); 4702 4703 // When performing tail call optimization the callee pops its arguments off 4704 // the stack. Account for this here so these bytes can be pushed back on in 4705 // PPCFrameLowering::eliminateCallFramePseudoInstr. 4706 int BytesCalleePops = 4707 (CallConv == CallingConv::Fast && 4708 getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0; 4709 4710 // Add a register mask operand representing the call-preserved registers. 4711 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 4712 const uint32_t *Mask = 4713 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv); 4714 assert(Mask && "Missing call preserved mask for calling convention"); 4715 Ops.push_back(DAG.getRegisterMask(Mask)); 4716 4717 if (InFlag.getNode()) 4718 Ops.push_back(InFlag); 4719 4720 // Emit tail call. 
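  // On this path the matching CALLSEQ_END has already been emitted by
  // PrepareTailCall() (or, for sibling calls, no CALLSEQ_START was opened in
  // the first place), so we return before reaching the CALLSEQ_END below.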
  if (isTailCall) {
    assert(((Callee.getOpcode() == ISD::Register &&
             cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
            Callee.getOpcode() == ISD::TargetExternalSymbol ||
            Callee.getOpcode() == ISD::TargetGlobalAddress ||
            isa<ConstantSDNode>(Callee)) &&
           "Expecting a global address, external symbol, absolute value or "
           "register");

    DAG.getMachineFunction().getFrameInfo().setHasTailCall();
    return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
  }

  // Add a NOP immediately after the branch instruction when using the 64-bit
  // SVR4 ABI. At link time, if caller and callee are in different modules and
  // thus have different TOCs, the call will be replaced with a call to a stub
  // function which saves the current TOC, loads the TOC of the callee and
  // branches to the callee. The NOP will be replaced with a load instruction
  // which restores the TOC of the caller from the TOC save slot of the current
  // stack frame. If caller and callee belong to the same module (and have the
  // same TOC), the NOP will remain unchanged.

  MachineFunction &MF = DAG.getMachineFunction();
  if (!isTailCall && Subtarget.isSVR4ABI() && Subtarget.isPPC64() &&
      !isPatchPoint) {
    if (CallOpc == PPCISD::BCTRL) {
      // This is a call through a function pointer.
      // Restore the caller TOC from the save area into R2.
      // See PrepareCall() for more information about calls through function
      // pointers in the 64-bit SVR4 ABI.
      // We are using a target-specific load with r2 hard coded, because the
      // result of a target-independent load would never go directly into r2,
      // since r2 is a reserved register (which prevents the register allocator
      // from allocating it), resulting in an additional register being
      // allocated and an unnecessary move instruction being generated.
      CallOpc = PPCISD::BCTRL_LOAD_TOC;

      EVT PtrVT = getPointerTy(DAG.getDataLayout());
      SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
      unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
      SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
      SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);

      // The address needs to go after the chain input but before the flag (or
      // any other variadic arguments).
      Ops.insert(std::next(Ops.begin()), AddTOC);
    } else if (CallOpc == PPCISD::CALL &&
               !resideInSameSection(MF.getFunction(), Callee, DAG.getTarget())) {
      // Otherwise insert NOP for non-local calls.
4769 CallOpc = PPCISD::CALL_NOP; 4770 } 4771 } 4772 4773 Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops); 4774 InFlag = Chain.getValue(1); 4775 4776 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4777 DAG.getIntPtrConstant(BytesCalleePops, dl, true), 4778 InFlag, dl); 4779 if (!Ins.empty()) 4780 InFlag = Chain.getValue(1); 4781 4782 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 4783 Ins, dl, DAG, InVals); 4784 } 4785 4786 SDValue 4787 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 4788 SmallVectorImpl<SDValue> &InVals) const { 4789 SelectionDAG &DAG = CLI.DAG; 4790 SDLoc &dl = CLI.DL; 4791 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 4792 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 4793 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 4794 SDValue Chain = CLI.Chain; 4795 SDValue Callee = CLI.Callee; 4796 bool &isTailCall = CLI.IsTailCall; 4797 CallingConv::ID CallConv = CLI.CallConv; 4798 bool isVarArg = CLI.IsVarArg; 4799 bool isPatchPoint = CLI.IsPatchPoint; 4800 ImmutableCallSite *CS = CLI.CS; 4801 4802 if (isTailCall) { 4803 if (Subtarget.useLongCalls() && !(CS && CS->isMustTailCall())) 4804 isTailCall = false; 4805 else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) 4806 isTailCall = 4807 IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS, 4808 isVarArg, Outs, Ins, DAG); 4809 else 4810 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 4811 Ins, DAG); 4812 if (isTailCall) { 4813 ++NumTailCalls; 4814 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 4815 ++NumSiblingCalls; 4816 4817 assert(isa<GlobalAddressSDNode>(Callee) && 4818 "Callee should be an llvm::Function object."); 4819 DEBUG( 4820 const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal(); 4821 const unsigned Width = 80 - strlen("TCO caller: ") 4822 - strlen(", callee linkage: 0, 0"); 4823 dbgs() << "TCO caller: " 4824 << left_justify(DAG.getMachineFunction().getName(), Width) 4825 << ", callee linkage: " 4826 << GV->getVisibility() << ", " << GV->getLinkage() << "\n" 4827 ); 4828 } 4829 } 4830 4831 if (!isTailCall && CS && CS->isMustTailCall()) 4832 report_fatal_error("failed to perform tail call elimination on a call " 4833 "site marked musttail"); 4834 4835 // When long calls (i.e. indirect calls) are always used, calls are always 4836 // made via function pointer. If we have a function name, first translate it 4837 // into a pointer. 
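  // (After this lowering the callee is no longer a GlobalAddressSDNode, so
  // isFunctionGlobalAddress() in PrepareCall() returns false and the call is
  // emitted through the indirect MTCTR/BCTRL path.)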
  if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
      !isTailCall)
    Callee = LowerGlobalAddress(Callee, DAG);

  if (Subtarget.isSVR4ABI()) {
    if (Subtarget.isPPC64())
      return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
                              isTailCall, isPatchPoint, Outs, OutVals, Ins,
                              dl, DAG, InVals, CS);
    else
      return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
                              isTailCall, isPatchPoint, Outs, OutVals, Ins,
                              dl, DAG, InVals, CS);
  }

  return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
                          isTailCall, isPatchPoint, Outs, OutVals, Ins,
                          dl, DAG, InVals, CS);
}

SDValue PPCTargetLowering::LowerCall_32SVR4(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, bool isPatchPoint,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    ImmutableCallSite *CS) const {
  // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
  // of the 32-bit SVR4 ABI stack frame layout.

  assert((CallConv == CallingConv::C ||
          CallConv == CallingConv::Fast) && "Unknown calling convention!");

  unsigned PtrByteSize = 4;

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence the frame pointer will be used for dynamic
  // stack allocation and for restoring the caller's stack pointer in this
  // function's epilogue. This is done because the tail-called function might
  // overwrite the value in this function's (MF) stack pointer stack slot
  // 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, parameter list area and the part of the local variable space which
  // contains copies of aggregates which are passed by value.

  // Assign locations to all of the outgoing arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  PPCCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  // Reserve space for the linkage area on the stack.
  CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
                       PtrByteSize);
  if (useSoftFloat())
    CCInfo.PreAnalyzeCallOperands(Outs);

  if (isVarArg) {
    // Handle fixed and variable vector arguments differently.
    // Fixed vector arguments go into registers as long as registers are
    // available. Variable vector arguments always go into memory.
    unsigned NumArgs = Outs.size();

    for (unsigned i = 0; i != NumArgs; ++i) {
      MVT ArgVT = Outs[i].VT;
      ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
      bool Result;

      if (Outs[i].IsFixed) {
        Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
                               CCInfo);
      } else {
        Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
                                      ArgFlags, CCInfo);
      }

      if (Result) {
#ifndef NDEBUG
        errs() << "Call operand #" << i << " has unhandled type "
               << EVT(ArgVT).getEVTString() << "\n";
#endif
        llvm_unreachable(nullptr);
      }
    }
  } else {
    // All arguments are treated the same.
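    // (CC_PPC32_SVR4 and CC_PPC32_SVR4_VarArg are the calling-convention
    // functions generated from PPCCallingConv.td; each assigns an operand to
    // a register or a stack offset, recorded in ArgLocs through CCInfo.)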
    CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
  }
  CCInfo.clearWasPPCF128();

  // Assign locations to all of the outgoing aggregate by value arguments.
  SmallVector<CCValAssign, 16> ByValArgLocs;
  CCState CCByValInfo(CallConv, isVarArg, MF, ByValArgLocs, *DAG.getContext());

  // Reserve stack space for the allocations in CCInfo.
  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);

  CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);

  // Size of the linkage area, parameter list area and the part of the local
  // variable space where copies of aggregates which are passed by value are
  // stored.
  unsigned NumBytes = CCByValInfo.getNextStackOffset();

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                               dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
  SmallVector<SDValue, 8> MemOpChains;

  bool seenFloatArg = false;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;

    if (Flags.isByVal()) {
      // Argument is an aggregate which is passed by value, thus we need to
      // create a copy of it in the local variable space of the current stack
      // frame (which is the stack frame of the caller) and pass the address of
      // this copy to the callee.
      assert((j < ByValArgLocs.size()) && "Index out of bounds!");
      CCValAssign &ByValVA = ByValArgLocs[j++];
      assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");

      // Memory reserved in the local variable space of the caller's stack
      // frame.
      unsigned LocMemOffset = ByValVA.getLocMemOffset();

      SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
      PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
                           StackPtr, PtrOff);

      // Create a copy of the argument in the local area of the current
      // stack frame.
      SDValue MemcpyCall =
        CreateCopyOfByValArgument(Arg, PtrOff,
                                  CallSeqStart.getNode()->getOperand(0),
                                  Flags, DAG, dl);

      // This must go outside the CALLSEQ_START..END.
5003 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 5004 CallSeqStart.getNode()->getOperand(1), 5005 SDLoc(MemcpyCall)); 5006 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 5007 NewCallSeqStart.getNode()); 5008 Chain = CallSeqStart = NewCallSeqStart; 5009 5010 // Pass the address of the aggregate copy on the stack either in a 5011 // physical register or in the parameter list area of the current stack 5012 // frame to the callee. 5013 Arg = PtrOff; 5014 } 5015 5016 if (VA.isRegLoc()) { 5017 if (Arg.getValueType() == MVT::i1) 5018 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg); 5019 5020 seenFloatArg |= VA.getLocVT().isFloatingPoint(); 5021 // Put argument in a physical register. 5022 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 5023 } else { 5024 // Put argument in the parameter list area of the current stack frame. 5025 assert(VA.isMemLoc()); 5026 unsigned LocMemOffset = VA.getLocMemOffset(); 5027 5028 if (!isTailCall) { 5029 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 5030 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 5031 StackPtr, PtrOff); 5032 5033 MemOpChains.push_back( 5034 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 5035 } else { 5036 // Calculate and remember argument location. 5037 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset, 5038 TailCallArguments); 5039 } 5040 } 5041 } 5042 5043 if (!MemOpChains.empty()) 5044 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5045 5046 // Build a sequence of copy-to-reg nodes chained together with token chain 5047 // and flag operands which copy the outgoing args into the appropriate regs. 5048 SDValue InFlag; 5049 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 5050 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 5051 RegsToPass[i].second, InFlag); 5052 InFlag = Chain.getValue(1); 5053 } 5054 5055 // Set CR bit 6 to true if this is a vararg call with floating args passed in 5056 // registers. 5057 if (isVarArg) { 5058 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 5059 SDValue Ops[] = { Chain, InFlag }; 5060 5061 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET, 5062 dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1)); 5063 5064 InFlag = Chain.getValue(1); 5065 } 5066 5067 if (isTailCall) 5068 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 5069 TailCallArguments); 5070 5071 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, 5072 /* unused except on PPC64 ELFv1 */ false, DAG, 5073 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 5074 NumBytes, Ins, InVals, CS); 5075 } 5076 5077 // Copy an argument into memory, being careful to do this outside the 5078 // call sequence for the call to which the argument belongs. 5079 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq( 5080 SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags, 5081 SelectionDAG &DAG, const SDLoc &dl) const { 5082 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, 5083 CallSeqStart.getNode()->getOperand(0), 5084 Flags, DAG, dl); 5085 // The MEMCPY must go outside the CALLSEQ_START..END. 
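  // Nested call sequences are not allowed, and the node built by getMemcpy()
  // may itself be lowered to a call to memcpy. Chaining the copy off
  // CALLSEQ_START's own input chain and rebuilding CALLSEQ_START on top of
  // the copy (below) keeps any such library call out of this call's sequence.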
  SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
                                                 CallSeqStart.getNode()->getOperand(1),
                                                 SDLoc(MemcpyCall));
  DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
                         NewCallSeqStart.getNode());
  return NewCallSeqStart;
}

SDValue PPCTargetLowering::LowerCall_64SVR4(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, bool isPatchPoint,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    ImmutableCallSite *CS) const {
  bool isELFv2ABI = Subtarget.isELFv2ABI();
  bool isLittleEndian = Subtarget.isLittleEndian();
  unsigned NumOps = Outs.size();
  bool hasNest = false;
  bool IsSibCall = false;

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  unsigned PtrByteSize = 8;

  MachineFunction &MF = DAG.getMachineFunction();

  if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
    IsSibCall = true;

  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence the frame pointer will be used for dynamic
  // stack allocation and for restoring the caller's stack pointer in this
  // function's epilogue. This is done because the tail-called function might
  // overwrite the value in this function's (MF) stack pointer stack slot
  // 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  assert(!(CallConv == CallingConv::Fast && isVarArg) &&
         "fastcc not supported on varargs functions");

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes
  // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
  // area is 32 bytes reserved space for [SP][CR][LR][TOC].
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned NumBytes = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  unsigned &QFPR_idx = FPR_idx;

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned NumGPRs = array_lengthof(GPR);
  const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
  const unsigned NumVRs = array_lengthof(VR);
  const unsigned NumQFPRs = NumFPRs;

  // On ELFv2, we can avoid allocating the parameter area if all the arguments
  // can be passed to the callee in registers.
  // For the fast calling convention, there is another check below.
  // Note: We should keep this consistent with LowerFormalArguments_64SVR4().
  bool HasParameterArea = !isELFv2ABI || isVarArg || CallConv == CallingConv::Fast;
  if (!HasParameterArea) {
    unsigned ParamAreaSize = NumGPRs * PtrByteSize;
    unsigned AvailableFPRs = NumFPRs;
    unsigned AvailableVRs = NumVRs;
    unsigned NumBytesTmp = NumBytes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (Outs[i].Flags.isNest()) continue;
      if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
                                 PtrByteSize, LinkageSize, ParamAreaSize,
                                 NumBytesTmp, AvailableFPRs, AvailableVRs,
                                 Subtarget.hasQPX()))
        HasParameterArea = true;
    }
  }

  // When using the fast calling convention, we don't provide backing for
  // arguments that will be in registers.
  unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;

  // Add up all the space actually used.
  for (unsigned i = 0; i != NumOps; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;

    if (Flags.isNest())
      continue;

    if (CallConv == CallingConv::Fast) {
      if (Flags.isByVal())
        NumGPRsUsed += (Flags.getByValSize()+7)/8;
      else
        switch (ArgVT.getSimpleVT().SimpleTy) {
        default: llvm_unreachable("Unexpected ValueType for argument!");
        case MVT::i1:
        case MVT::i32:
        case MVT::i64:
          if (++NumGPRsUsed <= NumGPRs)
            continue;
          break;
        case MVT::v4i32:
        case MVT::v8i16:
        case MVT::v16i8:
        case MVT::v2f64:
        case MVT::v2i64:
        case MVT::v1i128:
          if (++NumVRsUsed <= NumVRs)
            continue;
          break;
        case MVT::v4f32:
          // When using QPX, this is handled like a FP register, otherwise, it
          // is an Altivec register.
          if (Subtarget.hasQPX()) {
            if (++NumFPRsUsed <= NumFPRs)
              continue;
          } else {
            if (++NumVRsUsed <= NumVRs)
              continue;
          }
          break;
        case MVT::f32:
        case MVT::f64:
        case MVT::v4f64: // QPX
        case MVT::v4i1:  // QPX
          if (++NumFPRsUsed <= NumFPRs)
            continue;
          break;
        }
    }

    /* Respect alignment of argument on the stack. */
    unsigned Align =
      CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
    NumBytes = ((NumBytes + Align - 1) / Align) * Align;

    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
    if (Flags.isInConsecutiveRegsLast())
      NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
  }

  unsigned NumBytesActuallyUsed = NumBytes;

  // In the old ELFv1 ABI, the prolog code of the callee may store up to 8 GPR
  // argument registers to the stack, allowing va_start to index over them in
  // memory if it is a varargs function.
  // Because we cannot tell if this is needed on the caller side, we have to
  // conservatively assume that it is needed. As such, make sure we have at
  // least enough stack space for the caller to store the 8 GPRs.
  // In the ELFv2 ABI, we allocate the parameter area iff a callee
  // really requires memory operands, e.g. a vararg function.
  if (HasParameterArea)
    NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
  else
    NumBytes = LinkageSize;

  // Tail call needs the stack to be aligned.
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);

  int SPDiff = 0;

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  if (!IsSibCall)
    SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // To protect arguments on the stack from being clobbered in a tail call,
  // force all the loads to happen before doing any other lowering.
  if (isTailCall)
    Chain = DAG.getStackArgumentTokenFactor(Chain);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain,
                                 DAG.getIntPtrConstant(NumBytes, dl, true), dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);

  // Figure out which arguments are going to go in registers, and which in
  // memory. Also, if this is a vararg function, floating point operations
  // must be stored to our stack, and loaded into integer regs as well, if
  // any integer regs are available for argument passing.
  unsigned ArgOffset = LinkageSize;

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;

  SmallVector<SDValue, 8> MemOpChains;
  for (unsigned i = 0; i != NumOps; ++i) {
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDValue PtrOff;

    // We re-align the argument offset for each argument, except when using the
    // fast calling convention, when we need to make sure we do that only when
    // we'll actually use a stack slot.
    auto ComputePtrOff = [&]() {
      /* Respect alignment of argument on the stack. */
      unsigned Align =
        CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
      ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;

      PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());

      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
    };

    if (CallConv != CallingConv::Fast) {
      ComputePtrOff();

      /* Compute GPR index associated with argument offset. */
      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, NumGPRs);
    }

    // Promote integers to 64-bit values.
    if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
      // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
      unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
      Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
    }

    // FIXME memcpy is used way more than necessary. Correctness first.
    // Note: "by value" is code for passing a structure by value, not
    // basic types.
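    // A worked example of the right-justification performed below: on a
    // big-endian target, a 2-byte aggregate passed in memory is stored at
    // PtrOff + (PtrByteSize - 2), i.e. in the low-order bytes of its
    // doubleword, which is where a callee reading the slot as a GPR-sized
    // value expects to find it. Little-endian targets need no adjustment.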
5337 if (Flags.isByVal()) { 5338 // Note: Size includes alignment padding, so 5339 // struct x { short a; char b; } 5340 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 5341 // These are the proper values we need for right-justifying the 5342 // aggregate in a parameter register. 5343 unsigned Size = Flags.getByValSize(); 5344 5345 // An empty aggregate parameter takes up no storage and no 5346 // registers. 5347 if (Size == 0) 5348 continue; 5349 5350 if (CallConv == CallingConv::Fast) 5351 ComputePtrOff(); 5352 5353 // All aggregates smaller than 8 bytes must be passed right-justified. 5354 if (Size==1 || Size==2 || Size==4) { 5355 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 5356 if (GPR_idx != NumGPRs) { 5357 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 5358 MachinePointerInfo(), VT); 5359 MemOpChains.push_back(Load.getValue(1)); 5360 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5361 5362 ArgOffset += PtrByteSize; 5363 continue; 5364 } 5365 } 5366 5367 if (GPR_idx == NumGPRs && Size < 8) { 5368 SDValue AddPtr = PtrOff; 5369 if (!isLittleEndian) { 5370 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 5371 PtrOff.getValueType()); 5372 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5373 } 5374 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5375 CallSeqStart, 5376 Flags, DAG, dl); 5377 ArgOffset += PtrByteSize; 5378 continue; 5379 } 5380 // Copy entire object into memory. There are cases where gcc-generated 5381 // code assumes it is there, even if it could be put entirely into 5382 // registers. (This is not what the doc says.) 5383 5384 // FIXME: The above statement is likely due to a misunderstanding of the 5385 // documents. All arguments must be copied into the parameter area BY 5386 // THE CALLEE in the event that the callee takes the address of any 5387 // formal argument. That has not yet been implemented. However, it is 5388 // reasonable to use the stack area as a staging area for the register 5389 // load. 5390 5391 // Skip this for small aggregates, as we will use the same slot for a 5392 // right-justified copy, below. 5393 if (Size >= 8) 5394 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5395 CallSeqStart, 5396 Flags, DAG, dl); 5397 5398 // When a register is available, pass a small aggregate right-justified. 5399 if (Size < 8 && GPR_idx != NumGPRs) { 5400 // The easiest way to get this right-justified in a register 5401 // is to copy the structure into the rightmost portion of a 5402 // local variable slot, then load the whole slot into the 5403 // register. 5404 // FIXME: The memcpy seems to produce pretty awful code for 5405 // small aggregates, particularly for packed ones. 5406 // FIXME: It would be preferable to use the slot in the 5407 // parameter save area instead of a new local variable. 5408 SDValue AddPtr = PtrOff; 5409 if (!isLittleEndian) { 5410 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType()); 5411 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5412 } 5413 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5414 CallSeqStart, 5415 Flags, DAG, dl); 5416 5417 // Load the slot into the register. 5418 SDValue Load = 5419 DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo()); 5420 MemOpChains.push_back(Load.getValue(1)); 5421 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5422 5423 // Done with this argument. 
5424 ArgOffset += PtrByteSize; 5425 continue; 5426 } 5427 5428 // For aggregates larger than PtrByteSize, copy the pieces of the 5429 // object that fit into registers from the parameter save area. 5430 for (unsigned j=0; j<Size; j+=PtrByteSize) { 5431 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 5432 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 5433 if (GPR_idx != NumGPRs) { 5434 SDValue Load = 5435 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo()); 5436 MemOpChains.push_back(Load.getValue(1)); 5437 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5438 ArgOffset += PtrByteSize; 5439 } else { 5440 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 5441 break; 5442 } 5443 } 5444 continue; 5445 } 5446 5447 switch (Arg.getSimpleValueType().SimpleTy) { 5448 default: llvm_unreachable("Unexpected ValueType for argument!"); 5449 case MVT::i1: 5450 case MVT::i32: 5451 case MVT::i64: 5452 if (Flags.isNest()) { 5453 // The 'nest' parameter, if any, is passed in R11. 5454 RegsToPass.push_back(std::make_pair(PPC::X11, Arg)); 5455 hasNest = true; 5456 break; 5457 } 5458 5459 // These can be scalar arguments or elements of an integer array type 5460 // passed directly. Clang may use those instead of "byval" aggregate 5461 // types to avoid forcing arguments to memory unnecessarily. 5462 if (GPR_idx != NumGPRs) { 5463 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 5464 } else { 5465 if (CallConv == CallingConv::Fast) 5466 ComputePtrOff(); 5467 5468 assert(HasParameterArea && 5469 "Parameter area must exist to pass an argument in memory."); 5470 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5471 true, isTailCall, false, MemOpChains, 5472 TailCallArguments, dl); 5473 if (CallConv == CallingConv::Fast) 5474 ArgOffset += PtrByteSize; 5475 } 5476 if (CallConv != CallingConv::Fast) 5477 ArgOffset += PtrByteSize; 5478 break; 5479 case MVT::f32: 5480 case MVT::f64: { 5481 // These can be scalar arguments or elements of a float array type 5482 // passed directly. The latter are used to implement ELFv2 homogenous 5483 // float aggregates. 5484 5485 // Named arguments go into FPRs first, and once they overflow, the 5486 // remaining arguments go into GPRs and then the parameter save area. 5487 // Unnamed arguments for vararg functions always go to GPRs and 5488 // then the parameter save area. For now, put all arguments to vararg 5489 // routines always in both locations (FPR *and* GPR or stack slot). 5490 bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs; 5491 bool NeededLoad = false; 5492 5493 // First load the argument into the next available FPR. 5494 if (FPR_idx != NumFPRs) 5495 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 5496 5497 // Next, load the argument into GPR or stack slot if needed. 5498 if (!NeedGPROrStack) 5499 ; 5500 else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) { 5501 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 5502 // once we support fp <-> gpr moves. 5503 5504 // In the non-vararg case, this can only ever happen in the 5505 // presence of f32 array types, since otherwise we never run 5506 // out of FPRs before running out of GPRs. 5507 SDValue ArgVal; 5508 5509 // Double values are always passed in a single GPR. 5510 if (Arg.getValueType() != MVT::f32) { 5511 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg); 5512 5513 // Non-array float values are extended and passed in a GPR. 
        } else if (!Flags.isInConsecutiveRegs()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);

        // If we have an array of floats, we collect every odd element
        // together with its predecessor into one GPR.
        } else if (ArgOffset % PtrByteSize != 0) {
          SDValue Lo, Hi;
          Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
          Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          if (!isLittleEndian)
            std::swap(Lo, Hi);
          ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);

        // The final element, if even, goes into the first half of a GPR.
        } else if (Flags.isInConsecutiveRegsLast()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
          if (!isLittleEndian)
            ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
                                 DAG.getConstant(32, dl, MVT::i32));

        // Non-final even elements are skipped; they will be handled
        // together with the subsequent argument on the next go-around.
        } else
          ArgVal = SDValue();

        if (ArgVal.getNode())
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
      } else {
        if (CallConv == CallingConv::Fast)
          ComputePtrOff();

        // Single-precision floating-point values are mapped to the
        // second (rightmost) word of the stack doubleword.
        if (Arg.getValueType() == MVT::f32 &&
            !isLittleEndian && !Flags.isInConsecutiveRegs()) {
          SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
          PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
        }

        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);

        NeededLoad = true;
      }
      // When passing an array of floats, the array occupies consecutive
      // space in the argument area; only round up to the next doubleword
      // at the end of the array. Otherwise, each float takes 8 bytes.
      if (CallConv != CallingConv::Fast || NeededLoad) {
        ArgOffset += (Arg.getValueType() == MVT::f32 &&
                      Flags.isInConsecutiveRegs()) ? 4 : 8;
        if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      }
      break;
    }
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
    case MVT::v2f64:
    case MVT::v2i64:
    case MVT::v1i128:
      if (!Subtarget.hasQPX()) {
        // These can be scalar arguments or elements of a vector array type
        // passed directly. The latter are used to implement ELFv2 homogenous
        // vector aggregates.

        // For a varargs call, named arguments go into VRs or on the stack as
        // usual; unnamed arguments always go to the stack or the corresponding
        // GPRs when within range. For now, we always put the value in both
        // locations (or even all three).
        if (isVarArg) {
          assert(HasParameterArea &&
                 "Parameter area must exist if we have a varargs call.");
          // We could elide this store in the case where the object fits
          // entirely in R registers. Maybe later.
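          // The store/reload pattern below places the value everywhere a
          // varargs callee might look for it: in a VR, in GPRs, and in the
          // parameter save area itself.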
5595 SDValue Store = 5596 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 5597 MemOpChains.push_back(Store); 5598 if (VR_idx != NumVRs) { 5599 SDValue Load = 5600 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo()); 5601 MemOpChains.push_back(Load.getValue(1)); 5602 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 5603 } 5604 ArgOffset += 16; 5605 for (unsigned i=0; i<16; i+=PtrByteSize) { 5606 if (GPR_idx == NumGPRs) 5607 break; 5608 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5609 DAG.getConstant(i, dl, PtrVT)); 5610 SDValue Load = 5611 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 5612 MemOpChains.push_back(Load.getValue(1)); 5613 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5614 } 5615 break; 5616 } 5617 5618 // Non-varargs Altivec params go into VRs or on the stack. 5619 if (VR_idx != NumVRs) { 5620 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 5621 } else { 5622 if (CallConv == CallingConv::Fast) 5623 ComputePtrOff(); 5624 5625 assert(HasParameterArea && 5626 "Parameter area must exist to pass an argument in memory."); 5627 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5628 true, isTailCall, true, MemOpChains, 5629 TailCallArguments, dl); 5630 if (CallConv == CallingConv::Fast) 5631 ArgOffset += 16; 5632 } 5633 5634 if (CallConv != CallingConv::Fast) 5635 ArgOffset += 16; 5636 break; 5637 } // not QPX 5638 5639 assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 && 5640 "Invalid QPX parameter type"); 5641 5642 /* fall through */ 5643 case MVT::v4f64: 5644 case MVT::v4i1: { 5645 bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32; 5646 if (isVarArg) { 5647 assert(HasParameterArea && 5648 "Parameter area must exist if we have a varargs call."); 5649 // We could elide this store in the case where the object fits 5650 // entirely in R registers. Maybe later. 5651 SDValue Store = 5652 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 5653 MemOpChains.push_back(Store); 5654 if (QFPR_idx != NumQFPRs) { 5655 SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store, 5656 PtrOff, MachinePointerInfo()); 5657 MemOpChains.push_back(Load.getValue(1)); 5658 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load)); 5659 } 5660 ArgOffset += (IsF32 ? 16 : 32); 5661 for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) { 5662 if (GPR_idx == NumGPRs) 5663 break; 5664 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5665 DAG.getConstant(i, dl, PtrVT)); 5666 SDValue Load = 5667 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 5668 MemOpChains.push_back(Load.getValue(1)); 5669 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5670 } 5671 break; 5672 } 5673 5674 // Non-varargs QPX params go into registers or on the stack. 5675 if (QFPR_idx != NumQFPRs) { 5676 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg)); 5677 } else { 5678 if (CallConv == CallingConv::Fast) 5679 ComputePtrOff(); 5680 5681 assert(HasParameterArea && 5682 "Parameter area must exist to pass an argument in memory."); 5683 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5684 true, isTailCall, true, MemOpChains, 5685 TailCallArguments, dl); 5686 if (CallConv == CallingConv::Fast) 5687 ArgOffset += (IsF32 ? 16 : 32); 5688 } 5689 5690 if (CallConv != CallingConv::Fast) 5691 ArgOffset += (IsF32 ? 
16 : 32);
      break;
    }
    }
  }

  assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
         "mismatch in size of parameter area");
  (void)NumBytesActuallyUsed;

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Check if this is an indirect call (MTCTR/BCTRL).
  // See PrepareCall() for more information about calls through function
  // pointers in the 64-bit SVR4 ABI.
  if (!isTailCall && !isPatchPoint &&
      !isFunctionGlobalAddress(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee)) {
    // Load r2 into a virtual register and store it to the TOC save area.
    setUsesTOCBasePtr(DAG);
    SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
    // TOC save area offset.
    unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
    SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
    SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
    Chain = DAG.getStore(
        Val.getValue(1), dl, Val, AddPtr,
        MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
    // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
    // This does not mean the MTCTR instruction must use R12; it's easier
    // to model this as an extra parameter, so do that.
    if (isELFv2ABI && !isPatchPoint)
      RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (isTailCall && !IsSibCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
                    TailCallArguments);

  return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest,
                    DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee,
                    SPDiff, NumBytes, Ins, InVals, CS);
}

SDValue PPCTargetLowering::LowerCall_Darwin(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, bool isPatchPoint,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    ImmutableCallSite *CS) const {
  unsigned NumOps = Outs.size();

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;
  unsigned PtrByteSize = isPPC64 ? 8 : 4;

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence the frame pointer will be used for dynamic
  // stack allocation and for restoring the caller's stack pointer in this
  // function's epilogue. This is done because the tail-called function might
  // overwrite the value in this function's (MF) stack pointer stack slot
  // 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, and parameter passing area. We start with 24/48 bytes, which is
  // prereserved space for [SP][CR][LR][3 x unused].
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned NumBytes = LinkageSize;

  // Add up all the space actually used.
  // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
  // they all go in registers, but we must reserve stack space for them for
  // possible use by the caller. In varargs or 64-bit calls, parameters are
  // assigned stack space in order, with padding so Altivec parameters are
  // 16-byte aligned.
  unsigned nAltivecParamsAtEnd = 0;
  for (unsigned i = 0; i != NumOps; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    // Varargs Altivec parameters are padded to a 16 byte boundary.
    if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
        ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
        ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
      if (!isVarArg && !isPPC64) {
        // Non-varargs Altivec parameters go after all the non-Altivec
        // parameters; handle those later so we know how much padding we need.
        nAltivecParamsAtEnd++;
        continue;
      }
      // Varargs and 64-bit Altivec parameters are padded to a 16 byte boundary.
      NumBytes = ((NumBytes+15)/16)*16;
    }
    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
  }

  // Allow for Altivec parameters at the end, if needed.
  if (nAltivecParamsAtEnd) {
    NumBytes = ((NumBytes+15)/16)*16;
    NumBytes += 16*nAltivecParamsAtEnd;
  }

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if it is a
  // varargs function.
  // Because we cannot tell if this is needed on the caller side, we have to
  // conservatively assume that it is needed. As such, make sure we have at
  // least enough stack space for the caller to store the 8 GPRs.
  NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);

  // Tail call needs the stack to be aligned.
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // To protect arguments on the stack from being clobbered in a tail call,
  // force all the loads to happen before doing any other lowering.
  if (isTailCall)
    Chain = DAG.getStackArgumentTokenFactor(Chain);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                               dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
5837 SDValue LROp, FPOp; 5838 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 5839 5840 // Set up a copy of the stack pointer for use loading and storing any 5841 // arguments that may not fit in the registers available for argument 5842 // passing. 5843 SDValue StackPtr; 5844 if (isPPC64) 5845 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 5846 else 5847 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 5848 5849 // Figure out which arguments are going to go in registers, and which in 5850 // memory. Also, if this is a vararg function, floating point operations 5851 // must be stored to our stack, and loaded into integer regs as well, if 5852 // any integer regs are available for argument passing. 5853 unsigned ArgOffset = LinkageSize; 5854 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 5855 5856 static const MCPhysReg GPR_32[] = { // 32-bit registers. 5857 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 5858 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 5859 }; 5860 static const MCPhysReg GPR_64[] = { // 64-bit registers. 5861 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 5862 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 5863 }; 5864 static const MCPhysReg VR[] = { 5865 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 5866 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 5867 }; 5868 const unsigned NumGPRs = array_lengthof(GPR_32); 5869 const unsigned NumFPRs = 13; 5870 const unsigned NumVRs = array_lengthof(VR); 5871 5872 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 5873 5874 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 5875 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 5876 5877 SmallVector<SDValue, 8> MemOpChains; 5878 for (unsigned i = 0; i != NumOps; ++i) { 5879 SDValue Arg = OutVals[i]; 5880 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5881 5882 // PtrOff will be used to store the current argument to the stack if a 5883 // register cannot be found for it. 5884 SDValue PtrOff; 5885 5886 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 5887 5888 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 5889 5890 // On PPC64, promote integers to 64-bit values. 5891 if (isPPC64 && Arg.getValueType() == MVT::i32) { 5892 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 5893 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 5894 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 5895 } 5896 5897 // FIXME memcpy is used way more than necessary. Correctness first. 5898 // Note: "by value" is code for passing a structure by value, not 5899 // basic types. 5900 if (Flags.isByVal()) { 5901 unsigned Size = Flags.getByValSize(); 5902 // Very small objects are passed right-justified. Everything else is 5903 // passed left-justified. 5904 if (Size==1 || Size==2) { 5905 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 5906 if (GPR_idx != NumGPRs) { 5907 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 5908 MachinePointerInfo(), VT); 5909 MemOpChains.push_back(Load.getValue(1)); 5910 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5911 5912 ArgOffset += PtrByteSize; 5913 } else { 5914 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 5915 PtrOff.getValueType()); 5916 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5917 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5918 CallSeqStart, 5919 Flags, DAG, dl); 5920 ArgOffset += PtrByteSize; 5921 } 5922 continue; 5923 } 5924 // Copy entire object into memory. 
There are cases where gcc-generated 5925 // code assumes it is there, even if it could be put entirely into 5926 // registers. (This is not what the doc says.) 5927 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5928 CallSeqStart, 5929 Flags, DAG, dl); 5930 5931 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 5932 // copy the pieces of the object that fit into registers from the 5933 // parameter save area. 5934 for (unsigned j=0; j<Size; j+=PtrByteSize) { 5935 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 5936 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 5937 if (GPR_idx != NumGPRs) { 5938 SDValue Load = 5939 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo()); 5940 MemOpChains.push_back(Load.getValue(1)); 5941 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5942 ArgOffset += PtrByteSize; 5943 } else { 5944 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 5945 break; 5946 } 5947 } 5948 continue; 5949 } 5950 5951 switch (Arg.getSimpleValueType().SimpleTy) { 5952 default: llvm_unreachable("Unexpected ValueType for argument!"); 5953 case MVT::i1: 5954 case MVT::i32: 5955 case MVT::i64: 5956 if (GPR_idx != NumGPRs) { 5957 if (Arg.getValueType() == MVT::i1) 5958 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg); 5959 5960 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 5961 } else { 5962 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5963 isPPC64, isTailCall, false, MemOpChains, 5964 TailCallArguments, dl); 5965 } 5966 ArgOffset += PtrByteSize; 5967 break; 5968 case MVT::f32: 5969 case MVT::f64: 5970 if (FPR_idx != NumFPRs) { 5971 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 5972 5973 if (isVarArg) { 5974 SDValue Store = 5975 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 5976 MemOpChains.push_back(Store); 5977 5978 // Float varargs are always shadowed in available integer registers 5979 if (GPR_idx != NumGPRs) { 5980 SDValue Load = 5981 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo()); 5982 MemOpChains.push_back(Load.getValue(1)); 5983 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5984 } 5985 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 5986 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 5987 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 5988 SDValue Load = 5989 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo()); 5990 MemOpChains.push_back(Load.getValue(1)); 5991 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5992 } 5993 } else { 5994 // If we have any FPRs remaining, we may also have GPRs remaining. 5995 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 5996 // GPRs. 5997 if (GPR_idx != NumGPRs) 5998 ++GPR_idx; 5999 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 6000 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 6001 ++GPR_idx; 6002 } 6003 } else 6004 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6005 isPPC64, isTailCall, false, MemOpChains, 6006 TailCallArguments, dl); 6007 if (isPPC64) 6008 ArgOffset += 8; 6009 else 6010 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 6011 break; 6012 case MVT::v4f32: 6013 case MVT::v4i32: 6014 case MVT::v8i16: 6015 case MVT::v16i8: 6016 if (isVarArg) { 6017 // These go aligned on the stack, or in the corresponding R registers 6018 // when within range. 
The Darwin PPC ABI doc claims they also go in 6019 // V registers; in fact gcc does this only for arguments that are 6020 // prototyped, not for those that match the ... We do it for all 6021 // arguments, seems to work. 6022 while (ArgOffset % 16 !=0) { 6023 ArgOffset += PtrByteSize; 6024 if (GPR_idx != NumGPRs) 6025 GPR_idx++; 6026 } 6027 // We could elide this store in the case where the object fits 6028 // entirely in R registers. Maybe later. 6029 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 6030 DAG.getConstant(ArgOffset, dl, PtrVT)); 6031 SDValue Store = 6032 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6033 MemOpChains.push_back(Store); 6034 if (VR_idx != NumVRs) { 6035 SDValue Load = 6036 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo()); 6037 MemOpChains.push_back(Load.getValue(1)); 6038 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 6039 } 6040 ArgOffset += 16; 6041 for (unsigned i=0; i<16; i+=PtrByteSize) { 6042 if (GPR_idx == NumGPRs) 6043 break; 6044 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 6045 DAG.getConstant(i, dl, PtrVT)); 6046 SDValue Load = 6047 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 6048 MemOpChains.push_back(Load.getValue(1)); 6049 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6050 } 6051 break; 6052 } 6053 6054 // Non-varargs Altivec params generally go in registers, but have 6055 // stack space allocated at the end. 6056 if (VR_idx != NumVRs) { 6057 // Doesn't have GPR space allocated. 6058 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 6059 } else if (nAltivecParamsAtEnd==0) { 6060 // We are emitting Altivec params in order. 6061 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6062 isPPC64, isTailCall, true, MemOpChains, 6063 TailCallArguments, dl); 6064 ArgOffset += 16; 6065 } 6066 break; 6067 } 6068 } 6069 // If all Altivec parameters fit in registers, as they usually do, 6070 // they get stack space following the non-Altivec parameters. We 6071 // don't track this here because nobody below needs it. 6072 // If there are more Altivec parameters than fit in registers emit 6073 // the stores here. 6074 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 6075 unsigned j = 0; 6076 // Offset is aligned; skip 1st 12 params which go in V registers. 6077 ArgOffset = ((ArgOffset+15)/16)*16; 6078 ArgOffset += 12*16; 6079 for (unsigned i = 0; i != NumOps; ++i) { 6080 SDValue Arg = OutVals[i]; 6081 EVT ArgType = Outs[i].VT; 6082 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 6083 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 6084 if (++j > NumVRs) { 6085 SDValue PtrOff; 6086 // We are emitting Altivec params in order. 6087 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6088 isPPC64, isTailCall, true, MemOpChains, 6089 TailCallArguments, dl); 6090 ArgOffset += 16; 6091 } 6092 } 6093 } 6094 } 6095 6096 if (!MemOpChains.empty()) 6097 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 6098 6099 // On Darwin, R12 must contain the address of an indirect callee. This does 6100 // not mean the MTCTR instruction must use R12; it's easier to model this as 6101 // an extra parameter, so do that. 6102 if (!isTailCall && 6103 !isFunctionGlobalAddress(Callee) && 6104 !isa<ExternalSymbolSDNode>(Callee) && 6105 !isBLACompatibleAddress(Callee, DAG)) 6106 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? 
PPC::X12 : 6107 PPC::R12), Callee)); 6108 6109 // Build a sequence of copy-to-reg nodes chained together with token chain 6110 // and flag operands which copy the outgoing args into the appropriate regs. 6111 SDValue InFlag; 6112 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 6113 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 6114 RegsToPass[i].second, InFlag); 6115 InFlag = Chain.getValue(1); 6116 } 6117 6118 if (isTailCall) 6119 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 6120 TailCallArguments); 6121 6122 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, 6123 /* unused except on PPC64 ELFv1 */ false, DAG, 6124 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 6125 NumBytes, Ins, InVals, CS); 6126 } 6127 6128 bool 6129 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 6130 MachineFunction &MF, bool isVarArg, 6131 const SmallVectorImpl<ISD::OutputArg> &Outs, 6132 LLVMContext &Context) const { 6133 SmallVector<CCValAssign, 16> RVLocs; 6134 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 6135 return CCInfo.CheckReturn(Outs, RetCC_PPC); 6136 } 6137 6138 SDValue 6139 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 6140 bool isVarArg, 6141 const SmallVectorImpl<ISD::OutputArg> &Outs, 6142 const SmallVectorImpl<SDValue> &OutVals, 6143 const SDLoc &dl, SelectionDAG &DAG) const { 6144 SmallVector<CCValAssign, 16> RVLocs; 6145 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 6146 *DAG.getContext()); 6147 CCInfo.AnalyzeReturn(Outs, RetCC_PPC); 6148 6149 SDValue Flag; 6150 SmallVector<SDValue, 4> RetOps(1, Chain); 6151 6152 // Copy the result values into the output registers. 6153 for (unsigned i = 0; i != RVLocs.size(); ++i) { 6154 CCValAssign &VA = RVLocs[i]; 6155 assert(VA.isRegLoc() && "Can only return in registers!"); 6156 6157 SDValue Arg = OutVals[i]; 6158 6159 switch (VA.getLocInfo()) { 6160 default: llvm_unreachable("Unknown loc info!"); 6161 case CCValAssign::Full: break; 6162 case CCValAssign::AExt: 6163 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 6164 break; 6165 case CCValAssign::ZExt: 6166 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 6167 break; 6168 case CCValAssign::SExt: 6169 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 6170 break; 6171 } 6172 6173 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 6174 Flag = Chain.getValue(1); 6175 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 6176 } 6177 6178 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 6179 const MCPhysReg *I = 6180 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); 6181 if (I) { 6182 for (; *I; ++I) { 6183 6184 if (PPC::G8RCRegClass.contains(*I)) 6185 RetOps.push_back(DAG.getRegister(*I, MVT::i64)); 6186 else if (PPC::F8RCRegClass.contains(*I)) 6187 RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64))); 6188 else if (PPC::CRRCRegClass.contains(*I)) 6189 RetOps.push_back(DAG.getRegister(*I, MVT::i1)); 6190 else if (PPC::VRRCRegClass.contains(*I)) 6191 RetOps.push_back(DAG.getRegister(*I, MVT::Other)); 6192 else 6193 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 6194 } 6195 } 6196 6197 RetOps[0] = Chain; // Update chain. 6198 6199 // Add the flag if we have it. 
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
}

SDValue
PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc dl(Op);

  // Get the correct type for integers.
  EVT IntVT = Op.getValueType();

  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
  // Build a DYNAREAOFFSET node.
  SDValue Ops[2] = {Chain, FPSIdx};
  SDVTList VTs = DAG.getVTList(IntVT);
  return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
}

SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
                                             SelectionDAG &DAG) const {
  // When we pop the dynamic allocation we need to restore the SP link.
  SDLoc dl(Op);

  // Get the correct type for pointers.
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // Construct the stack pointer operand.
  bool isPPC64 = Subtarget.isPPC64();
  unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
  SDValue StackPtr = DAG.getRegister(SP, PtrVT);

  // Get the operands for the STACKRESTORE.
  SDValue Chain = Op.getOperand(0);
  SDValue SaveSP = Op.getOperand(1);

  // Load the old link SP.
  SDValue LoadLinkSP =
      DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());

  // Restore the stack pointer.
  Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);

  // Store the old link SP.
  return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
}

SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = Subtarget.isPPC64();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Get the current return address save index. The users of this index will
  // be primarily the RETURNADDR lowering.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int RASI = FI->getReturnAddrSaveIndex();

  // If the return address save index hasn't been defined yet.
  if (!RASI) {
    // Find out the fixed offset of the return address save area.
    int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
    // Allocate the frame index for the return address save area.
    RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
    // Save the result.
    FI->setReturnAddrSaveIndex(RASI);
  }
  return DAG.getFrameIndex(RASI, PtrVT);
}

SDValue
PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = Subtarget.isPPC64();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Get the current frame pointer save index. The users of this index will be
  // primarily DYNALLOC instructions.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int FPSI = FI->getFramePointerSaveIndex();

  // If the frame pointer save index hasn't been defined yet.
  if (!FPSI) {
    // Find out the fixed offset of the frame pointer save area.
    int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
    // Allocate the frame index for the frame pointer save area.
    FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
    // Save the result.
    FI->setFramePointerSaveIndex(FPSI);
  }
  return DAG.getFrameIndex(FPSI, PtrVT);
}

SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                   SelectionDAG &DAG) const {
  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDLoc dl(Op);

  // Get the correct type for pointers.
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  // Negate the size.
  SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
                                DAG.getConstant(0, dl, PtrVT), Size);
  // Construct a node for the frame pointer save index.
  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
  // Build a DYNALLOC node.
  SDValue Ops[3] = { Chain, NegSize, FPSIdx };
  SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
  return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
}

SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
                                             SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  bool isPPC64 = Subtarget.isPPC64();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
  return DAG.getFrameIndex(FI, PtrVT);
}

SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
                     DAG.getVTList(MVT::i32, MVT::Other),
                     Op.getOperand(0), Op.getOperand(1));
}

SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
                     Op.getOperand(0), Op.getOperand(1));
}

SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getValueType().isVector())
    return LowerVectorLoad(Op, DAG);

  assert(Op.getValueType() == MVT::i1 &&
         "Custom lowering only for i1 loads");

  // First, load 8 bits into 32 bits, then truncate to 1 bit.

  SDLoc dl(Op);
  LoadSDNode *LD = cast<LoadSDNode>(Op);

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  MachineMemOperand *MMO = LD->getMemOperand();

  SDValue NewLD =
      DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
                     BasePtr, MVT::i8, MMO);
  SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);

  SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
  return DAG.getMergeValues(Ops, dl);
}

SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getOperand(1).getValueType().isVector())
    return LowerVectorStore(Op, DAG);

  assert(Op.getOperand(1).getValueType() == MVT::i1 &&
         "Custom lowering only for i1 stores");

  // First, zero extend to 32 bits, then use a truncating store to 8 bits.
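  // A minimal sketch of the DAG built below (illustrative only):
  //   (truncstore i8 (zero_extend iPTR %val), %baseptr)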

  SDLoc dl(Op);
  StoreSDNode *ST = cast<StoreSDNode>(Op);

  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  MachineMemOperand *MMO = ST->getMemOperand();

  Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
                      Value);
  return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
}

// FIXME: Remove this once the ANDI glue bug is fixed:
SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::i1 &&
         "Custom lowering only for i1 results");

  SDLoc DL(Op);
  return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1,
                     Op.getOperand(0));
}

/// LowerSELECT_CC - Lower floating-point select_cc's into an fsel instruction
/// when possible.
SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  // Not FP? Not an fsel.
  if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
      !Op.getOperand(2).getValueType().isFloatingPoint())
    return Op;

  // We might be able to do better than this under some circumstances, but in
  // general, fsel-based lowering of select is a finite-math-only optimization.
  // For more information, see section F.3 of the 2.06 ISA specification.
  if (!DAG.getTarget().Options.NoInfsFPMath ||
      !DAG.getTarget().Options.NoNaNsFPMath)
    return Op;
  // TODO: Propagate flags from the select rather than global settings.
  SDNodeFlags Flags;
  Flags.setNoInfs(true);
  Flags.setNoNaNs(true);

  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();

  EVT ResVT = Op.getValueType();
  EVT CmpVT = Op.getOperand(0).getValueType();
  SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
  SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
  SDLoc dl(Op);

  // If the RHS of the comparison is a 0.0, we don't need to do the
  // subtraction at all.
  SDValue Sel1;
  if (isFloatingPointZero(RHS))
    switch (CC) {
    default: break;       // SETUO etc aren't handled by fsel.
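    // Note: PPCISD::FSEL(C, A, B) selects A when C >= 0.0 and B otherwise,
    // so each case below rearranges the select operands (and negates the
    // comparison value where needed) until it fits that "natively setge"
    // form.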
    case ISD::SETNE:
      std::swap(TV, FV); // Handle setne as seteq with the select arms swapped.
    case ISD::SETEQ:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
      if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
        Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
    case ISD::SETULT:
    case ISD::SETLT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
    case ISD::SETOGE:
    case ISD::SETGE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
    case ISD::SETUGT:
    case ISD::SETGT:
      std::swap(TV, FV);  // fsel is natively setge, so handle setgt as setle
    case ISD::SETOLE:
    case ISD::SETLE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
    }

  SDValue Cmp;
  switch (CC) {
  default: break;       // SETUO etc aren't handled by fsel.
  case ISD::SETNE:
    std::swap(TV, FV); // Handle setne as seteq with the select arms swapped.
  case ISD::SETEQ:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
    if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                       DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
  case ISD::SETULT:
  case ISD::SETLT:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
  case ISD::SETOGE:
  case ISD::SETGE:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
  case ISD::SETUGT:
  case ISD::SETGT:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
  case ISD::SETOLE:
  case ISD::SETLE:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
  }
  return Op;
}

void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
                                               SelectionDAG &DAG,
                                               const SDLoc &dl) const {
  assert(Op.getOperand(0).getValueType().isFloatingPoint());
  SDValue Src = Op.getOperand(0);
  if (Src.getValueType() == MVT::f32)
    Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);

  SDValue Tmp;
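  // The round-toward-zero conversion node is chosen by result width and
  // signedness, mirroring the switch below: i32 signed uses FCTIWZ; i32
  // unsigned uses FCTIWUZ when FPCVT is available and FCTIDZ otherwise;
  // i64 uses FCTIDZ or FCTIDUZ.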
6512 switch (Op.getSimpleValueType().SimpleTy) { 6513 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6514 case MVT::i32: 6515 Tmp = DAG.getNode( 6516 Op.getOpcode() == ISD::FP_TO_SINT 6517 ? PPCISD::FCTIWZ 6518 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6519 dl, MVT::f64, Src); 6520 break; 6521 case MVT::i64: 6522 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6523 "i64 FP_TO_UINT is supported only with FPCVT"); 6524 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 6525 PPCISD::FCTIDUZ, 6526 dl, MVT::f64, Src); 6527 break; 6528 } 6529 6530 // Convert the FP value to an int value through memory. 6531 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 6532 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()); 6533 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 6534 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 6535 MachinePointerInfo MPI = 6536 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); 6537 6538 // Emit a store to the stack slot. 6539 SDValue Chain; 6540 if (i32Stack) { 6541 MachineFunction &MF = DAG.getMachineFunction(); 6542 MachineMemOperand *MMO = 6543 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 6544 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 6545 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 6546 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 6547 } else 6548 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI); 6549 6550 // Result is a load from the stack slot. If loading 4 bytes, make sure to 6551 // add in a bias on big endian. 6552 if (Op.getValueType() == MVT::i32 && !i32Stack) { 6553 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 6554 DAG.getConstant(4, dl, FIPtr.getValueType())); 6555 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4); 6556 } 6557 6558 RLI.Chain = Chain; 6559 RLI.Ptr = FIPtr; 6560 RLI.MPI = MPI; 6561 } 6562 6563 /// \brief Custom lowers floating point to integer conversions to use 6564 /// the direct move instructions available in ISA 2.07 to avoid the 6565 /// need for load/store combinations. 6566 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 6567 SelectionDAG &DAG, 6568 const SDLoc &dl) const { 6569 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6570 SDValue Src = Op.getOperand(0); 6571 6572 if (Src.getValueType() == MVT::f32) 6573 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6574 6575 SDValue Tmp; 6576 switch (Op.getSimpleValueType().SimpleTy) { 6577 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6578 case MVT::i32: 6579 Tmp = DAG.getNode( 6580 Op.getOpcode() == ISD::FP_TO_SINT 6581 ? PPCISD::FCTIWZ 6582 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6583 dl, MVT::f64, Src); 6584 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp); 6585 break; 6586 case MVT::i64: 6587 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6588 "i64 FP_TO_UINT is supported only with FPCVT"); 6589 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? 
PPCISD::FCTIDZ : 6590 PPCISD::FCTIDUZ, 6591 dl, MVT::f64, Src); 6592 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp); 6593 break; 6594 } 6595 return Tmp; 6596 } 6597 6598 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 6599 const SDLoc &dl) const { 6600 if (Subtarget.hasDirectMove() && Subtarget.isPPC64()) 6601 return LowerFP_TO_INTDirectMove(Op, DAG, dl); 6602 6603 ReuseLoadInfo RLI; 6604 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6605 6606 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, 6607 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 6608 } 6609 6610 // We're trying to insert a regular store, S, and then a load, L. If the 6611 // incoming value, O, is a load, we might just be able to have our load use the 6612 // address used by O. However, we don't know if anything else will store to 6613 // that address before we can load from it. To prevent this situation, we need 6614 // to insert our load, L, into the chain as a peer of O. To do this, we give L 6615 // the same chain operand as O, we create a token factor from the chain results 6616 // of O and L, and we replace all uses of O's chain result with that token 6617 // factor (see spliceIntoChain below for this last part). 6618 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT, 6619 ReuseLoadInfo &RLI, 6620 SelectionDAG &DAG, 6621 ISD::LoadExtType ET) const { 6622 SDLoc dl(Op); 6623 if (ET == ISD::NON_EXTLOAD && 6624 (Op.getOpcode() == ISD::FP_TO_UINT || 6625 Op.getOpcode() == ISD::FP_TO_SINT) && 6626 isOperationLegalOrCustom(Op.getOpcode(), 6627 Op.getOperand(0).getValueType())) { 6628 6629 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6630 return true; 6631 } 6632 6633 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op); 6634 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() || 6635 LD->isNonTemporal()) 6636 return false; 6637 if (LD->getMemoryVT() != MemVT) 6638 return false; 6639 6640 RLI.Ptr = LD->getBasePtr(); 6641 if (LD->isIndexed() && !LD->getOffset().isUndef()) { 6642 assert(LD->getAddressingMode() == ISD::PRE_INC && 6643 "Non-pre-inc AM on PPC?"); 6644 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr, 6645 LD->getOffset()); 6646 } 6647 6648 RLI.Chain = LD->getChain(); 6649 RLI.MPI = LD->getPointerInfo(); 6650 RLI.IsDereferenceable = LD->isDereferenceable(); 6651 RLI.IsInvariant = LD->isInvariant(); 6652 RLI.Alignment = LD->getAlignment(); 6653 RLI.AAInfo = LD->getAAInfo(); 6654 RLI.Ranges = LD->getRanges(); 6655 6656 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1); 6657 return true; 6658 } 6659 6660 // Given the head of the old chain, ResChain, insert a token factor containing 6661 // it and NewResChain, and make users of ResChain now be users of that token 6662 // factor. 
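// A sketch of the resulting chain structure:
//
//   before:  ResChain -----------------------------> users of ResChain
//   after:   TokenFactor(ResChain, NewResChain) ---> users of ResChain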
6663 void PPCTargetLowering::spliceIntoChain(SDValue ResChain, 6664 SDValue NewResChain, 6665 SelectionDAG &DAG) const { 6666 if (!ResChain) 6667 return; 6668 6669 SDLoc dl(NewResChain); 6670 6671 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 6672 NewResChain, DAG.getUNDEF(MVT::Other)); 6673 assert(TF.getNode() != NewResChain.getNode() && 6674 "A new TF really is required here"); 6675 6676 DAG.ReplaceAllUsesOfValueWith(ResChain, TF); 6677 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain); 6678 } 6679 6680 /// \brief Analyze profitability of direct move 6681 /// prefer float load to int load plus direct move 6682 /// when there is no integer use of int load 6683 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const { 6684 SDNode *Origin = Op.getOperand(0).getNode(); 6685 if (Origin->getOpcode() != ISD::LOAD) 6686 return true; 6687 6688 // If there is no LXSIBZX/LXSIHZX, like Power8, 6689 // prefer direct move if the memory size is 1 or 2 bytes. 6690 MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand(); 6691 if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2) 6692 return true; 6693 6694 for (SDNode::use_iterator UI = Origin->use_begin(), 6695 UE = Origin->use_end(); 6696 UI != UE; ++UI) { 6697 6698 // Only look at the users of the loaded value. 6699 if (UI.getUse().get().getResNo() != 0) 6700 continue; 6701 6702 if (UI->getOpcode() != ISD::SINT_TO_FP && 6703 UI->getOpcode() != ISD::UINT_TO_FP) 6704 return true; 6705 } 6706 6707 return false; 6708 } 6709 6710 /// \brief Custom lowers integer to floating point conversions to use 6711 /// the direct move instructions available in ISA 2.07 to avoid the 6712 /// need for load/store combinations. 6713 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op, 6714 SelectionDAG &DAG, 6715 const SDLoc &dl) const { 6716 assert((Op.getValueType() == MVT::f32 || 6717 Op.getValueType() == MVT::f64) && 6718 "Invalid floating point type as target of conversion"); 6719 assert(Subtarget.hasFPCVT() && 6720 "Int to FP conversions with direct moves require FPCVT"); 6721 SDValue FP; 6722 SDValue Src = Op.getOperand(0); 6723 bool SinglePrec = Op.getValueType() == MVT::f32; 6724 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32; 6725 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP; 6726 unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) : 6727 (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU); 6728 6729 if (WordInt) { 6730 FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ, 6731 dl, MVT::f64, Src); 6732 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 6733 } 6734 else { 6735 FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src); 6736 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 6737 } 6738 6739 return FP; 6740 } 6741 6742 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, 6743 SelectionDAG &DAG) const { 6744 SDLoc dl(Op); 6745 6746 if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) { 6747 if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64) 6748 return SDValue(); 6749 6750 SDValue Value = Op.getOperand(0); 6751 // The values are now known to be -1 (false) or 1 (true). To convert this 6752 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 
    // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
    Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

    SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);

    Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

    if (Op.getValueType() != MVT::v4f64)
      Value = DAG.getNode(ISD::FP_ROUND, dl,
                          Op.getValueType(), Value,
                          DAG.getIntPtrConstant(1, dl));
    return Value;
  }

  // Don't handle ppc_fp128 here; let it be lowered to a libcall.
  if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
    return SDValue();

  if (Op.getOperand(0).getValueType() == MVT::i1)
    return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
                       DAG.getConstantFP(1.0, dl, Op.getValueType()),
                       DAG.getConstantFP(0.0, dl, Op.getValueType()));

  // If we have direct moves, we can do the entire conversion and skip the
  // store/load; without FPCVT, however, we can't do most conversions.
  if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
      Subtarget.isPPC64() && Subtarget.hasFPCVT())
    return LowerINT_TO_FPDirectMove(Op, DAG, dl);

  assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
         "UINT_TO_FP is supported only with FPCVT");

  // If we have FCFIDS, then use it when converting to single-precision.
  // Otherwise, convert to double-precision and then round.
  unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                       ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
                                                            : PPCISD::FCFIDS)
                       : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
                                                            : PPCISD::FCFID);
  MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                  ? MVT::f32
                  : MVT::f64;

  if (Op.getOperand(0).getValueType() == MVT::i64) {
    SDValue SINT = Op.getOperand(0);
    // When converting to single-precision, we actually need to convert
    // to double-precision first and then round to single-precision.
    // To avoid double-rounding effects during that operation, we have
    // to prepare the input operand. Bits that might be truncated when
    // converting to double-precision are replaced by a bit that won't
    // be lost at this stage, but is below the single-precision rounding
    // position.
    //
    // However, if -enable-unsafe-fp-math is in effect, accept double
    // rounding to avoid the extra overhead.
    if (Op.getValueType() == MVT::f32 &&
        !Subtarget.hasFPCVT() &&
        !DAG.getTarget().Options.UnsafeFPMath) {

      // Twiddle input to make sure the low 11 bits are zero. (If this
      // is the case, we are guaranteed the value will fit into the 53 bit
      // mantissa of an IEEE double-precision value without rounding.)
      // If any of those low 11 bits were not zero originally, make sure
      // bit 12 (value 2048) is set instead, so that the final rounding
      // to single-precision gets the correct result.
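      // A worked example: if the low 11 bits of SINT are 0x001, then
      // (0x001 & 0x7FF) + 0x7FF = 0x800, i.e. the carry sets the bit with
      // value 2048; OR-ing with SINT and AND-ing with ~0x7FF then leaves the
      // low 11 bits zero with that sticky bit set. If the low 11 bits are
      // already zero, no carry occurs and the value is left unchanged.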
6818 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 6819 SINT, DAG.getConstant(2047, dl, MVT::i64)); 6820 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 6821 Round, DAG.getConstant(2047, dl, MVT::i64)); 6822 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 6823 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 6824 Round, DAG.getConstant(-2048, dl, MVT::i64)); 6825 6826 // However, we cannot use that value unconditionally: if the magnitude 6827 // of the input value is small, the bit-twiddling we did above might 6828 // end up visibly changing the output. Fortunately, in that case, we 6829 // don't need to twiddle bits since the original input will convert 6830 // exactly to double-precision floating-point already. Therefore, 6831 // construct a conditional to use the original value if the top 11 6832 // bits are all sign-bit copies, and use the rounded value computed 6833 // above otherwise. 6834 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 6835 SINT, DAG.getConstant(53, dl, MVT::i32)); 6836 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 6837 Cond, DAG.getConstant(1, dl, MVT::i64)); 6838 Cond = DAG.getSetCC(dl, MVT::i32, 6839 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT); 6840 6841 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 6842 } 6843 6844 ReuseLoadInfo RLI; 6845 SDValue Bits; 6846 6847 MachineFunction &MF = DAG.getMachineFunction(); 6848 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 6849 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, 6850 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 6851 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6852 } else if (Subtarget.hasLFIWAX() && 6853 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) { 6854 MachineMemOperand *MMO = 6855 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6856 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6857 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6858 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl, 6859 DAG.getVTList(MVT::f64, MVT::Other), 6860 Ops, MVT::i32, MMO); 6861 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6862 } else if (Subtarget.hasFPCVT() && 6863 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) { 6864 MachineMemOperand *MMO = 6865 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6866 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6867 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6868 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl, 6869 DAG.getVTList(MVT::f64, MVT::Other), 6870 Ops, MVT::i32, MMO); 6871 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6872 } else if (((Subtarget.hasLFIWAX() && 6873 SINT.getOpcode() == ISD::SIGN_EXTEND) || 6874 (Subtarget.hasFPCVT() && 6875 SINT.getOpcode() == ISD::ZERO_EXTEND)) && 6876 SINT.getOperand(0).getValueType() == MVT::i32) { 6877 MachineFrameInfo &MFI = MF.getFrameInfo(); 6878 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6879 6880 int FrameIdx = MFI.CreateStackObject(4, 4, false); 6881 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6882 6883 SDValue Store = 6884 DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx, 6885 MachinePointerInfo::getFixedStack( 6886 DAG.getMachineFunction(), FrameIdx)); 6887 6888 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 6889 "Expected an i32 store"); 6890 6891 RLI.Ptr = FIdx; 6892 RLI.Chain = Store; 6893 RLI.MPI = 6894 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 6895 RLI.Alignment = 4; 6896 6897 MachineMemOperand *MMO = 6898 MF.getMachineMemOperand(RLI.MPI, 
MachineMemOperand::MOLoad, 4, 6899 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6900 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6901 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ? 6902 PPCISD::LFIWZX : PPCISD::LFIWAX, 6903 dl, DAG.getVTList(MVT::f64, MVT::Other), 6904 Ops, MVT::i32, MMO); 6905 } else 6906 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 6907 6908 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 6909 6910 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 6911 FP = DAG.getNode(ISD::FP_ROUND, dl, 6912 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 6913 return FP; 6914 } 6915 6916 assert(Op.getOperand(0).getValueType() == MVT::i32 && 6917 "Unhandled INT_TO_FP type in custom expander!"); 6918 // Since we only generate this in 64-bit mode, we can take advantage of 6919 // 64-bit registers. In particular, sign extend the input value into the 6920 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 6921 // then lfd it and fcfid it. 6922 MachineFunction &MF = DAG.getMachineFunction(); 6923 MachineFrameInfo &MFI = MF.getFrameInfo(); 6924 EVT PtrVT = getPointerTy(MF.getDataLayout()); 6925 6926 SDValue Ld; 6927 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 6928 ReuseLoadInfo RLI; 6929 bool ReusingLoad; 6930 if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI, 6931 DAG))) { 6932 int FrameIdx = MFI.CreateStackObject(4, 4, false); 6933 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6934 6935 SDValue Store = 6936 DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 6937 MachinePointerInfo::getFixedStack( 6938 DAG.getMachineFunction(), FrameIdx)); 6939 6940 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 6941 "Expected an i32 store"); 6942 6943 RLI.Ptr = FIdx; 6944 RLI.Chain = Store; 6945 RLI.MPI = 6946 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 6947 RLI.Alignment = 4; 6948 } 6949 6950 MachineMemOperand *MMO = 6951 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6952 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6953 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6954 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 6955 PPCISD::LFIWZX : PPCISD::LFIWAX, 6956 dl, DAG.getVTList(MVT::f64, MVT::Other), 6957 Ops, MVT::i32, MMO); 6958 if (ReusingLoad) 6959 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG); 6960 } else { 6961 assert(Subtarget.isPPC64() && 6962 "i32->FP without LFIWAX supported only on PPC64"); 6963 6964 int FrameIdx = MFI.CreateStackObject(8, 8, false); 6965 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6966 6967 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, 6968 Op.getOperand(0)); 6969 6970 // STD the extended value into the stack slot. 6971 SDValue Store = DAG.getStore( 6972 DAG.getEntryNode(), dl, Ext64, FIdx, 6973 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx)); 6974 6975 // Load the value as a double. 6976 Ld = DAG.getLoad( 6977 MVT::f64, dl, Store, FIdx, 6978 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx)); 6979 } 6980 6981 // FCFID it and return it. 
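  // (FCFID converts the i64 bit pattern now sitting in an FPR to double
  // precision; the FCFIDU/FCFIDS/FCFIDUS variants selected into FCFOp above
  // cover the unsigned and single-precision cases when FPCVT is available.)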
  SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
  if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
    FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
                     DAG.getIntPtrConstant(0, dl));
  return FP;
}

SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  /*
   The rounding mode is in bits 30:31 of FPSCR, and has the following
   settings:
     00 Round to nearest
     01 Round to 0
     10 Round to +inf
     11 Round to -inf

  FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

  To perform the conversion, we do:
    ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
  */

  MachineFunction &MF = DAG.getMachineFunction();
  EVT VT = Op.getValueType();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Save FP Control Word to register.
  EVT NodeTys[] = {
    MVT::f64,    // return register
    MVT::Glue    // unused in this context
  };
  SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);

  // Save FP register to stack slot.
  int SSFI = MF.getFrameInfo().CreateStackObject(8, 8, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, StackSlot,
                               MachinePointerInfo());

  // Load FP Control Word from low 32 bits of stack slot.
  SDValue Four = DAG.getConstant(4, dl, PtrVT);
  SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
  SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo());

  // Transform as necessary.
  SDValue CWD1 =
    DAG.getNode(ISD::AND, dl, MVT::i32,
                CWD, DAG.getConstant(3, dl, MVT::i32));
  SDValue CWD2 =
    DAG.getNode(ISD::SRL, dl, MVT::i32,
                DAG.getNode(ISD::AND, dl, MVT::i32,
                            DAG.getNode(ISD::XOR, dl, MVT::i32,
                                        CWD, DAG.getConstant(3, dl, MVT::i32)),
                            DAG.getConstant(3, dl, MVT::i32)),
                DAG.getConstant(1, dl, MVT::i32));

  SDValue RetVal =
    DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);

  return DAG.getNode((VT.getSizeInBits() < 16 ?
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
}

SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  SDLoc dl(Op);
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SHL!");

  // Expand into a bunch of logical ops. Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
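  // In pseudo-code, relying on PPC shifts by amounts >= BitWidth producing
  // zero:
  //   OutLo = Lo << Amt
  //   OutHi = (Hi << Amt) | (Lo >> (BitWidth - Amt)) | (Lo << (Amt - BitWidth))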
7062 SDValue Lo = Op.getOperand(0); 7063 SDValue Hi = Op.getOperand(1); 7064 SDValue Amt = Op.getOperand(2); 7065 EVT AmtVT = Amt.getValueType(); 7066 7067 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 7068 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 7069 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 7070 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 7071 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 7072 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 7073 DAG.getConstant(-BitWidth, dl, AmtVT)); 7074 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 7075 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 7076 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 7077 SDValue OutOps[] = { OutLo, OutHi }; 7078 return DAG.getMergeValues(OutOps, dl); 7079 } 7080 7081 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 7082 EVT VT = Op.getValueType(); 7083 SDLoc dl(Op); 7084 unsigned BitWidth = VT.getSizeInBits(); 7085 assert(Op.getNumOperands() == 3 && 7086 VT == Op.getOperand(1).getValueType() && 7087 "Unexpected SRL!"); 7088 7089 // Expand into a bunch of logical ops. Note that these ops 7090 // depend on the PPC behavior for oversized shift amounts. 7091 SDValue Lo = Op.getOperand(0); 7092 SDValue Hi = Op.getOperand(1); 7093 SDValue Amt = Op.getOperand(2); 7094 EVT AmtVT = Amt.getValueType(); 7095 7096 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 7097 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 7098 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 7099 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 7100 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 7101 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 7102 DAG.getConstant(-BitWidth, dl, AmtVT)); 7103 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 7104 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 7105 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 7106 SDValue OutOps[] = { OutLo, OutHi }; 7107 return DAG.getMergeValues(OutOps, dl); 7108 } 7109 7110 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 7111 SDLoc dl(Op); 7112 EVT VT = Op.getValueType(); 7113 unsigned BitWidth = VT.getSizeInBits(); 7114 assert(Op.getNumOperands() == 3 && 7115 VT == Op.getOperand(1).getValueType() && 7116 "Unexpected SRA!"); 7117 7118 // Expand into a bunch of logical ops, followed by a select_cc. 7119 SDValue Lo = Op.getOperand(0); 7120 SDValue Hi = Op.getOperand(1); 7121 SDValue Amt = Op.getOperand(2); 7122 EVT AmtVT = Amt.getValueType(); 7123 7124 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 7125 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 7126 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 7127 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 7128 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 7129 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 7130 DAG.getConstant(-BitWidth, dl, AmtVT)); 7131 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 7132 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 7133 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT), 7134 Tmp4, Tmp6, ISD::SETLE); 7135 SDValue OutOps[] = { OutLo, OutHi }; 7136 return DAG.getMergeValues(OutOps, dl); 7137 } 7138 7139 //===----------------------------------------------------------------------===// 7140 // Vector related lowering. 
7141 // 7142 7143 /// BuildSplatI - Build a canonical splati of Val with an element size of 7144 /// SplatSize. Cast the result to VT. 7145 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 7146 SelectionDAG &DAG, const SDLoc &dl) { 7147 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 7148 7149 static const MVT VTys[] = { // canonical VT to use for each size. 7150 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 7151 }; 7152 7153 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 7154 7155 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 7156 if (Val == -1) 7157 SplatSize = 1; 7158 7159 EVT CanonicalVT = VTys[SplatSize-1]; 7160 7161 // Build a canonical splat for this value. 7162 return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT)); 7163 } 7164 7165 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 7166 /// specified intrinsic ID. 7167 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG, 7168 const SDLoc &dl, EVT DestVT = MVT::Other) { 7169 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 7170 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7171 DAG.getConstant(IID, dl, MVT::i32), Op); 7172 } 7173 7174 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 7175 /// specified intrinsic ID. 7176 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 7177 SelectionDAG &DAG, const SDLoc &dl, 7178 EVT DestVT = MVT::Other) { 7179 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 7180 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7181 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS); 7182 } 7183 7184 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 7185 /// specified intrinsic ID. 7186 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 7187 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl, 7188 EVT DestVT = MVT::Other) { 7189 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 7190 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7191 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2); 7192 } 7193 7194 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 7195 /// amount. The result has the specified value type. 7196 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT, 7197 SelectionDAG &DAG, const SDLoc &dl) { 7198 // Force LHS/RHS to be the right type. 7199 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 7200 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 7201 7202 int Ops[16]; 7203 for (unsigned i = 0; i != 16; ++i) 7204 Ops[i] = i + Amt; 7205 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 7206 return DAG.getNode(ISD::BITCAST, dl, VT, T); 7207 } 7208 7209 /// Do we have an efficient pattern in a .td file for this node? 7210 /// 7211 /// \param V - pointer to the BuildVectorSDNode being matched 7212 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves? 7213 /// 7214 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR 7215 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where 7216 /// the opposite is true (expansion is beneficial) are: 7217 /// - The node builds a vector out of integers that are not 32 or 64-bits 7218 /// - The node builds a vector out of constants 7219 /// - The node is a "load-and-splat" 7220 /// In all other cases, we will choose to keep the BUILD_VECTOR. 
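/// For example (illustrative only): a v4f32 built as four loads of the same
/// address is a load-and-splat, so expansion is preferred, while a v4i32
/// built from four distinct GPR values is kept as a BUILD_VECTOR when direct
/// moves are available.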
7221 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V, 7222 bool HasDirectMove) { 7223 EVT VecVT = V->getValueType(0); 7224 bool RightType = VecVT == MVT::v2f64 || VecVT == MVT::v4f32 || 7225 (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32)); 7226 if (!RightType) 7227 return false; 7228 7229 bool IsSplat = true; 7230 bool IsLoad = false; 7231 SDValue Op0 = V->getOperand(0); 7232 7233 // This function is called in a block that confirms the node is not a constant 7234 // splat. So a constant BUILD_VECTOR here means the vector is built out of 7235 // different constants. 7236 if (V->isConstant()) 7237 return false; 7238 for (int i = 0, e = V->getNumOperands(); i < e; ++i) { 7239 if (V->getOperand(i).isUndef()) 7240 return false; 7241 // We want to expand nodes that represent load-and-splat even if the 7242 // loaded value is a floating point truncation or conversion to int. 7243 if (V->getOperand(i).getOpcode() == ISD::LOAD || 7244 (V->getOperand(i).getOpcode() == ISD::FP_ROUND && 7245 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) || 7246 (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT && 7247 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) || 7248 (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT && 7249 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD)) 7250 IsLoad = true; 7251 // If the operands are different or the input is not a load and has more 7252 // uses than just this BV node, then it isn't a splat. 7253 if (V->getOperand(i) != Op0 || 7254 (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode()))) 7255 IsSplat = false; 7256 } 7257 return !(IsSplat && IsLoad); 7258 } 7259 7260 // If this is a case we can't handle, return null and let the default 7261 // expansion code take care of it. If we CAN select this case, and if it 7262 // selects to a single instruction, return Op. Otherwise, if we can codegen 7263 // this case more efficiently than a constant pool load, lower it to the 7264 // sequence of ops that should be used. 7265 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 7266 SelectionDAG &DAG) const { 7267 SDLoc dl(Op); 7268 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 7269 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 7270 7271 if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) { 7272 // We first build an i32 vector, load it into a QPX register, 7273 // then convert it to a floating-point vector and compare it 7274 // to a zero vector to get the boolean result. 
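    // A sketch of the sequence built below: store the four elements to a
    // 16-byte stack slot, load that slot into a QPX register with
    // ppc_qpx_qvlfiwz, convert it to v4f64 with ppc_qpx_qvfcfidu, and then
    // setcc against 0.0 to produce the v4i1 result.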
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    int FrameIdx = MFI.CreateStackObject(16, 16, false);
    MachinePointerInfo PtrInfo =
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

    assert(BVN->getNumOperands() == 4 &&
           "BUILD_VECTOR for v4i1 does not have 4 operands");

    bool IsConst = true;
    for (unsigned i = 0; i < 4; ++i) {
      if (BVN->getOperand(i).isUndef()) continue;
      if (!isa<ConstantSDNode>(BVN->getOperand(i))) {
        IsConst = false;
        break;
      }
    }

    if (IsConst) {
      Constant *One =
          ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0);
      Constant *NegOne =
          ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0);

      Constant *CV[4];
      for (unsigned i = 0; i < 4; ++i) {
        if (BVN->getOperand(i).isUndef())
          CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext()));
        else if (isNullConstant(BVN->getOperand(i)))
          CV[i] = NegOne;
        else
          CV[i] = One;
      }

      Constant *CP = ConstantVector::get(CV);
      SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()),
                                          16 /* alignment */);

      SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
      SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other});
      return DAG.getMemIntrinsicNode(
          PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    }

    SmallVector<SDValue, 4> Stores;
    for (unsigned i = 0; i < 4; ++i) {
      if (BVN->getOperand(i).isUndef()) continue;

      unsigned Offset = 4*i;
      SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
      Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

      unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize();
      if (StoreSize > 4) {
        Stores.push_back(
            DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx,
                              PtrInfo.getWithOffset(Offset), MVT::i32));
      } else {
        SDValue StoreValue = BVN->getOperand(i);
        if (StoreSize < 4)
          StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue);

        Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx,
                                      PtrInfo.getWithOffset(Offset)));
      }
    }

    SDValue StoreChain;
    if (!Stores.empty())
      StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
    else
      StoreChain = DAG.getEntryNode();

    // Now load from v4i32 into the QPX register; this will extend it to
    // v4i64 but not yet convert it to a floating point. Nevertheless, this
    // is typed as v4f64 because the QPX register integer states are not
    // explicitly represented.

    SDValue Ops[] = {StoreChain,
                     DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32),
                     FIdx};
    SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other});

    SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN,
                                                 dl, VTs, Ops, MVT::v4i32,
                                                 PtrInfo);
    LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
                             DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl,
                                             MVT::i32),
                             LoadedVect);

    SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64);

    return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ);
  }

  // All other QPX vectors are handled by generic code.
  if (Subtarget.hasQPX())
    return SDValue();

  // Check if this is a splat of a constant value.
  APInt APSplatBits, APSplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
                            HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
      SplatBitSize > 32) {
    // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be
    // lowered to VSX instructions under certain conditions.
    // Without VSX, there is no pattern more efficient than expanding the node.
    if (Subtarget.hasVSX() &&
        haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove()))
      return Op;
    return SDValue();
  }

  unsigned SplatBits = APSplatBits.getZExtValue();
  unsigned SplatUndef = APSplatUndef.getZExtValue();
  unsigned SplatSize = SplatBitSize / 8;

  // First, handle single instruction cases.

  // All zeros?
  if (SplatBits == 0) {
    // Canonicalize all zero vectors to be v4i32.
    if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
      SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
      Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
    }
    return Op;
  }

  // We have XXSPLTIB for constant splats one byte wide.
  if (Subtarget.hasP9Vector() && SplatSize == 1) {
    // This is a splat of 1-byte elements with some elements potentially undef.
    // Rather than trying to match undef in the SDAG patterns, ensure that all
    // elements are the same constant.
    if (HasAnyUndefs || ISD::isBuildVectorAllOnes(BVN)) {
      SmallVector<SDValue, 16> Ops(16, DAG.getConstant(SplatBits,
                                                       dl, MVT::i32));
      SDValue NewBV = DAG.getBuildVector(MVT::v16i8, dl, Ops);
      if (Op.getValueType() != MVT::v16i8)
        return DAG.getBitcast(Op.getValueType(), NewBV);
      return NewBV;
    }
    return Op;
  }

  // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
  int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
                     (32-SplatBitSize));
  if (SextVal >= -16 && SextVal <= 15)
    return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);

  // Two instruction sequences.

  // If this value is in the range [-32,30] and is even, use:
  //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
  // If this value is in the range [17,31] and is odd, use:
  //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
  // If this value is in the range [-31,-17] and is odd, use:
  //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
  // Note the last two are three-instruction sequences.
  if (SextVal >= -32 && SextVal <= 31) {
    // To avoid having these optimizations undone by constant folding,
    // we convert to a pseudo that will be expanded later into one of
    // the above forms.
    SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
    EVT VT = (SplatSize == 1 ? MVT::v16i8 :
              (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
    SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
    SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
    if (VT == Op.getValueType())
      return RetVal;
    else
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
  }

  // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
  // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important
  // for fneg/fabs.
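  // (0x8000_0000 is the sign-bit mask that fneg-style code XORs in, and its
  // complement 0x7FFF_FFFF is the mask that clears the sign bit for
  // fabs-style code, so these splats show up frequently.)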
  if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
    // Make -1 and vspltisw -1:
    SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);

    // Make the VSLW intrinsic, computing 0x8000_0000.
    SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                   OnesV, DAG, dl);

    // xor by OnesV to invert it.
    Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
  }

  // Check to see if this is a wide variety of vsplti*, binop self cases.
  static const signed char SplatCsts[] = {
    -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
    -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
  };

  for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous (e.g. formation of 0x8000_0000).
    int i = SplatCsts[idx];

    // Figure out what shift amount will be used by altivec if shifted by i in
    // this splat size.
    unsigned TypeShiftAmt = i & (SplatBitSize-1);

    // vsplti + shl self.
    if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
        Intrinsic::ppc_altivec_vslw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + srl self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
        Intrinsic::ppc_altivec_vsrw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + sra self.
    if (SextVal == ((int)i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
        Intrinsic::ppc_altivec_vsraw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + rol self.
    if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                         ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
        Intrinsic::ppc_altivec_vrlw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // t = vsplti c, result = vsldoi t, t, 1
    if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
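      // vsldoi of T with itself is a byte rotate of the splat; the shift
      // amount is mirrored (16 - Amt) on little-endian because the lane
      // order is reversed there.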
      unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 2
    if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 3
    if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
  }

  return SDValue();
}

/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
                                      SDValue RHS, SelectionDAG &DAG,
                                      const SDLoc &dl) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);

  enum {
    OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
    OP_VMRGHW,
    OP_VMRGLW,
    OP_VSPLTISW0,
    OP_VSPLTISW1,
    OP_VSPLTISW2,
    OP_VSPLTISW3,
    OP_VSLDOI4,
    OP_VSLDOI8,
    OP_VSLDOI12
  };

  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }

  SDValue OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);

  int ShufIdxs[16];
  switch (OpNum) {
  default: llvm_unreachable("Unknown i32 permute!");
  case OP_VMRGHW:
    ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
    ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
    ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
    ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
    break;
  case OP_VMRGLW:
    ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
    ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
    ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
    ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
    break;
  case OP_VSPLTISW0:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+0;
    break;
  case OP_VSPLTISW1:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+4;
    break;
  case OP_VSPLTISW2:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+8;
    break;
  case OP_VSPLTISW3:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+12;
    break;
  case OP_VSLDOI4:
    return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI8:
    return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI12:
    return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
  }
  EVT VT = OpLHS.getValueType();
  OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
  OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
  return DAG.getNode(ISD::BITCAST, dl, VT, T);
}

/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
/// is a shuffle we can handle in a single instruction, return it. Otherwise,
/// return the code it can be lowered into. Worst case, it can always be
/// lowered into a vperm.
SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  EVT VT = Op.getValueType();
  bool isLittleEndian = Subtarget.isLittleEndian();

  unsigned ShiftElts, InsertAtByte;
  bool Swap;
  if (Subtarget.hasP9Vector() &&
      PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
                           isLittleEndian)) {
    if (Swap)
      std::swap(V1, V2);
    SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
    SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
    if (ShiftElts) {
      SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
                                DAG.getConstant(ShiftElts, dl, MVT::i32));
      SDValue Ins = DAG.getNode(PPCISD::XXINSERT, dl, MVT::v4i32, Conv1, Shl,
                                DAG.getConstant(InsertAtByte, dl, MVT::i32));
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
    }
    SDValue Ins = DAG.getNode(PPCISD::XXINSERT, dl, MVT::v4i32, Conv1, Conv2,
                              DAG.getConstant(InsertAtByte, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
  }

  if (Subtarget.hasVSX()) {
    if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
      int SplatIdx = PPC::getVSPLTImmediate(SVOp, 4, DAG);

      // If the source for the shuffle is a scalar_to_vector that came from a
      // 32-bit load, it will have used LXVWSX so we don't need to splat again.
      if (Subtarget.hasP9Vector() &&
          ((isLittleEndian && SplatIdx == 3) ||
           (!isLittleEndian && SplatIdx == 0))) {
        SDValue Src = V1.getOperand(0);
        if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR &&
            Src.getOperand(0).getOpcode() == ISD::LOAD &&
            Src.getOperand(0).hasOneUse())
          return V1;
      }
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
      SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
                                  DAG.getConstant(SplatIdx, dl, MVT::i32));
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
    }

    // Left shifts of 8 bytes are actually swaps. Convert accordingly.
    if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
      SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
    }
  }

  if (Subtarget.hasQPX()) {
    if (VT.getVectorNumElements() != 4)
      return SDValue();

    if (V2.isUndef()) V2 = V1;

    int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp);
    if (AlignIdx != -1) {
      return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2,
                         DAG.getConstant(AlignIdx, dl, MVT::i32));
    } else if (SVOp->isSplat()) {
      int SplatIdx = SVOp->getSplatIndex();
      if (SplatIdx >= 4) {
        std::swap(V1, V2);
        SplatIdx -= 4;
      }

      return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1,
                         DAG.getConstant(SplatIdx, dl, MVT::i32));
    }

    // Lower this into a qvgpci/qvfperm pair.
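    // qvgpci builds a permutation control value from a 12-bit immediate:
    // four 3-bit fields, one per output lane, each selecting one of the
    // eight input lanes of V1:V2. qvfperm then applies that control value.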

    // Compute the qvgpci literal.
    unsigned idx = 0;
    for (unsigned i = 0; i < 4; ++i) {
      int m = SVOp->getMaskElt(i);
      unsigned mm = m >= 0 ? (unsigned) m : i;
      idx |= mm << (3-i)*3;
    }

    SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64,
                             DAG.getConstant(idx, dl, MVT::i32));
    return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3);
  }

  // Cases that are handled by instructions that take permute immediates
  // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
  // selected by the instruction selector.
  if (V2.isUndef()) {
    if (PPC::isSplatShuffleMask(SVOp, 1) ||
        PPC::isSplatShuffleMask(SVOp, 2) ||
        PPC::isSplatShuffleMask(SVOp, 4) ||
        PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
        PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
        PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
        PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
        PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
        PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
        PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
        PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
        PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
        (Subtarget.hasP8Altivec() && (
         PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
         PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
         PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
      return Op;
    }
  }

  // Altivec has a variety of "shuffle immediates" that take two vector inputs
  // and produce a fixed permutation. If any of these match, do not lower to
  // VPERM.
  unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
  if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
      PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
      PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
      PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
      PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
      PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
      (Subtarget.hasP8Altivec() && (
       PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
       PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
       PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
    return Op;

  // Check to see if this is a shuffle of 4-byte values. If so, we can use our
  // perfect shuffle table to emit an optimal matching sequence.
  ArrayRef<int> PermMask = SVOp->getMask();

  unsigned PFIndexes[4];
  bool isFourElementShuffle = true;
  for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
    unsigned EltNo = 8;  // Start out undef.
    for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
      if (PermMask[i*4+j] < 0)
        continue;  // Undef, ignore it.

      unsigned ByteSource = PermMask[i*4+j];
      if ((ByteSource & 3) != j) {
        isFourElementShuffle = false;
        break;
      }

      if (EltNo == 8) {
        EltNo = ByteSource/4;
      } else if (EltNo != ByteSource/4) {
        isFourElementShuffle = false;
        break;
      }
    }
    PFIndexes[i] = EltNo;
  }
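
  // Each PFIndexes[i] is a digit in [0,8]: 0-3 selects a word of V1, 4-7 a
  // word of V2, and 8 leaves output word i undef. The four digits, packed
  // base-9 below, index the 9^4-entry perfect shuffle table.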

  // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
  // perfect shuffle vector to determine if it is cost effective to do this as
  // discrete instructions, or whether we should use a vperm.
  // For now, we skip this for little endian until such time as we have a
  // little-endian perfect shuffle table.
  if (isFourElementShuffle && !isLittleEndian) {
    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];

    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    // Determining when to avoid vperm is tricky. Many things affect the cost
    // of vperm, particularly how many times the perm mask needs to be
    // computed. For example, if the perm mask can be hoisted out of a loop or
    // is already used (perhaps because there are multiple permutes with the
    // same shuffle mask?) the vperm has a cost of 1. OTOH, hoisting the
    // permute mask out of the loop requires an extra register.
    //
    // As a compromise, we only emit discrete instructions if the shuffle can
    // be generated in 3 or fewer operations. When we have loop information
    // available, if this block is within a loop, we should avoid using vperm
    // for 3-operation perms and use a constant pool load instead.
    if (Cost < 3)
      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
  }

  // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
  // vector that will get spilled to the constant pool.
  if (V2.isUndef()) V2 = V1;

  // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
  // that it is in input element units, not in bytes. Convert now.

  // For little endian, the order of the input vectors is reversed, and
  // the permutation mask is complemented with respect to 31. This is
  // necessary to produce proper semantics with the big-endian-biased vperm
  // instruction.
  EVT EltVT = V1.getValueType().getVectorElementType();
  unsigned BytesPerElement = EltVT.getSizeInBits()/8;

  SmallVector<SDValue, 16> ResultMask;
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
    unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];

    for (unsigned j = 0; j != BytesPerElement; ++j)
      if (isLittleEndian)
        ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
                                             dl, MVT::i32));
      else
        ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
                                             MVT::i32));
  }

  SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
  if (isLittleEndian)
    return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
                       V2, V1, VPermMask);
  else
    return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
                       V1, V2, VPermMask);
}

/// getVectorCompareInfo - Given an intrinsic, return false if it is not a
/// vector comparison. If it is, return true and fill in Opc/isDot with
/// information about the intrinsic.
static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
                                 bool &isDot, const PPCSubtarget &Subtarget) {
  unsigned IntrinsicID =
      cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
  CompareOpc = -1;
  isDot = false;
  switch (IntrinsicID) {
  default:
    return false;
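
  // Each CompareOpc value below is the extended-opcode field of the
  // corresponding hardware compare instruction; the "_p" predicate
  // intrinsics additionally set isDot to request the record (dot) form,
  // which updates CR6.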
  // Comparison predicates.
  case Intrinsic::ppc_altivec_vcmpbfp_p:
    CompareOpc = 966;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpeqfp_p:
    CompareOpc = 198;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequb_p:
    CompareOpc = 6;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequh_p:
    CompareOpc = 70;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequw_p:
    CompareOpc = 134;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequd_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 199;
      isDot = true;
    } else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpneb_p:
  case Intrinsic::ppc_altivec_vcmpneh_p:
  case Intrinsic::ppc_altivec_vcmpnew_p:
  case Intrinsic::ppc_altivec_vcmpnezb_p:
  case Intrinsic::ppc_altivec_vcmpnezh_p:
  case Intrinsic::ppc_altivec_vcmpnezw_p:
    if (Subtarget.hasP9Altivec()) {
      switch (IntrinsicID) {
      default:
        llvm_unreachable("Unknown comparison intrinsic.");
      case Intrinsic::ppc_altivec_vcmpneb_p:
        CompareOpc = 7;
        break;
      case Intrinsic::ppc_altivec_vcmpneh_p:
        CompareOpc = 71;
        break;
      case Intrinsic::ppc_altivec_vcmpnew_p:
        CompareOpc = 135;
        break;
      case Intrinsic::ppc_altivec_vcmpnezb_p:
        CompareOpc = 263;
        break;
      case Intrinsic::ppc_altivec_vcmpnezh_p:
        CompareOpc = 327;
        break;
      case Intrinsic::ppc_altivec_vcmpnezw_p:
        CompareOpc = 391;
        break;
      }
      isDot = true;
    } else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgefp_p:
    CompareOpc = 454;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtfp_p:
    CompareOpc = 710;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsb_p:
    CompareOpc = 774;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsh_p:
    CompareOpc = 838;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsw_p:
    CompareOpc = 902;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsd_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 967;
      isDot = true;
    } else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgtub_p:
    CompareOpc = 518;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuh_p:
    CompareOpc = 582;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuw_p:
    CompareOpc = 646;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtud_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 711;
      isDot = true;
    } else
      return false;
    break;

  // VSX predicate comparisons use the same infrastructure.
  case Intrinsic::ppc_vsx_xvcmpeqdp_p:
  case Intrinsic::ppc_vsx_xvcmpgedp_p:
  case Intrinsic::ppc_vsx_xvcmpgtdp_p:
  case Intrinsic::ppc_vsx_xvcmpeqsp_p:
  case Intrinsic::ppc_vsx_xvcmpgesp_p:
  case Intrinsic::ppc_vsx_xvcmpgtsp_p:
    if (Subtarget.hasVSX()) {
      switch (IntrinsicID) {
      case Intrinsic::ppc_vsx_xvcmpeqdp_p:
        CompareOpc = 99;
        break;
      case Intrinsic::ppc_vsx_xvcmpgedp_p:
        CompareOpc = 115;
        break;
      case Intrinsic::ppc_vsx_xvcmpgtdp_p:
        CompareOpc = 107;
        break;
      case Intrinsic::ppc_vsx_xvcmpeqsp_p:
        CompareOpc = 67;
        break;
      case Intrinsic::ppc_vsx_xvcmpgesp_p:
        CompareOpc = 83;
        break;
      case Intrinsic::ppc_vsx_xvcmpgtsp_p:
        CompareOpc = 75;
        break;
      }
      isDot = true;
    } else
      return false;
    break;

  // Normal Comparisons.
  case Intrinsic::ppc_altivec_vcmpbfp:
    CompareOpc = 966;
    break;
  case Intrinsic::ppc_altivec_vcmpeqfp:
    CompareOpc = 198;
    break;
  case Intrinsic::ppc_altivec_vcmpequb:
    CompareOpc = 6;
    break;
  case Intrinsic::ppc_altivec_vcmpequh:
    CompareOpc = 70;
    break;
  case Intrinsic::ppc_altivec_vcmpequw:
    CompareOpc = 134;
    break;
  case Intrinsic::ppc_altivec_vcmpequd:
    if (Subtarget.hasP8Altivec())
      CompareOpc = 199;
    else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpneb:
  case Intrinsic::ppc_altivec_vcmpneh:
  case Intrinsic::ppc_altivec_vcmpnew:
  case Intrinsic::ppc_altivec_vcmpnezb:
  case Intrinsic::ppc_altivec_vcmpnezh:
  case Intrinsic::ppc_altivec_vcmpnezw:
    if (Subtarget.hasP9Altivec())
      switch (IntrinsicID) {
      default:
        llvm_unreachable("Unknown comparison intrinsic.");
      case Intrinsic::ppc_altivec_vcmpneb:
        CompareOpc = 7;
        break;
      case Intrinsic::ppc_altivec_vcmpneh:
        CompareOpc = 71;
        break;
      case Intrinsic::ppc_altivec_vcmpnew:
        CompareOpc = 135;
        break;
      case Intrinsic::ppc_altivec_vcmpnezb:
        CompareOpc = 263;
        break;
      case Intrinsic::ppc_altivec_vcmpnezh:
        CompareOpc = 327;
        break;
      case Intrinsic::ppc_altivec_vcmpnezw:
        CompareOpc = 391;
        break;
      }
    else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgefp:
    CompareOpc = 454;
    break;
  case Intrinsic::ppc_altivec_vcmpgtfp:
    CompareOpc = 710;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsb:
    CompareOpc = 774;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsh:
    CompareOpc = 838;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsw:
    CompareOpc = 902;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsd:
    if (Subtarget.hasP8Altivec())
      CompareOpc = 967;
    else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgtub:
    CompareOpc = 518;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuh:
    CompareOpc = 582;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuw:
    CompareOpc = 646;
    break;
  case Intrinsic::ppc_altivec_vcmpgtud:
    if (Subtarget.hasP8Altivec())
      CompareOpc = 711;
    else
      return false;
    break;
  }
  return true;
}

/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
/// lower, do it, otherwise return null.
SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                   SelectionDAG &DAG) const {
  unsigned IntrinsicID =
      cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  if (IntrinsicID == Intrinsic::thread_pointer) {
    // Reads the thread pointer register, used for __builtin_thread_pointer.
    bool is64bit = Subtarget.isPPC64();
    return DAG.getRegister(is64bit ? PPC::X13 : PPC::R2,
                           is64bit ? MVT::i64 : MVT::i32);
  }

  // If this is a lowered altivec predicate compare, CompareOpc is set to the
  // opcode number of the comparison.
  SDLoc dl(Op);
  int CompareOpc;
  bool isDot;
  if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
    return SDValue();    // Don't custom lower most intrinsics.

  // If this is a non-dot comparison, make the VCMP node and we are done.
  if (!isDot) {
    SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
                              Op.getOperand(1), Op.getOperand(2),
                              DAG.getConstant(CompareOpc, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
  }

  // Create the PPCISD altivec 'dot' comparison node.
  SDValue Ops[] = {
    Op.getOperand(2),  // LHS
    Op.getOperand(3),  // RHS
    DAG.getConstant(CompareOpc, dl, MVT::i32)
  };
  EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
  SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);

  // Now that we have the comparison, emit a copy from the CR to a GPR.
  // This is flagged to the above dot comparison.
  SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
                              DAG.getRegister(PPC::CR6, MVT::i32),
                              CompNode.getValue(1));

  // Unpack the result based on how the target uses it.
  unsigned BitNo;   // Bit # of CR6.
  bool InvertBit;   // Invert result?
  switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
  default:  // Can't happen, don't crash on invalid number though.
  case 0:   // Return the value of the EQ bit of CR6.
    BitNo = 0; InvertBit = false;
    break;
  case 1:   // Return the inverted value of the EQ bit of CR6.
    BitNo = 0; InvertBit = true;
    break;
  case 2:   // Return the value of the LT bit of CR6.
    BitNo = 2; InvertBit = false;
    break;
  case 3:   // Return the inverted value of the LT bit of CR6.
    BitNo = 2; InvertBit = true;
    break;
  }

  // Shift the bit into the low position.
  Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
                      DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
  // Isolate the bit.
  Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
                      DAG.getConstant(1, dl, MVT::i32));

  // If we are supposed to, toggle the bit.
  if (InvertBit)
    Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
                        DAG.getConstant(1, dl, MVT::i32));
  return Flags;
}

SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // For v2i64 (VSX), we can pattern match the v2i32 case (using fp <-> int
  // instructions), but for smaller types, we need to first extend up to v2i32
  // before going any farther.
  if (Op.getValueType() == MVT::v2i64) {
    EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    if (ExtVT != MVT::v2i32) {
      Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0));
      Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op,
                       DAG.getValueType(EVT::getVectorVT(*DAG.getContext(),
                                        ExtVT.getVectorElementType(), 4)));
      Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op);
      Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op,
                       DAG.getValueType(MVT::v2i32));
    }

    return Op;
  }

  return SDValue();
}

SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // Create a stack slot that is 16-byte aligned.
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(16, 16, false);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
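
  // Going through memory is not fast, but it is simple and always correct:
  // only element 0 of a SCALAR_TO_VECTOR result is defined, so whatever the
  // wide reload picks up in the remaining lanes is acceptable as undef.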
  // Store the input value into Value#0 of the stack slot.
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
                               MachinePointerInfo());
  // Load it out.
  return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
         "Should only be called for ISD::INSERT_VECTOR_ELT");
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
  // We have legal lowering for constant indices but not for variable ones.
  if (C)
    return Op;
  return SDValue();
}

SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDNode *N = Op.getNode();

  assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
         "Unknown extract_vector_elt type");

  SDValue Value = N->getOperand(0);

  // The first part of this is like the store lowering except that we don't
  // need to track the chain.

  // The values are now known to be -1 (false) or 1 (true). To convert this
  // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
  // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
  Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

  // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
  // understand how to form the extending load.
  SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);

  Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

  // Now convert to an integer and store.
  Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
                      DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
                      Value);

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(16, 16, false);
  MachinePointerInfo PtrInfo =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  SDValue StoreChain = DAG.getEntryNode();
  SDValue Ops[] = {StoreChain,
                   DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
                   Value, FIdx};
  SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);

  StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
                                       dl, VTs, Ops, MVT::v4i32, PtrInfo);
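
  // The boolean lanes now sit in the stack slot as 32-bit integers (qvstfiw
  // stores four words), so the requested lane can be reloaded as a plain i32.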
  // Extract the value requested.
  unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
  SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
  Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

  SDValue IntVal =
      DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset));

  if (!Subtarget.useCRBits())
    return IntVal;

  return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal);
}

/// Lowering for QPX v4i1 loads
SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc dl(Op);
  LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
  SDValue LoadChain = LN->getChain();
  SDValue BasePtr = LN->getBasePtr();

  if (Op.getValueType() == MVT::v4f64 ||
      Op.getValueType() == MVT::v4f32) {
    EVT MemVT = LN->getMemoryVT();
    unsigned Alignment = LN->getAlignment();

    // If this load is properly aligned, then it is legal.
    if (Alignment >= MemVT.getStoreSize())
      return Op;

    EVT ScalarVT = Op.getValueType().getScalarType(),
        ScalarMemVT = MemVT.getScalarType();
    unsigned Stride = ScalarMemVT.getStoreSize();

    SDValue Vals[4], LoadChains[4];
    for (unsigned Idx = 0; Idx < 4; ++Idx) {
      SDValue Load;
      if (ScalarVT != ScalarMemVT)
        Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain,
                              BasePtr,
                              LN->getPointerInfo().getWithOffset(Idx * Stride),
                              ScalarMemVT, MinAlign(Alignment, Idx * Stride),
                              LN->getMemOperand()->getFlags(), LN->getAAInfo());
      else
        Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr,
                           LN->getPointerInfo().getWithOffset(Idx * Stride),
                           MinAlign(Alignment, Idx * Stride),
                           LN->getMemOperand()->getFlags(), LN->getAAInfo());

      if (Idx == 0 && LN->isIndexed()) {
        assert(LN->getAddressingMode() == ISD::PRE_INC &&
               "Unknown addressing mode on vector load");
        Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(),
                                  LN->getAddressingMode());
      }

      Vals[Idx] = Load;
      LoadChains[Idx] = Load.getValue(1);

      BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                            DAG.getConstant(Stride, dl,
                                            BasePtr.getValueType()));
    }

    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
    SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals);

    if (LN->isIndexed()) {
      SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
      return DAG.getMergeValues(RetOps, dl);
    }

    SDValue RetOps[] = { Value, TF };
    return DAG.getMergeValues(RetOps, dl);
  }

  assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower");
  assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported");

  // To lower v4i1 from a byte array, we load the byte elements of the
  // vector and then reuse the BUILD_VECTOR logic.
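  // (In memory a v4i1 occupies four bytes, one per lane, which is why each
  // lane below is an i8 extending load.)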

  SDValue VectElmts[4], VectElmtChains[4];
  for (unsigned i = 0; i < 4; ++i) {
    SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);

    VectElmts[i] = DAG.getExtLoad(
        ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx,
        LN->getPointerInfo().getWithOffset(i), MVT::i8,
        /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo());
    VectElmtChains[i] = VectElmts[i].getValue(1);
  }

  LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains);
  SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts);

  SDValue RVals[] = { Value, LoadChain };
  return DAG.getMergeValues(RVals, dl);
}

/// Lowering for QPX v4i1 stores
SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
  SDValue StoreChain = SN->getChain();
  SDValue BasePtr = SN->getBasePtr();
  SDValue Value = SN->getValue();

  if (Value.getValueType() == MVT::v4f64 ||
      Value.getValueType() == MVT::v4f32) {
    EVT MemVT = SN->getMemoryVT();
    unsigned Alignment = SN->getAlignment();

    // If this store is properly aligned, then it is legal.
    if (Alignment >= MemVT.getStoreSize())
      return Op;

    EVT ScalarVT = Value.getValueType().getScalarType(),
        ScalarMemVT = MemVT.getScalarType();
    unsigned Stride = ScalarMemVT.getStoreSize();

    SDValue Stores[4];
    for (unsigned Idx = 0; Idx < 4; ++Idx) {
      SDValue Ex = DAG.getNode(
          ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value,
          DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Store;
      if (ScalarVT != ScalarMemVT)
        Store =
            DAG.getTruncStore(StoreChain, dl, Ex, BasePtr,
                              SN->getPointerInfo().getWithOffset(Idx * Stride),
                              ScalarMemVT, MinAlign(Alignment, Idx * Stride),
                              SN->getMemOperand()->getFlags(), SN->getAAInfo());
      else
        Store = DAG.getStore(StoreChain, dl, Ex, BasePtr,
                             SN->getPointerInfo().getWithOffset(Idx * Stride),
                             MinAlign(Alignment, Idx * Stride),
                             SN->getMemOperand()->getFlags(), SN->getAAInfo());

      if (Idx == 0 && SN->isIndexed()) {
        assert(SN->getAddressingMode() == ISD::PRE_INC &&
               "Unknown addressing mode on vector store");
        Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(),
                                    SN->getAddressingMode());
      }

      BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                            DAG.getConstant(Stride, dl,
                                            BasePtr.getValueType()));
      Stores[Idx] = Store;
    }

    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

    if (SN->isIndexed()) {
      SDValue RetOps[] = { TF, Stores[0].getValue(1) };
      return DAG.getMergeValues(RetOps, dl);
    }

    return TF;
  }

  assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported");
  assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower");

  // The values are now known to be -1 (false) or 1 (true). To convert this
  // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
  // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
  Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

  // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
  // understand how to form the extending load.
  SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);

  Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

  // Now convert to an integer and store.
  Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
                      DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
                      Value);

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(16, 16, false);
  MachinePointerInfo PtrInfo =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  SDValue Ops[] = {StoreChain,
                   DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
                   Value, FIdx};
  SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);

  StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
                                       dl, VTs, Ops, MVT::v4i32, PtrInfo);

  // Move data into the byte array.
  SDValue Loads[4], LoadChains[4];
  for (unsigned i = 0; i < 4; ++i) {
    unsigned Offset = 4*i;
    SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

    Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
                           PtrInfo.getWithOffset(Offset));
    LoadChains[i] = Loads[i].getValue(1);
  }

  StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);

  SDValue Stores[4];
  for (unsigned i = 0; i < 4; ++i) {
    SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);

    Stores[i] = DAG.getTruncStore(
        StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i),
        MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(),
        SN->getAAInfo());
  }

  StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

  return StoreChain;
}

SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  if (Op.getValueType() == MVT::v4i32) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDValue Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG, dl);
    SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl); // +16 as shift amt.

    SDValue RHSSwap =   // = vrlw RHS, 16
        BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);

    // Shrinkify inputs to v8i16.
    LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
    RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
    RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);

    // Low parts multiplied together, generating 32-bit results (we ignore the
    // top parts).
    SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
                                      LHS, RHS, DAG, dl, MVT::v4i32);

    SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
                                      LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
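
    // Per 32-bit lane, with xl/xh denoting the low/high halfwords:
    //   x*y mod 2^32 = xl*yl + ((xl*yh + xh*yl) << 16)
    // vmulouh produces the xl*yl terms, and vmsumuhm applied to the
    // halfword-rotated RHS accumulates xl*yh + xh*yl per lane.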
    // Shift the high parts up 16 bits.
    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
                              Neg16, DAG, dl);
    return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
  } else if (Op.getValueType() == MVT::v8i16) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);

    return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
                            LHS, RHS, Zero, DAG, dl);
  } else if (Op.getValueType() == MVT::v16i8) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
    bool isLittleEndian = Subtarget.isLittleEndian();

    // Multiply the even 8-bit parts, producing 16-bit products.
    SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
                                         LHS, RHS, DAG, dl, MVT::v8i16);
    EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);

    // Multiply the odd 8-bit parts, producing 16-bit products.
    SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
                                        LHS, RHS, DAG, dl, MVT::v8i16);
    OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);

    // Merge the results together. Because vmuleub and vmuloub are
    // instructions with a big-endian bias, we must reverse the
    // element numbering and reverse the meaning of "odd" and "even"
    // when generating little endian code.
    int Ops[16];
    for (unsigned i = 0; i != 8; ++i) {
      if (isLittleEndian) {
        Ops[i*2  ] = 2*i;
        Ops[i*2+1] = 2*i+16;
      } else {
        Ops[i*2  ] = 2*i+1;
        Ops[i*2+1] = 2*i+1+16;
      }
    }
    if (isLittleEndian)
      return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
    else
      return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
  } else {
    llvm_unreachable("Unknown mul to lower!");
  }
}

/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Wasn't expecting to be able to lower this!");
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
  case ISD::SETCC:              return LowerSETCC(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::VACOPY:             return LowerVACOPY(Op, DAG);
  case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::GET_DYNAMIC_AREA_OFFSET:
    return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
  case ISD::EH_DWARF_CFA:       return LowerEH_DWARF_CFA(Op, DAG);
  case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
  case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
  case ISD::FP_TO_UINT:
  case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
  case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);

  // Lower 64-bit shifts.
  case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
  case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
  case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);

  // Vector-related lowering.
  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::SIGN_EXTEND_INREG:  return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::MUL:                return LowerMUL(Op, DAG);

  // For counter-based loop handling.
  case ISD::INTRINSIC_W_CHAIN:  return SDValue();

  // Frame & Return address.
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  }
}

void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");
  case ISD::READCYCLECOUNTER: {
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
    SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));

    Results.push_back(RTB);
    Results.push_back(RTB.getValue(1));
    Results.push_back(RTB.getValue(2));
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
        Intrinsic::ppc_is_decremented_ctr_nonzero)
      break;

    assert(N->getValueType(0) == MVT::i1 &&
           "Unexpected result type for CTR decrement intrinsic");
    EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                 N->getValueType(0));
    SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
    SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
                                 N->getOperand(1));

    Results.push_back(NewInt);
    Results.push_back(NewInt.getValue(1));
    break;
  }
  case ISD::VAARG: {
    if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
      return;

    EVT VT = N->getValueType(0);

    if (VT == MVT::i64) {
      SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);

      Results.push_back(NewNode);
      Results.push_back(NewNode.getValue(1));
    }
    return;
  }
  case ISD::FP_ROUND_INREG: {
    assert(N->getValueType(0) == MVT::ppcf128);
    assert(N->getOperand(0).getValueType() == MVT::ppcf128);
    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                             MVT::f64, N->getOperand(0),
                             DAG.getIntPtrConstant(0, dl));
    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                             MVT::f64, N->getOperand(0),
                             DAG.getIntPtrConstant(1, dl));

    // Add the two halves of the long double in round-to-zero mode.
    SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);

    // We know the low half is about to be thrown away, so just use something
    // convenient.
    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128,
                                  FPreg, FPreg));
    return;
  }
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // LowerFP_TO_INT() can only handle f32 and f64.
    if (N->getOperand(0).getValueType() == MVT::ppcf128)
      return;
    Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
    return;
  }
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *Func = Intrinsic::getDeclaration(M, Id);
  return Builder.CreateCall(Func, {});
}

// The mappings for emitLeading/TrailingFence are taken from
// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                 AtomicOrdering Ord, bool IsStore,
                                                 bool IsLoad) const {
  if (Ord == AtomicOrdering::SequentiallyConsistent)
    return callIntrinsic(Builder, Intrinsic::ppc_sync);
  if (isReleaseOrStronger(Ord))
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  return nullptr;
}

Instruction* PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                  AtomicOrdering Ord, bool IsStore,
                                                  bool IsLoad) const {
  if (IsLoad && isAcquireOrStronger(Ord))
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  // FIXME: this is too conservative, a dependent branch + isync is enough.
  // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
  // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
  // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
  return nullptr;
}

MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
                                    unsigned AtomicSize,
                                    unsigned BinOpcode,
                                    unsigned CmpOpcode,
                                    unsigned CmpPred) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  auto LoadMnemonic = PPC::LDARX;
  auto StoreMnemonic = PPC::STDCX;
  switch (AtomicSize) {
  default:
    llvm_unreachable("Unexpected size of atomic entity");
  case 1:
    LoadMnemonic = PPC::LBARX;
    StoreMnemonic = PPC::STBCX;
    assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
    break;
  case 2:
    LoadMnemonic = PPC::LHARX;
    StoreMnemonic = PPC::STHCX;
    assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
    break;
  case 4:
    LoadMnemonic = PPC::LWARX;
    StoreMnemonic = PPC::STWCX;
    break;
  case 8:
    LoadMnemonic = PPC::LDARX;
    StoreMnemonic = PPC::STDCX;
    break;
  }

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = ++BB->getIterator();

  unsigned dest = MI.getOperand(0).getReg();
  unsigned ptrA = MI.getOperand(1).getReg();
  unsigned ptrB = MI.getOperand(2).getReg();
  unsigned incr = MI.getOperand(3).getReg();
  DebugLoc dl = MI.getDebugLoc();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
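  // Min/max operations (CmpOpcode != 0) need a second loop block: loopMBB
  // does the compare and can branch straight to exitMBB when the current
  // value is already the desired one, and only loop2MBB performs the
  // store-conditional.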
F->CreateMachineBasicBlock(LLVM_BB) : nullptr; 8813 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 8814 F->insert(It, loopMBB); 8815 if (CmpOpcode) 8816 F->insert(It, loop2MBB); 8817 F->insert(It, exitMBB); 8818 exitMBB->splice(exitMBB->begin(), BB, 8819 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8820 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 8821 8822 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8823 unsigned TmpReg = (!BinOpcode) ? incr : 8824 RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass 8825 : &PPC::GPRCRegClass); 8826 8827 // thisMBB: 8828 // ... 8829 // fallthrough --> loopMBB 8830 BB->addSuccessor(loopMBB); 8831 8832 // loopMBB: 8833 // l[wd]arx dest, ptr 8834 // add r0, dest, incr 8835 // st[wd]cx. r0, ptr 8836 // bne- loopMBB 8837 // fallthrough --> exitMBB 8838 8839 // For max/min... 8840 // loopMBB: 8841 // l[wd]arx dest, ptr 8842 // cmpl?[wd] incr, dest 8843 // bgt exitMBB 8844 // loop2MBB: 8845 // st[wd]cx. dest, ptr 8846 // bne- loopMBB 8847 // fallthrough --> exitMBB 8848 8849 BB = loopMBB; 8850 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 8851 .addReg(ptrA).addReg(ptrB); 8852 if (BinOpcode) 8853 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 8854 if (CmpOpcode) { 8855 // Signed comparisons of byte or halfword values must be sign-extended. 8856 if (CmpOpcode == PPC::CMPW && AtomicSize < 4) { 8857 unsigned ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 8858 BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH), 8859 ExtReg).addReg(dest); 8860 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 8861 .addReg(incr).addReg(ExtReg); 8862 } else 8863 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 8864 .addReg(incr).addReg(dest); 8865 8866 BuildMI(BB, dl, TII->get(PPC::BCC)) 8867 .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB); 8868 BB->addSuccessor(loop2MBB); 8869 BB->addSuccessor(exitMBB); 8870 BB = loop2MBB; 8871 } 8872 BuildMI(BB, dl, TII->get(StoreMnemonic)) 8873 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 8874 BuildMI(BB, dl, TII->get(PPC::BCC)) 8875 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 8876 BB->addSuccessor(loopMBB); 8877 BB->addSuccessor(exitMBB); 8878 8879 // exitMBB: 8880 // ... 8881 BB = exitMBB; 8882 return BB; 8883 } 8884 8885 MachineBasicBlock * 8886 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr &MI, 8887 MachineBasicBlock *BB, 8888 bool is8bit, // operation 8889 unsigned BinOpcode, 8890 unsigned CmpOpcode, 8891 unsigned CmpPred) const { 8892 // If we support part-word atomic mnemonics, just use them 8893 if (Subtarget.hasPartwordAtomics()) 8894 return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, 8895 CmpOpcode, CmpPred); 8896 8897 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 8898 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8899 // In 64 bit mode we have to use 64 bits for addresses, even though the 8900 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 8901 // registers without caring whether they're 32 or 64, but here we're 8902 // doing actual arithmetic on the addresses. 8903 bool is64bit = Subtarget.isPPC64(); 8904 bool isLittleEndian = Subtarget.isLittleEndian(); 8905 unsigned ZeroReg = is64bit ? 
PPC::ZERO8 : PPC::ZERO; 8906 8907 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8908 MachineFunction *F = BB->getParent(); 8909 MachineFunction::iterator It = ++BB->getIterator(); 8910 8911 unsigned dest = MI.getOperand(0).getReg(); 8912 unsigned ptrA = MI.getOperand(1).getReg(); 8913 unsigned ptrB = MI.getOperand(2).getReg(); 8914 unsigned incr = MI.getOperand(3).getReg(); 8915 DebugLoc dl = MI.getDebugLoc(); 8916 8917 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 8918 MachineBasicBlock *loop2MBB = 8919 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr; 8920 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 8921 F->insert(It, loopMBB); 8922 if (CmpOpcode) 8923 F->insert(It, loop2MBB); 8924 F->insert(It, exitMBB); 8925 exitMBB->splice(exitMBB->begin(), BB, 8926 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8927 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 8928 8929 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8930 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 8931 : &PPC::GPRCRegClass; 8932 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 8933 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 8934 unsigned ShiftReg = 8935 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC); 8936 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC); 8937 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 8938 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 8939 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 8940 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 8941 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC); 8942 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 8943 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 8944 unsigned Ptr1Reg; 8945 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC); 8946 8947 // thisMBB: 8948 // ... 8949 // fallthrough --> loopMBB 8950 BB->addSuccessor(loopMBB); 8951 8952 // The 4-byte load must be aligned, while a char or short may be 8953 // anywhere in the word. Hence all this nasty bookkeeping code. 8954 // add ptr1, ptrA, ptrB [copy if ptrA==0] 8955 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 8956 // xori shift, shift1, 24 [16] 8957 // rlwinm ptr, ptr1, 0, 0, 29 8958 // slw incr2, incr, shift 8959 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 8960 // slw mask, mask2, shift 8961 // loopMBB: 8962 // lwarx tmpDest, ptr 8963 // add tmp, tmpDest, incr2 8964 // andc tmp2, tmpDest, mask 8965 // and tmp3, tmp, mask 8966 // or tmp4, tmp3, tmp2 8967 // stwcx. tmp4, ptr 8968 // bne- loopMBB 8969 // fallthrough --> exitMBB 8970 // srw dest, tmpDest, shift 8971 if (ptrA != ZeroReg) { 8972 Ptr1Reg = RegInfo.createVirtualRegister(RC); 8973 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 8974 .addReg(ptrA).addReg(ptrB); 8975 } else { 8976 Ptr1Reg = ptrB; 8977 } 8978 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 8979 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 8980 if (!isLittleEndian) 8981 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 8982 .addReg(Shift1Reg).addImm(is8bit ? 
24 : 16); 8983 if (is64bit) 8984 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 8985 .addReg(Ptr1Reg).addImm(0).addImm(61); 8986 else 8987 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 8988 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 8989 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg) 8990 .addReg(incr).addReg(ShiftReg); 8991 if (is8bit) 8992 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 8993 else { 8994 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 8995 BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535); 8996 } 8997 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 8998 .addReg(Mask2Reg).addReg(ShiftReg); 8999 9000 BB = loopMBB; 9001 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 9002 .addReg(ZeroReg).addReg(PtrReg); 9003 if (BinOpcode) 9004 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 9005 .addReg(Incr2Reg).addReg(TmpDestReg); 9006 BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg) 9007 .addReg(TmpDestReg).addReg(MaskReg); 9008 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg) 9009 .addReg(TmpReg).addReg(MaskReg); 9010 if (CmpOpcode) { 9011 // For unsigned comparisons, we can directly compare the shifted values. 9012 // For signed comparisons we shift and sign extend. 9013 unsigned SReg = RegInfo.createVirtualRegister(RC); 9014 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), SReg) 9015 .addReg(TmpDestReg).addReg(MaskReg); 9016 unsigned ValueReg = SReg; 9017 unsigned CmpReg = Incr2Reg; 9018 if (CmpOpcode == PPC::CMPW) { 9019 ValueReg = RegInfo.createVirtualRegister(RC); 9020 BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg) 9021 .addReg(SReg).addReg(ShiftReg); 9022 unsigned ValueSReg = RegInfo.createVirtualRegister(RC); 9023 BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg) 9024 .addReg(ValueReg); 9025 ValueReg = ValueSReg; 9026 CmpReg = incr; 9027 } 9028 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 9029 .addReg(CmpReg).addReg(ValueReg); 9030 BuildMI(BB, dl, TII->get(PPC::BCC)) 9031 .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB); 9032 BB->addSuccessor(loop2MBB); 9033 BB->addSuccessor(exitMBB); 9034 BB = loop2MBB; 9035 } 9036 BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg) 9037 .addReg(Tmp3Reg).addReg(Tmp2Reg); 9038 BuildMI(BB, dl, TII->get(PPC::STWCX)) 9039 .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg); 9040 BuildMI(BB, dl, TII->get(PPC::BCC)) 9041 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 9042 BB->addSuccessor(loopMBB); 9043 BB->addSuccessor(exitMBB); 9044 9045 // exitMBB: 9046 // ... 
9047 BB = exitMBB; 9048 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg) 9049 .addReg(ShiftReg); 9050 return BB; 9051 } 9052 9053 llvm::MachineBasicBlock * 9054 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI, 9055 MachineBasicBlock *MBB) const { 9056 DebugLoc DL = MI.getDebugLoc(); 9057 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 9058 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 9059 9060 MachineFunction *MF = MBB->getParent(); 9061 MachineRegisterInfo &MRI = MF->getRegInfo(); 9062 9063 const BasicBlock *BB = MBB->getBasicBlock(); 9064 MachineFunction::iterator I = ++MBB->getIterator(); 9065 9066 // Memory Reference 9067 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin(); 9068 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end(); 9069 9070 unsigned DstReg = MI.getOperand(0).getReg(); 9071 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 9072 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!"); 9073 unsigned mainDstReg = MRI.createVirtualRegister(RC); 9074 unsigned restoreDstReg = MRI.createVirtualRegister(RC); 9075 9076 MVT PVT = getPointerTy(MF->getDataLayout()); 9077 assert((PVT == MVT::i64 || PVT == MVT::i32) && 9078 "Invalid Pointer Size!"); 9079 // For v = setjmp(buf), we generate 9080 // 9081 // thisMBB: 9082 // SjLjSetup mainMBB 9083 // bl mainMBB 9084 // v_restore = 1 9085 // b sinkMBB 9086 // 9087 // mainMBB: 9088 // buf[LabelOffset] = LR 9089 // v_main = 0 9090 // 9091 // sinkMBB: 9092 // v = phi(main, restore) 9093 // 9094 9095 MachineBasicBlock *thisMBB = MBB; 9096 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 9097 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 9098 MF->insert(I, mainMBB); 9099 MF->insert(I, sinkMBB); 9100 9101 MachineInstrBuilder MIB; 9102 9103 // Transfer the remainder of BB and its successor edges to sinkMBB. 9104 sinkMBB->splice(sinkMBB->begin(), MBB, 9105 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 9106 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 9107 9108 // Note that the structure of the jmp_buf used here is not compatible 9109 // with that used by libc, and is not designed to be. Specifically, it 9110 // stores only those 'reserved' registers that LLVM does not otherwise 9111 // understand how to spill. Also, by convention, by the time this 9112 // intrinsic is called, Clang has already stored the frame address in the 9113 // first slot of the buffer and stack address in the third. Following the 9114 // X86 target code, we'll store the jump address in the second slot. We also 9115 // need to save the TOC pointer (R2) to handle jumps between shared 9116 // libraries, and that will be stored in the fourth slot. The thread 9117 // identifier (R13) is not affected. 9118 9119 // thisMBB: 9120 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 9121 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 9122 const int64_t BPOffset = 4 * PVT.getStoreSize(); 9123 9124 // Prepare IP either in reg. 9125 const TargetRegisterClass *PtrRC = getRegClassFor(PVT); 9126 unsigned LabelReg = MRI.createVirtualRegister(PtrRC); 9127 unsigned BufReg = MI.getOperand(1).getReg(); 9128 9129 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) { 9130 setUsesTOCBasePtr(*MBB->getParent()); 9131 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD)) 9132 .addReg(PPC::X2) 9133 .addImm(TOCOffset) 9134 .addReg(BufReg); 9135 MIB.setMemRefs(MMOBegin, MMOEnd); 9136 } 9137 9138 // Naked functions never have a base pointer, and so we use r1. 
For all
  // other functions, this decision must be deferred until PEI.
  unsigned BaseReg;
  if (MF->getFunction()->hasFnAttribute(Attribute::Naked))
    BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
  else
    BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;

  MIB = BuildMI(*thisMBB, MI, DL,
                TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
            .addReg(BaseReg)
            .addImm(BPOffset)
            .addReg(BufReg);
  MIB.setMemRefs(MMOBegin, MMOEnd);

  // Setup
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
  MIB.addRegMask(TRI->getNoPreservedMask());

  BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);

  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
            .addMBB(mainMBB);
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);

  thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
  thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());

  // mainMBB:
  //  mainDstReg = 0
  MIB =
      BuildMI(mainMBB, DL,
              TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);

  // Store IP
  if (Subtarget.isPPC64()) {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
              .addReg(LabelReg)
              .addImm(LabelOffset)
              .addReg(BufReg);
  } else {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
              .addReg(LabelReg)
              .addImm(LabelOffset)
              .addReg(BufReg);
  }

  MIB.setMemRefs(MMOBegin, MMOEnd);

  BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
  mainMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(PPC::PHI), DstReg)
      .addReg(mainDstReg).addMBB(mainMBB)
      .addReg(restoreDstReg).addMBB(thisMBB);

  MI.eraseFromParent();
  return sinkMBB;
}

MachineBasicBlock *
PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
                                     MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  // Memory Reference
  MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  const TargetRegisterClass *RC =
    (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
  unsigned Tmp = MRI.createVirtualRegister(RC);
  // Since FP is only updated here but NOT referenced, it's treated as GPR.
  unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
  unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
  unsigned BP =
      (PVT == MVT::i64)
          ? PPC::X30
          : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
                                                              : PPC::R30);

  MachineInstrBuilder MIB;

  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t SPOffset = 2 * PVT.getStoreSize();
  const int64_t TOCOffset = 3 * PVT.getStoreSize();
  const int64_t BPOffset = 4 * PVT.getStoreSize();

  unsigned BufReg = MI.getOperand(0).getReg();

  // Reload FP (the jumped-to function may not have had a
  // frame pointer, and if so, its r31 will be restored
  // as necessary).
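  // Each reload below pulls one PVT-sized slot out of the buffer written on
  // the setjmp side: slot 0 is the frame pointer, slot 1 the jump address
  // (LabelOffset), slot 2 the stack pointer (SPOffset), slot 3 the TOC
  // pointer (TOCOffset) and slot 4 the base pointer (BPOffset).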
9241 if (PVT == MVT::i64) { 9242 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP) 9243 .addImm(0) 9244 .addReg(BufReg); 9245 } else { 9246 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP) 9247 .addImm(0) 9248 .addReg(BufReg); 9249 } 9250 MIB.setMemRefs(MMOBegin, MMOEnd); 9251 9252 // Reload IP 9253 if (PVT == MVT::i64) { 9254 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp) 9255 .addImm(LabelOffset) 9256 .addReg(BufReg); 9257 } else { 9258 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp) 9259 .addImm(LabelOffset) 9260 .addReg(BufReg); 9261 } 9262 MIB.setMemRefs(MMOBegin, MMOEnd); 9263 9264 // Reload SP 9265 if (PVT == MVT::i64) { 9266 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP) 9267 .addImm(SPOffset) 9268 .addReg(BufReg); 9269 } else { 9270 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP) 9271 .addImm(SPOffset) 9272 .addReg(BufReg); 9273 } 9274 MIB.setMemRefs(MMOBegin, MMOEnd); 9275 9276 // Reload BP 9277 if (PVT == MVT::i64) { 9278 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP) 9279 .addImm(BPOffset) 9280 .addReg(BufReg); 9281 } else { 9282 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP) 9283 .addImm(BPOffset) 9284 .addReg(BufReg); 9285 } 9286 MIB.setMemRefs(MMOBegin, MMOEnd); 9287 9288 // Reload TOC 9289 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) { 9290 setUsesTOCBasePtr(*MBB->getParent()); 9291 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) 9292 .addImm(TOCOffset) 9293 .addReg(BufReg); 9294 9295 MIB.setMemRefs(MMOBegin, MMOEnd); 9296 } 9297 9298 // Jump 9299 BuildMI(*MBB, MI, DL, 9300 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp); 9301 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR)); 9302 9303 MI.eraseFromParent(); 9304 return MBB; 9305 } 9306 9307 MachineBasicBlock * 9308 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, 9309 MachineBasicBlock *BB) const { 9310 if (MI.getOpcode() == TargetOpcode::STACKMAP || 9311 MI.getOpcode() == TargetOpcode::PATCHPOINT) { 9312 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() && 9313 MI.getOpcode() == TargetOpcode::PATCHPOINT) { 9314 // Call lowering should have added an r2 operand to indicate a dependence 9315 // on the TOC base pointer value. It can't however, because there is no 9316 // way to mark the dependence as implicit there, and so the stackmap code 9317 // will confuse it with a regular operand. Instead, add the dependence 9318 // here. 9319 setUsesTOCBasePtr(*BB->getParent()); 9320 MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true)); 9321 } 9322 9323 return emitPatchPoint(MI, BB); 9324 } 9325 9326 if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 || 9327 MI.getOpcode() == PPC::EH_SjLj_SetJmp64) { 9328 return emitEHSjLjSetJmp(MI, BB); 9329 } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 || 9330 MI.getOpcode() == PPC::EH_SjLj_LongJmp64) { 9331 return emitEHSjLjLongJmp(MI, BB); 9332 } 9333 9334 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 9335 9336 // To "insert" these instructions we actually have to insert their 9337 // control-flow patterns. 
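  // For example, a select pseudo expands into the triangle
  //
  //     thisMBB --(bCC)--> sinkMBB
  //        |                  ^
  //        v                  |
  //     copy0MBB -------------+
  //
  // with a PHI in sinkMBB merging the value coming from each block.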
9338 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 9339 MachineFunction::iterator It = ++BB->getIterator(); 9340 9341 MachineFunction *F = BB->getParent(); 9342 9343 if (MI.getOpcode() == PPC::SELECT_CC_I4 || 9344 MI.getOpcode() == PPC::SELECT_CC_I8 || 9345 MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8) { 9346 SmallVector<MachineOperand, 2> Cond; 9347 if (MI.getOpcode() == PPC::SELECT_CC_I4 || 9348 MI.getOpcode() == PPC::SELECT_CC_I8) 9349 Cond.push_back(MI.getOperand(4)); 9350 else 9351 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET)); 9352 Cond.push_back(MI.getOperand(1)); 9353 9354 DebugLoc dl = MI.getDebugLoc(); 9355 TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond, 9356 MI.getOperand(2).getReg(), MI.getOperand(3).getReg()); 9357 } else if (MI.getOpcode() == PPC::SELECT_CC_I4 || 9358 MI.getOpcode() == PPC::SELECT_CC_I8 || 9359 MI.getOpcode() == PPC::SELECT_CC_F4 || 9360 MI.getOpcode() == PPC::SELECT_CC_F8 || 9361 MI.getOpcode() == PPC::SELECT_CC_QFRC || 9362 MI.getOpcode() == PPC::SELECT_CC_QSRC || 9363 MI.getOpcode() == PPC::SELECT_CC_QBRC || 9364 MI.getOpcode() == PPC::SELECT_CC_VRRC || 9365 MI.getOpcode() == PPC::SELECT_CC_VSFRC || 9366 MI.getOpcode() == PPC::SELECT_CC_VSSRC || 9367 MI.getOpcode() == PPC::SELECT_CC_VSRC || 9368 MI.getOpcode() == PPC::SELECT_I4 || 9369 MI.getOpcode() == PPC::SELECT_I8 || 9370 MI.getOpcode() == PPC::SELECT_F4 || 9371 MI.getOpcode() == PPC::SELECT_F8 || 9372 MI.getOpcode() == PPC::SELECT_QFRC || 9373 MI.getOpcode() == PPC::SELECT_QSRC || 9374 MI.getOpcode() == PPC::SELECT_QBRC || 9375 MI.getOpcode() == PPC::SELECT_VRRC || 9376 MI.getOpcode() == PPC::SELECT_VSFRC || 9377 MI.getOpcode() == PPC::SELECT_VSSRC || 9378 MI.getOpcode() == PPC::SELECT_VSRC) { 9379 // The incoming instruction knows the destination vreg to set, the 9380 // condition code register to branch on, the true/false values to 9381 // select between, and a branch opcode to use. 9382 9383 // thisMBB: 9384 // ... 9385 // TrueVal = ... 9386 // cmpTY ccX, r1, r2 9387 // bCC copy1MBB 9388 // fallthrough --> copy0MBB 9389 MachineBasicBlock *thisMBB = BB; 9390 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 9391 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 9392 DebugLoc dl = MI.getDebugLoc(); 9393 F->insert(It, copy0MBB); 9394 F->insert(It, sinkMBB); 9395 9396 // Transfer the remainder of BB and its successor edges to sinkMBB. 9397 sinkMBB->splice(sinkMBB->begin(), BB, 9398 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 9399 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 9400 9401 // Next, add the true and fallthrough blocks as its successors. 
9402 BB->addSuccessor(copy0MBB); 9403 BB->addSuccessor(sinkMBB); 9404 9405 if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 || 9406 MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 || 9407 MI.getOpcode() == PPC::SELECT_QFRC || 9408 MI.getOpcode() == PPC::SELECT_QSRC || 9409 MI.getOpcode() == PPC::SELECT_QBRC || 9410 MI.getOpcode() == PPC::SELECT_VRRC || 9411 MI.getOpcode() == PPC::SELECT_VSFRC || 9412 MI.getOpcode() == PPC::SELECT_VSSRC || 9413 MI.getOpcode() == PPC::SELECT_VSRC) { 9414 BuildMI(BB, dl, TII->get(PPC::BC)) 9415 .addReg(MI.getOperand(1).getReg()) 9416 .addMBB(sinkMBB); 9417 } else { 9418 unsigned SelectPred = MI.getOperand(4).getImm(); 9419 BuildMI(BB, dl, TII->get(PPC::BCC)) 9420 .addImm(SelectPred) 9421 .addReg(MI.getOperand(1).getReg()) 9422 .addMBB(sinkMBB); 9423 } 9424 9425 // copy0MBB: 9426 // %FalseValue = ... 9427 // # fallthrough to sinkMBB 9428 BB = copy0MBB; 9429 9430 // Update machine-CFG edges 9431 BB->addSuccessor(sinkMBB); 9432 9433 // sinkMBB: 9434 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 9435 // ... 9436 BB = sinkMBB; 9437 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg()) 9438 .addReg(MI.getOperand(3).getReg()) 9439 .addMBB(copy0MBB) 9440 .addReg(MI.getOperand(2).getReg()) 9441 .addMBB(thisMBB); 9442 } else if (MI.getOpcode() == PPC::ReadTB) { 9443 // To read the 64-bit time-base register on a 32-bit target, we read the 9444 // two halves. Should the counter have wrapped while it was being read, we 9445 // need to try again. 9446 // ... 9447 // readLoop: 9448 // mfspr Rx,TBU # load from TBU 9449 // mfspr Ry,TB # load from TB 9450 // mfspr Rz,TBU # load from TBU 9451 // cmpw crX,Rx,Rz # check if 'old'='new' 9452 // bne readLoop # branch if they're not equal 9453 // ... 9454 9455 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB); 9456 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 9457 DebugLoc dl = MI.getDebugLoc(); 9458 F->insert(It, readMBB); 9459 F->insert(It, sinkMBB); 9460 9461 // Transfer the remainder of BB and its successor edges to sinkMBB. 
9462 sinkMBB->splice(sinkMBB->begin(), BB, 9463 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 9464 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 9465 9466 BB->addSuccessor(readMBB); 9467 BB = readMBB; 9468 9469 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9470 unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 9471 unsigned LoReg = MI.getOperand(0).getReg(); 9472 unsigned HiReg = MI.getOperand(1).getReg(); 9473 9474 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269); 9475 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268); 9476 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269); 9477 9478 unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 9479 9480 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg) 9481 .addReg(HiReg).addReg(ReadAgainReg); 9482 BuildMI(BB, dl, TII->get(PPC::BCC)) 9483 .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB); 9484 9485 BB->addSuccessor(readMBB); 9486 BB->addSuccessor(sinkMBB); 9487 } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 9488 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 9489 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 9490 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 9491 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 9492 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4); 9493 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 9494 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8); 9495 9496 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 9497 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 9498 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 9499 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 9500 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 9501 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND); 9502 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 9503 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8); 9504 9505 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 9506 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 9507 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 9508 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 9509 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 9510 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR); 9511 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 9512 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8); 9513 9514 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 9515 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 9516 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 9517 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 9518 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 9519 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR); 9520 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 9521 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8); 9522 9523 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 9524 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND); 9525 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 9526 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND); 9527 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 9528 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND); 9529 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 9530 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8); 9531 9532 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 9533 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 9534 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 9535 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 9536 else if (MI.getOpcode() == 
PPC::ATOMIC_LOAD_SUB_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);

  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
           MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
           (Subtarget.hasPartwordAtomics() &&
            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
           (Subtarget.hasPartwordAtomics() &&
            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
    bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;

    auto LoadMnemonic = PPC::LDARX;
    auto StoreMnemonic = PPC::STDCX;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Compare and swap of unknown size");
    case PPC::ATOMIC_CMP_SWAP_I8:
      LoadMnemonic = PPC::LBARX;
      StoreMnemonic = PPC::STBCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "Partword atomics not supported");
      break;
    case PPC::ATOMIC_CMP_SWAP_I16:
      LoadMnemonic = PPC::LHARX;
      StoreMnemonic = PPC::STHCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "Partword atomics not supported");
      break;
    case PPC::ATOMIC_CMP_SWAP_I32:
      LoadMnemonic = PPC::LWARX;
      StoreMnemonic = PPC::STWCX;
      break;
    case PPC::ATOMIC_CMP_SWAP_I64:
      LoadMnemonic = PPC::LDARX;
      StoreMnemonic = PPC::STDCX;
      break;
    }
    unsigned dest = MI.getOperand(0).getReg();
    unsigned ptrA = MI.getOperand(1).getReg();
    unsigned ptrB = MI.getOperand(2).getReg();
    unsigned oldval = MI.getOperand(3).getReg();
    unsigned newval = MI.getOperand(4).getReg();
    DebugLoc dl = MI.getDebugLoc();

    MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, loop1MBB);
    F->insert(It, loop2MBB);
    F->insert(It, midMBB);
    F->insert(It, exitMBB);
    exitMBB->splice(exitMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    exitMBB->transferSuccessorsAndUpdatePHIs(BB);

    // thisMBB:
    //   ...
    //   fallthrough --> loop1MBB
    BB->addSuccessor(loop1MBB);

    // loop1MBB:
    //   l[bhwd]arx dest, ptr
    //   cmp[wd] dest, oldval
    //   bne- midMBB
    // loop2MBB:
    //   st[bhwd]cx. newval, ptr
    //   bne- loop1MBB
    //   b exitMBB
    // midMBB:
    //   st[bhwd]cx. dest, ptr
    // exitMBB:
    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
        .addReg(ptrA).addReg(ptrB);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
        .addReg(oldval).addReg(dest);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(midMBB);

    BB = loop2MBB;
    BuildMI(BB, dl, TII->get(StoreMnemonic))
        .addReg(newval).addReg(ptrA).addReg(ptrB);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
    BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
    BB->addSuccessor(loop1MBB);
    BB->addSuccessor(exitMBB);

    BB = midMBB;
    BuildMI(BB, dl, TII->get(StoreMnemonic))
        .addReg(dest).addReg(ptrA).addReg(ptrB);
    BB->addSuccessor(exitMBB);

    // exitMBB:
    //   ...
    BB = exitMBB;
  } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
    // We must use 64-bit registers for addresses when targeting 64-bit,
    // since we're actually doing arithmetic on them. Other registers
    // can be 32-bit.
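    // The lane bookkeeping below mirrors EmitPartwordAtomicBinary. For
    // example, on a big-endian target, the byte at word offset 2 yields
    // shift1 = 2 * 8 = 16 and shift = 16 ^ 24 = 8, placing the operand in
    // bits <15:8> of the aligned word used by the lwarx/stwcx. loop.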
9684 bool is64bit = Subtarget.isPPC64(); 9685 bool isLittleEndian = Subtarget.isLittleEndian(); 9686 bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 9687 9688 unsigned dest = MI.getOperand(0).getReg(); 9689 unsigned ptrA = MI.getOperand(1).getReg(); 9690 unsigned ptrB = MI.getOperand(2).getReg(); 9691 unsigned oldval = MI.getOperand(3).getReg(); 9692 unsigned newval = MI.getOperand(4).getReg(); 9693 DebugLoc dl = MI.getDebugLoc(); 9694 9695 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 9696 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 9697 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 9698 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 9699 F->insert(It, loop1MBB); 9700 F->insert(It, loop2MBB); 9701 F->insert(It, midMBB); 9702 F->insert(It, exitMBB); 9703 exitMBB->splice(exitMBB->begin(), BB, 9704 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 9705 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 9706 9707 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9708 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 9709 : &PPC::GPRCRegClass; 9710 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 9711 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 9712 unsigned ShiftReg = 9713 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC); 9714 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC); 9715 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC); 9716 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC); 9717 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC); 9718 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 9719 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 9720 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 9721 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 9722 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 9723 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 9724 unsigned Ptr1Reg; 9725 unsigned TmpReg = RegInfo.createVirtualRegister(RC); 9726 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 9727 // thisMBB: 9728 // ... 9729 // fallthrough --> loopMBB 9730 BB->addSuccessor(loop1MBB); 9731 9732 // The 4-byte load must be aligned, while a char or short may be 9733 // anywhere in the word. Hence all this nasty bookkeeping code. 9734 // add ptr1, ptrA, ptrB [copy if ptrA==0] 9735 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 9736 // xori shift, shift1, 24 [16] 9737 // rlwinm ptr, ptr1, 0, 0, 29 9738 // slw newval2, newval, shift 9739 // slw oldval2, oldval,shift 9740 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 9741 // slw mask, mask2, shift 9742 // and newval3, newval2, mask 9743 // and oldval3, oldval2, mask 9744 // loop1MBB: 9745 // lwarx tmpDest, ptr 9746 // and tmp, tmpDest, mask 9747 // cmpw tmp, oldval3 9748 // bne- midMBB 9749 // loop2MBB: 9750 // andc tmp2, tmpDest, mask 9751 // or tmp4, tmp2, newval3 9752 // stwcx. tmp4, ptr 9753 // bne- loop1MBB 9754 // b exitBB 9755 // midMBB: 9756 // stwcx. tmpDest, ptr 9757 // exitBB: 9758 // srw dest, tmpDest, shift 9759 if (ptrA != ZeroReg) { 9760 Ptr1Reg = RegInfo.createVirtualRegister(RC); 9761 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 9762 .addReg(ptrA).addReg(ptrB); 9763 } else { 9764 Ptr1Reg = ptrB; 9765 } 9766 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 9767 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 9768 if (!isLittleEndian) 9769 BuildMI(BB, dl, TII->get(is64bit ? 
PPC::XORI8 : PPC::XORI), ShiftReg) 9770 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 9771 if (is64bit) 9772 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 9773 .addReg(Ptr1Reg).addImm(0).addImm(61); 9774 else 9775 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 9776 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 9777 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 9778 .addReg(newval).addReg(ShiftReg); 9779 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 9780 .addReg(oldval).addReg(ShiftReg); 9781 if (is8bit) 9782 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 9783 else { 9784 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 9785 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 9786 .addReg(Mask3Reg).addImm(65535); 9787 } 9788 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 9789 .addReg(Mask2Reg).addReg(ShiftReg); 9790 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 9791 .addReg(NewVal2Reg).addReg(MaskReg); 9792 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 9793 .addReg(OldVal2Reg).addReg(MaskReg); 9794 9795 BB = loop1MBB; 9796 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 9797 .addReg(ZeroReg).addReg(PtrReg); 9798 BuildMI(BB, dl, TII->get(PPC::AND),TmpReg) 9799 .addReg(TmpDestReg).addReg(MaskReg); 9800 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 9801 .addReg(TmpReg).addReg(OldVal3Reg); 9802 BuildMI(BB, dl, TII->get(PPC::BCC)) 9803 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 9804 BB->addSuccessor(loop2MBB); 9805 BB->addSuccessor(midMBB); 9806 9807 BB = loop2MBB; 9808 BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg) 9809 .addReg(TmpDestReg).addReg(MaskReg); 9810 BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg) 9811 .addReg(Tmp2Reg).addReg(NewVal3Reg); 9812 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg) 9813 .addReg(ZeroReg).addReg(PtrReg); 9814 BuildMI(BB, dl, TII->get(PPC::BCC)) 9815 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 9816 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 9817 BB->addSuccessor(loop1MBB); 9818 BB->addSuccessor(exitMBB); 9819 9820 BB = midMBB; 9821 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg) 9822 .addReg(ZeroReg).addReg(PtrReg); 9823 BB->addSuccessor(exitMBB); 9824 9825 // exitMBB: 9826 // ... 9827 BB = exitMBB; 9828 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg) 9829 .addReg(ShiftReg); 9830 } else if (MI.getOpcode() == PPC::FADDrtz) { 9831 // This pseudo performs an FADD with rounding mode temporarily forced 9832 // to round-to-zero. We emit this via custom inserter since the FPSCR 9833 // is not modeled at the SelectionDAG level. 9834 unsigned Dest = MI.getOperand(0).getReg(); 9835 unsigned Src1 = MI.getOperand(1).getReg(); 9836 unsigned Src2 = MI.getOperand(2).getReg(); 9837 DebugLoc dl = MI.getDebugLoc(); 9838 9839 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9840 unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 9841 9842 // Save FPSCR value. 9843 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg); 9844 9845 // Set rounding mode to round-to-zero. 9846 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31); 9847 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30); 9848 9849 // Perform addition. 9850 BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2); 9851 9852 // Restore FPSCR value. 
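    // (MTFSF with a field mask of 1 writes only FPSCR field 7, bits 28:31,
    // which holds the RN rounding-mode bits that were modified above.)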
9853 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg); 9854 } else if (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT || 9855 MI.getOpcode() == PPC::ANDIo_1_GT_BIT || 9856 MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 9857 MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) { 9858 unsigned Opcode = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 9859 MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) 9860 ? PPC::ANDIo8 9861 : PPC::ANDIo; 9862 bool isEQ = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT || 9863 MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8); 9864 9865 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9866 unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ? 9867 &PPC::GPRCRegClass : 9868 &PPC::G8RCRegClass); 9869 9870 DebugLoc dl = MI.getDebugLoc(); 9871 BuildMI(*BB, MI, dl, TII->get(Opcode), Dest) 9872 .addReg(MI.getOperand(1).getReg()) 9873 .addImm(1); 9874 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), 9875 MI.getOperand(0).getReg()) 9876 .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT); 9877 } else if (MI.getOpcode() == PPC::TCHECK_RET) { 9878 DebugLoc Dl = MI.getDebugLoc(); 9879 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9880 unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 9881 BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg); 9882 return BB; 9883 } else { 9884 llvm_unreachable("Unexpected instr type to insert"); 9885 } 9886 9887 MI.eraseFromParent(); // The pseudo instruction is gone now. 9888 return BB; 9889 } 9890 9891 //===----------------------------------------------------------------------===// 9892 // Target Optimization Hooks 9893 //===----------------------------------------------------------------------===// 9894 9895 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) { 9896 // For the estimates, convergence is quadratic, so we essentially double the 9897 // number of digits correct after every iteration. For both FRE and FRSQRTE, 9898 // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(), 9899 // this is 2^-14. IEEE float has 23 digits and double has 52 digits. 9900 int RefinementSteps = Subtarget.hasRecipPrec() ? 
1 : 3;
  if (VT.getScalarType() == MVT::f64)
    RefinementSteps++;
  return RefinementSteps;
}

SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                           int Enabled, int &RefinementSteps,
                                           bool &UseOneConstNR,
                                           bool Reciprocal) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
      (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
      (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
      (VT == MVT::v4f64 && Subtarget.hasQPX())) {
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);

    UseOneConstNR = true;
    return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
                                            int Enabled,
                                            int &RefinementSteps) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
      (VT == MVT::f64 && Subtarget.hasFRE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
      (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
      (VT == MVT::v4f64 && Subtarget.hasQPX())) {
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
    return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
  // Note: This functionality is used only when unsafe-fp-math is enabled, and
  // on cores with reciprocal estimates (which are used when unsafe-fp-math is
  // enabled for division), this functionality is redundant with the default
  // combiner logic (once the division -> reciprocal/multiply transformation
  // has taken place). As a result, this matters more for older cores than for
  // newer ones.

  // Combine multiple FDIVs with the same divisor into multiple FMULs by the
  // reciprocal if there are two or more FDIVs (for embedded cores with only
  // one FP pipeline) or three or more FDIVs (for generic OOO cores).
  switch (Subtarget.getDarwinDirective()) {
  default:
    return 3;
  case PPC::DIR_440:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
    return 2;
  }
}

// isConsecutiveLSLoc needs to work even if all adds have not yet been
// collapsed, and so we need to look through chains of them.
static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
                                      int64_t &Offset, SelectionDAG &DAG) {
  if (DAG.isBaseWithConstantOffset(Loc)) {
    Base = Loc.getOperand(0);
    Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();

    // The base might itself be a base plus an offset, and if so, accumulate
    // that as well.
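    // For example, (add (add x, 16), 8) accumulates to Base = x, Offset = 24.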
9975 getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG); 9976 } 9977 } 9978 9979 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, 9980 unsigned Bytes, int Dist, 9981 SelectionDAG &DAG) { 9982 if (VT.getSizeInBits() / 8 != Bytes) 9983 return false; 9984 9985 SDValue BaseLoc = Base->getBasePtr(); 9986 if (Loc.getOpcode() == ISD::FrameIndex) { 9987 if (BaseLoc.getOpcode() != ISD::FrameIndex) 9988 return false; 9989 const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 9990 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 9991 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 9992 int FS = MFI.getObjectSize(FI); 9993 int BFS = MFI.getObjectSize(BFI); 9994 if (FS != BFS || FS != (int)Bytes) return false; 9995 return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes); 9996 } 9997 9998 SDValue Base1 = Loc, Base2 = BaseLoc; 9999 int64_t Offset1 = 0, Offset2 = 0; 10000 getBaseWithConstantOffset(Loc, Base1, Offset1, DAG); 10001 getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG); 10002 if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes)) 10003 return true; 10004 10005 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 10006 const GlobalValue *GV1 = nullptr; 10007 const GlobalValue *GV2 = nullptr; 10008 Offset1 = 0; 10009 Offset2 = 0; 10010 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1); 10011 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2); 10012 if (isGA1 && isGA2 && GV1 == GV2) 10013 return Offset1 == (Offset2 + Dist*Bytes); 10014 return false; 10015 } 10016 10017 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does 10018 // not enforce equality of the chain operands. 10019 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base, 10020 unsigned Bytes, int Dist, 10021 SelectionDAG &DAG) { 10022 if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) { 10023 EVT VT = LS->getMemoryVT(); 10024 SDValue Loc = LS->getBasePtr(); 10025 return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG); 10026 } 10027 10028 if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) { 10029 EVT VT; 10030 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10031 default: return false; 10032 case Intrinsic::ppc_qpx_qvlfd: 10033 case Intrinsic::ppc_qpx_qvlfda: 10034 VT = MVT::v4f64; 10035 break; 10036 case Intrinsic::ppc_qpx_qvlfs: 10037 case Intrinsic::ppc_qpx_qvlfsa: 10038 VT = MVT::v4f32; 10039 break; 10040 case Intrinsic::ppc_qpx_qvlfcd: 10041 case Intrinsic::ppc_qpx_qvlfcda: 10042 VT = MVT::v2f64; 10043 break; 10044 case Intrinsic::ppc_qpx_qvlfcs: 10045 case Intrinsic::ppc_qpx_qvlfcsa: 10046 VT = MVT::v2f32; 10047 break; 10048 case Intrinsic::ppc_qpx_qvlfiwa: 10049 case Intrinsic::ppc_qpx_qvlfiwz: 10050 case Intrinsic::ppc_altivec_lvx: 10051 case Intrinsic::ppc_altivec_lvxl: 10052 case Intrinsic::ppc_vsx_lxvw4x: 10053 case Intrinsic::ppc_vsx_lxvw4x_be: 10054 VT = MVT::v4i32; 10055 break; 10056 case Intrinsic::ppc_vsx_lxvd2x: 10057 case Intrinsic::ppc_vsx_lxvd2x_be: 10058 VT = MVT::v2f64; 10059 break; 10060 case Intrinsic::ppc_altivec_lvebx: 10061 VT = MVT::i8; 10062 break; 10063 case Intrinsic::ppc_altivec_lvehx: 10064 VT = MVT::i16; 10065 break; 10066 case Intrinsic::ppc_altivec_lvewx: 10067 VT = MVT::i32; 10068 break; 10069 } 10070 10071 return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG); 10072 } 10073 10074 if (N->getOpcode() == ISD::INTRINSIC_VOID) { 10075 EVT VT; 10076 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10077 default: 
return false;
    case Intrinsic::ppc_qpx_qvstfd:
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    case Intrinsic::ppc_qpx_qvstfiw:
    case Intrinsic::ppc_qpx_qvstfiwa:
    case Intrinsic::ppc_altivec_stvx:
    case Intrinsic::ppc_altivec_stvxl:
    case Intrinsic::ppc_vsx_stxvw4x:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_vsx_stxvw4x_be:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x_be:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    }

    return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
  }

  return false;
}

// Return true if there is a nearby consecutive load to the one provided
// (regardless of alignment). We search up and down the chain, looking through
// token factors and other loads (but nothing else). As a result, a true result
// indicates that it is safe to create a new consecutive load adjacent to the
// load provided.
static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
  SDValue Chain = LD->getChain();
  EVT VT = LD->getMemoryVT();

  SmallSet<SDNode *, 16> LoadRoots;
  SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
  SmallSet<SDNode *, 16> Visited;

  // First, search up the chain, branching to follow all token-factor operands.
  // If we find a consecutive load, then we're done; otherwise, record all
  // nodes just above the top-level loads and token factors.
  while (!Queue.empty()) {
    SDNode *ChainNext = Queue.pop_back_val();
    if (!Visited.insert(ChainNext).second)
      continue;

    if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
      if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
        return true;

      if (!Visited.count(ChainLD->getChain().getNode()))
        Queue.push_back(ChainLD->getChain().getNode());
    } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
      for (const SDUse &O : ChainNext->ops())
        if (!Visited.count(O.getNode()))
          Queue.push_back(O.getNode());
    } else
      LoadRoots.insert(ChainNext);
  }

  // Second, search down the chain, starting from the top-level nodes recorded
  // in the first phase. These top-level nodes are the nodes just above all
  // loads and token factors. Starting with their uses, recursively look
  // through all loads (just the chain uses) and token factors to find a
  // consecutive load.
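  // For example, two loads chained directly to the same entry token are not
  // found by the upward walk from LD, but phase two visits the entry token's
  // chain users and finds the sibling load there.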
Visited.clear();
  Queue.clear();

  for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
       IE = LoadRoots.end(); I != IE; ++I) {
    Queue.push_back(*I);

    while (!Queue.empty()) {
      SDNode *LoadRoot = Queue.pop_back_val();
      if (!Visited.insert(LoadRoot).second)
        continue;

      if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
        if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
          return true;

      for (SDNode::use_iterator UI = LoadRoot->use_begin(),
           UE = LoadRoot->use_end(); UI != UE; ++UI)
        if (((isa<MemSDNode>(*UI) &&
              cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
          Queue.push_back(*UI);
    }
  }

  return false;
}

/// This function is called when we have proved that a SETCC node can be
/// replaced by subtraction (and other supporting instructions) so that the
/// result of the comparison is kept in a GPR instead of a CR. This function is
/// purely for codegen purposes and has some flags to guide the codegen
/// process.
static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
                                     bool Swap, SDLoc &DL, SelectionDAG &DAG) {
  assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");

  // Zero extend the operands to the largest legal integer. Originally, they
  // must be of a strictly smaller size.
  auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
                         DAG.getConstant(Size, DL, MVT::i32));
  auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
                         DAG.getConstant(Size, DL, MVT::i32));

  // Swap if needed. Depends on the condition code.
  if (Swap)
    std::swap(Op0, Op1);

  // Subtract the extended integers.
  auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);

  // Move the sign bit to the least significant position and zero out the rest.
  // Now the least significant bit carries the result of the original
  // comparison.
  auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
                             DAG.getConstant(Size - 1, DL, MVT::i32));
  auto Final = Shifted;

  // Complement the result if needed. Based on the condition code.
  if (Complement)
    Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
                        DAG.getConstant(1, DL, MVT::i64));

  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
}

SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  // The size of the integers being compared plays a critical role in the
  // following analysis, so we prefer to do this when all types are legal.
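  // For example, with 64-bit legal integers, (setult x:i32, y:i32) becomes
  // trunc ((zext x - zext y) >> 63): the 64-bit difference of two
  // zero-extended i32 values is negative exactly when x < y unsigned.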
if (!DCI.isAfterLegalizeVectorOps())
    return SDValue();

  // If all users of the SETCC extend its value to a legal integer type,
  // then we replace the SETCC with a subtraction.
  for (SDNode::use_iterator UI = N->use_begin(),
       UE = N->use_end(); UI != UE; ++UI) {
    if (UI->getOpcode() != ISD::ZERO_EXTEND)
      return SDValue();
  }

  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  auto OpSize = N->getOperand(0).getValueSizeInBits();

  unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();

  if (OpSize < Size) {
    switch (CC) {
    default: break;
    case ISD::SETULT:
      return generateEquivalentSub(N, Size, false, false, DL, DAG);
    case ISD::SETULE:
      return generateEquivalentSub(N, Size, true, true, DL, DAG);
    case ISD::SETUGT:
      return generateEquivalentSub(N, Size, false, true, DL, DAG);
    case ISD::SETUGE:
      return generateEquivalentSub(N, Size, true, false, DL, DAG);
    }
  }

  return SDValue();
}

SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
  // If we're tracking CR bits, we need to be careful that we don't have:
  //   trunc(binary-ops(zext(x), zext(y)))
  // or
  //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...))
  // such that we're unnecessarily moving things into GPRs when it would be
  // better to keep them in CR bits.

  // Note that trunc here can be an actual i1 trunc, or can be the effective
  // truncation that comes from a setcc or select_cc.
  if (N->getOpcode() == ISD::TRUNCATE &&
      N->getValueType(0) != MVT::i1)
    return SDValue();

  if (N->getOperand(0).getValueType() != MVT::i32 &&
      N->getOperand(0).getValueType() != MVT::i64)
    return SDValue();

  if (N->getOpcode() == ISD::SETCC ||
      N->getOpcode() == ISD::SELECT_CC) {
    // If we're looking at a comparison, then we need to make sure that the
    // high bits (all except for the first) don't affect the result.
    ISD::CondCode CC =
      cast<CondCodeSDNode>(N->getOperand(
        N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
    unsigned OpBits = N->getOperand(0).getValueSizeInBits();

    if (ISD::isSignedIntSetCC(CC)) {
      if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
          DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
        return SDValue();
    } else if (ISD::isUnsignedIntSetCC(CC)) {
      if (!DAG.MaskedValueIsZero(N->getOperand(0),
                                 APInt::getHighBitsSet(OpBits, OpBits-1)) ||
          !DAG.MaskedValueIsZero(N->getOperand(1),
                                 APInt::getHighBitsSet(OpBits, OpBits-1)))
        return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
                                             : SDValue());
    } else {
      // This is neither a signed nor an unsigned comparison, just make sure
      // that the high bits are equal.
      KnownBits Op1Known, Op2Known;
      DAG.computeKnownBits(N->getOperand(0), Op1Known);
      DAG.computeKnownBits(N->getOperand(1), Op2Known);

      // We don't really care about what is known about the first bit (if
      // anything), so clear it in all masks prior to comparing them.
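      // That is, operands whose known bits agree everywhere above bit 0 are
      // treated as equivalent here; that is all the effective-i1 semantics
      // require.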
10325 Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0); 10326 Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0); 10327 10328 if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One) 10329 return SDValue(); 10330 } 10331 } 10332 10333 // We now know that the higher-order bits are irrelevant, we just need to 10334 // make sure that all of the intermediate operations are bit operations, and 10335 // all inputs are extensions. 10336 if (N->getOperand(0).getOpcode() != ISD::AND && 10337 N->getOperand(0).getOpcode() != ISD::OR && 10338 N->getOperand(0).getOpcode() != ISD::XOR && 10339 N->getOperand(0).getOpcode() != ISD::SELECT && 10340 N->getOperand(0).getOpcode() != ISD::SELECT_CC && 10341 N->getOperand(0).getOpcode() != ISD::TRUNCATE && 10342 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND && 10343 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND && 10344 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND) 10345 return SDValue(); 10346 10347 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) && 10348 N->getOperand(1).getOpcode() != ISD::AND && 10349 N->getOperand(1).getOpcode() != ISD::OR && 10350 N->getOperand(1).getOpcode() != ISD::XOR && 10351 N->getOperand(1).getOpcode() != ISD::SELECT && 10352 N->getOperand(1).getOpcode() != ISD::SELECT_CC && 10353 N->getOperand(1).getOpcode() != ISD::TRUNCATE && 10354 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND && 10355 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND && 10356 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND) 10357 return SDValue(); 10358 10359 SmallVector<SDValue, 4> Inputs; 10360 SmallVector<SDValue, 8> BinOps, PromOps; 10361 SmallPtrSet<SDNode *, 16> Visited; 10362 10363 for (unsigned i = 0; i < 2; ++i) { 10364 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 10365 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 10366 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 10367 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) || 10368 isa<ConstantSDNode>(N->getOperand(i))) 10369 Inputs.push_back(N->getOperand(i)); 10370 else 10371 BinOps.push_back(N->getOperand(i)); 10372 10373 if (N->getOpcode() == ISD::TRUNCATE) 10374 break; 10375 } 10376 10377 // Visit all inputs, collect all binary operations (and, or, xor and 10378 // select) that are all fed by extensions. 10379 while (!BinOps.empty()) { 10380 SDValue BinOp = BinOps.back(); 10381 BinOps.pop_back(); 10382 10383 if (!Visited.insert(BinOp.getNode()).second) 10384 continue; 10385 10386 PromOps.push_back(BinOp); 10387 10388 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 10389 // The condition of the select is not promoted. 
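  // For reference, the operand layout assumed by the checks below (sketch):
  //   SELECT    (cond, tval, fval)         -> only operands 1 and 2 promote
  //   SELECT_CC (lhs, rhs, tval, fval, cc) -> only operands 2 and 3 promote
  // The condition/comparison inputs keep their original types.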
10390 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 10391 continue; 10392 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 10393 continue; 10394 10395 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 10396 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 10397 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 10398 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) || 10399 isa<ConstantSDNode>(BinOp.getOperand(i))) { 10400 Inputs.push_back(BinOp.getOperand(i)); 10401 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 10402 BinOp.getOperand(i).getOpcode() == ISD::OR || 10403 BinOp.getOperand(i).getOpcode() == ISD::XOR || 10404 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 10405 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC || 10406 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 10407 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 10408 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 10409 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) { 10410 BinOps.push_back(BinOp.getOperand(i)); 10411 } else { 10412 // We have an input that is not an extension or another binary 10413 // operation; we'll abort this transformation. 10414 return SDValue(); 10415 } 10416 } 10417 } 10418 10419 // Make sure that this is a self-contained cluster of operations (which 10420 // is not quite the same thing as saying that everything has only one 10421 // use). 10422 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 10423 if (isa<ConstantSDNode>(Inputs[i])) 10424 continue; 10425 10426 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 10427 UE = Inputs[i].getNode()->use_end(); 10428 UI != UE; ++UI) { 10429 SDNode *User = *UI; 10430 if (User != N && !Visited.count(User)) 10431 return SDValue(); 10432 10433 // Make sure that we're not going to promote the non-output-value 10434 // operand(s) or SELECT or SELECT_CC. 10435 // FIXME: Although we could sometimes handle this, and it does occur in 10436 // practice that one of the condition inputs to the select is also one of 10437 // the outputs, we currently can't deal with this. 10438 if (User->getOpcode() == ISD::SELECT) { 10439 if (User->getOperand(0) == Inputs[i]) 10440 return SDValue(); 10441 } else if (User->getOpcode() == ISD::SELECT_CC) { 10442 if (User->getOperand(0) == Inputs[i] || 10443 User->getOperand(1) == Inputs[i]) 10444 return SDValue(); 10445 } 10446 } 10447 } 10448 10449 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 10450 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 10451 UE = PromOps[i].getNode()->use_end(); 10452 UI != UE; ++UI) { 10453 SDNode *User = *UI; 10454 if (User != N && !Visited.count(User)) 10455 return SDValue(); 10456 10457 // Make sure that we're not going to promote the non-output-value 10458 // operand(s) or SELECT or SELECT_CC. 10459 // FIXME: Although we could sometimes handle this, and it does occur in 10460 // practice that one of the condition inputs to the select is also one of 10461 // the outputs, we currently can't deal with this. 10462 if (User->getOpcode() == ISD::SELECT) { 10463 if (User->getOperand(0) == PromOps[i]) 10464 return SDValue(); 10465 } else if (User->getOpcode() == ISD::SELECT_CC) { 10466 if (User->getOperand(0) == PromOps[i] || 10467 User->getOperand(1) == PromOps[i]) 10468 return SDValue(); 10469 } 10470 } 10471 } 10472 10473 // Replace all inputs with the extension operand. 
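  // E.g. an input (zero_extend x:i1):i32 is simply replaced by x itself, so
  // every to-be-promoted user now consumes the i1 value directly; constants
  // are left alone here and truncated to i1 at each use site further down.
  // (Sketch of the loop that follows.)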
10474 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 10475 // Constants may have users outside the cluster of to-be-promoted nodes, 10476 // and so we need to replace those as we do the promotions. 10477 if (isa<ConstantSDNode>(Inputs[i])) 10478 continue; 10479 else 10480 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0)); 10481 } 10482 10483 std::list<HandleSDNode> PromOpHandles; 10484 for (auto &PromOp : PromOps) 10485 PromOpHandles.emplace_back(PromOp); 10486 10487 // Replace all operations (these are all the same, but have a different 10488 // (i1) return type). DAG.getNode will validate that the types of 10489 // a binary operator match, so go through the list in reverse so that 10490 // we've likely promoted both operands first. Any intermediate truncations or 10491 // extensions disappear. 10492 while (!PromOpHandles.empty()) { 10493 SDValue PromOp = PromOpHandles.back().getValue(); 10494 PromOpHandles.pop_back(); 10495 10496 if (PromOp.getOpcode() == ISD::TRUNCATE || 10497 PromOp.getOpcode() == ISD::SIGN_EXTEND || 10498 PromOp.getOpcode() == ISD::ZERO_EXTEND || 10499 PromOp.getOpcode() == ISD::ANY_EXTEND) { 10500 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) && 10501 PromOp.getOperand(0).getValueType() != MVT::i1) { 10502 // The operand is not yet ready (see comment below). 10503 PromOpHandles.emplace_front(PromOp); 10504 continue; 10505 } 10506 10507 SDValue RepValue = PromOp.getOperand(0); 10508 if (isa<ConstantSDNode>(RepValue)) 10509 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue); 10510 10511 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue); 10512 continue; 10513 } 10514 10515 unsigned C; 10516 switch (PromOp.getOpcode()) { 10517 default: C = 0; break; 10518 case ISD::SELECT: C = 1; break; 10519 case ISD::SELECT_CC: C = 2; break; 10520 } 10521 10522 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 10523 PromOp.getOperand(C).getValueType() != MVT::i1) || 10524 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 10525 PromOp.getOperand(C+1).getValueType() != MVT::i1)) { 10526 // The to-be-promoted operands of this node have not yet been 10527 // promoted (this should be rare because we're going through the 10528 // list backward, but if one of the operands has several users in 10529 // this cluster of to-be-promoted nodes, it is possible). 10530 PromOpHandles.emplace_front(PromOp); 10531 continue; 10532 } 10533 10534 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 10535 PromOp.getNode()->op_end()); 10536 10537 // If there are any constant inputs, make sure they're replaced now. 10538 for (unsigned i = 0; i < 2; ++i) 10539 if (isa<ConstantSDNode>(Ops[C+i])) 10540 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]); 10541 10542 DAG.ReplaceAllUsesOfValueWith(PromOp, 10543 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops)); 10544 } 10545 10546 // Now we're left with the initial truncation itself. 10547 if (N->getOpcode() == ISD::TRUNCATE) 10548 return N->getOperand(0); 10549 10550 // Otherwise, this is a comparison. The operands to be compared have just 10551 // changed type (to i1), but everything else is the same. 10552 return SDValue(N, 0); 10553 } 10554 10555 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N, 10556 DAGCombinerInfo &DCI) const { 10557 SelectionDAG &DAG = DCI.DAG; 10558 SDLoc dl(N); 10559 10560 // If we're tracking CR bits, we need to be careful that we don't have: 10561 // zext(binary-ops(trunc(x), trunc(y))) 10562 // or 10563 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...) 
10564 // such that we're unnecessarily moving things into CR bits that can more 10565 // efficiently stay in GPRs. Note that if we're not certain that the high 10566 // bits are set as required by the final extension, we still may need to do 10567 // some masking to get the proper behavior. 10568 10569 // This same functionality is important on PPC64 when dealing with 10570 // 32-to-64-bit extensions; these occur often when 32-bit values are used as 10571 // the return values of functions. Because it is so similar, it is handled 10572 // here as well. 10573 10574 if (N->getValueType(0) != MVT::i32 && 10575 N->getValueType(0) != MVT::i64) 10576 return SDValue(); 10577 10578 if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) || 10579 (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64()))) 10580 return SDValue(); 10581 10582 if (N->getOperand(0).getOpcode() != ISD::AND && 10583 N->getOperand(0).getOpcode() != ISD::OR && 10584 N->getOperand(0).getOpcode() != ISD::XOR && 10585 N->getOperand(0).getOpcode() != ISD::SELECT && 10586 N->getOperand(0).getOpcode() != ISD::SELECT_CC) 10587 return SDValue(); 10588 10589 SmallVector<SDValue, 4> Inputs; 10590 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps; 10591 SmallPtrSet<SDNode *, 16> Visited; 10592 10593 // Visit all inputs, collect all binary operations (and, or, xor and 10594 // select) that are all fed by truncations. 10595 while (!BinOps.empty()) { 10596 SDValue BinOp = BinOps.back(); 10597 BinOps.pop_back(); 10598 10599 if (!Visited.insert(BinOp.getNode()).second) 10600 continue; 10601 10602 PromOps.push_back(BinOp); 10603 10604 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 10605 // The condition of the select is not promoted. 10606 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 10607 continue; 10608 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 10609 continue; 10610 10611 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 10612 isa<ConstantSDNode>(BinOp.getOperand(i))) { 10613 Inputs.push_back(BinOp.getOperand(i)); 10614 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 10615 BinOp.getOperand(i).getOpcode() == ISD::OR || 10616 BinOp.getOperand(i).getOpcode() == ISD::XOR || 10617 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 10618 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) { 10619 BinOps.push_back(BinOp.getOperand(i)); 10620 } else { 10621 // We have an input that is not a truncation or another binary 10622 // operation; we'll abort this transformation. 10623 return SDValue(); 10624 } 10625 } 10626 } 10627 10628 // The operands of a select that must be truncated when the select is 10629 // promoted because the operand is actually part of the to-be-promoted set. 10630 DenseMap<SDNode *, EVT> SelectTruncOp[2]; 10631 10632 // Make sure that this is a self-contained cluster of operations (which 10633 // is not quite the same thing as saying that everything has only one 10634 // use). 10635 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 10636 if (isa<ConstantSDNode>(Inputs[i])) 10637 continue; 10638 10639 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 10640 UE = Inputs[i].getNode()->use_end(); 10641 UI != UE; ++UI) { 10642 SDNode *User = *UI; 10643 if (User != N && !Visited.count(User)) 10644 return SDValue(); 10645 10646 // If we're going to promote the non-output-value operand(s) or SELECT or 10647 // SELECT_CC, record them for truncation. 
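  // E.g. for (select_cc lhs, rhs, tval, fval, cc) where lhs is itself in the
  // to-be-promoted set, we record lhs's original type so that a TRUNCATE can
  // be re-inserted on operand 0 after promotion; the comparison still needs
  // its original-width inputs even though tval/fval change type. (Sketch of
  // the bookkeeping below.)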
10648 if (User->getOpcode() == ISD::SELECT) { 10649 if (User->getOperand(0) == Inputs[i]) 10650 SelectTruncOp[0].insert(std::make_pair(User, 10651 User->getOperand(0).getValueType())); 10652 } else if (User->getOpcode() == ISD::SELECT_CC) { 10653 if (User->getOperand(0) == Inputs[i]) 10654 SelectTruncOp[0].insert(std::make_pair(User, 10655 User->getOperand(0).getValueType())); 10656 if (User->getOperand(1) == Inputs[i]) 10657 SelectTruncOp[1].insert(std::make_pair(User, 10658 User->getOperand(1).getValueType())); 10659 } 10660 } 10661 } 10662 10663 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 10664 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 10665 UE = PromOps[i].getNode()->use_end(); 10666 UI != UE; ++UI) { 10667 SDNode *User = *UI; 10668 if (User != N && !Visited.count(User)) 10669 return SDValue(); 10670 10671 // If we're going to promote the non-output-value operand(s) or SELECT or 10672 // SELECT_CC, record them for truncation. 10673 if (User->getOpcode() == ISD::SELECT) { 10674 if (User->getOperand(0) == PromOps[i]) 10675 SelectTruncOp[0].insert(std::make_pair(User, 10676 User->getOperand(0).getValueType())); 10677 } else if (User->getOpcode() == ISD::SELECT_CC) { 10678 if (User->getOperand(0) == PromOps[i]) 10679 SelectTruncOp[0].insert(std::make_pair(User, 10680 User->getOperand(0).getValueType())); 10681 if (User->getOperand(1) == PromOps[i]) 10682 SelectTruncOp[1].insert(std::make_pair(User, 10683 User->getOperand(1).getValueType())); 10684 } 10685 } 10686 } 10687 10688 unsigned PromBits = N->getOperand(0).getValueSizeInBits(); 10689 bool ReallyNeedsExt = false; 10690 if (N->getOpcode() != ISD::ANY_EXTEND) { 10691 // If all of the inputs are not already sign/zero extended, then 10692 // we'll still need to do that at the end. 10693 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 10694 if (isa<ConstantSDNode>(Inputs[i])) 10695 continue; 10696 10697 unsigned OpBits = 10698 Inputs[i].getOperand(0).getValueSizeInBits(); 10699 assert(PromBits < OpBits && "Truncation not to a smaller bit count?"); 10700 10701 if ((N->getOpcode() == ISD::ZERO_EXTEND && 10702 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0), 10703 APInt::getHighBitsSet(OpBits, 10704 OpBits-PromBits))) || 10705 (N->getOpcode() == ISD::SIGN_EXTEND && 10706 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) < 10707 (OpBits-(PromBits-1)))) { 10708 ReallyNeedsExt = true; 10709 break; 10710 } 10711 } 10712 } 10713 10714 // Replace all inputs, either with the truncation operand, or a 10715 // truncation or extension to the final output type. 10716 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 10717 // Constant inputs need to be replaced with the to-be-promoted nodes that 10718 // use them because they might have users outside of the cluster of 10719 // promoted nodes. 
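  // E.g. with N = (sext (and (trunc a:i64), (trunc b:i64))):i64, each
  // non-constant input (trunc x) is replaced below by getSExtOrTrunc(x, dl,
  // i64), so the whole AND cluster is rebuilt at i64 width. (Shapes are
  // illustrative only.)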
10720 if (isa<ConstantSDNode>(Inputs[i])) 10721 continue; 10722 10723 SDValue InSrc = Inputs[i].getOperand(0); 10724 if (Inputs[i].getValueType() == N->getValueType(0)) 10725 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc); 10726 else if (N->getOpcode() == ISD::SIGN_EXTEND) 10727 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 10728 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0))); 10729 else if (N->getOpcode() == ISD::ZERO_EXTEND) 10730 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 10731 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0))); 10732 else 10733 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 10734 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0))); 10735 } 10736 10737 std::list<HandleSDNode> PromOpHandles; 10738 for (auto &PromOp : PromOps) 10739 PromOpHandles.emplace_back(PromOp); 10740 10741 // Replace all operations (these are all the same, but have a different 10742 // (promoted) return type). DAG.getNode will validate that the types of 10743 // a binary operator match, so go through the list in reverse so that 10744 // we've likely promoted both operands first. 10745 while (!PromOpHandles.empty()) { 10746 SDValue PromOp = PromOpHandles.back().getValue(); 10747 PromOpHandles.pop_back(); 10748 10749 unsigned C; 10750 switch (PromOp.getOpcode()) { 10751 default: C = 0; break; 10752 case ISD::SELECT: C = 1; break; 10753 case ISD::SELECT_CC: C = 2; break; 10754 } 10755 10756 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 10757 PromOp.getOperand(C).getValueType() != N->getValueType(0)) || 10758 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 10759 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) { 10760 // The to-be-promoted operands of this node have not yet been 10761 // promoted (this should be rare because we're going through the 10762 // list backward, but if one of the operands has several users in 10763 // this cluster of to-be-promoted nodes, it is possible). 10764 PromOpHandles.emplace_front(PromOp); 10765 continue; 10766 } 10767 10768 // For SELECT and SELECT_CC nodes, we do a similar check for any 10769 // to-be-promoted comparison inputs. 10770 if (PromOp.getOpcode() == ISD::SELECT || 10771 PromOp.getOpcode() == ISD::SELECT_CC) { 10772 if ((SelectTruncOp[0].count(PromOp.getNode()) && 10773 PromOp.getOperand(0).getValueType() != N->getValueType(0)) || 10774 (SelectTruncOp[1].count(PromOp.getNode()) && 10775 PromOp.getOperand(1).getValueType() != N->getValueType(0))) { 10776 PromOpHandles.emplace_front(PromOp); 10777 continue; 10778 } 10779 } 10780 10781 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 10782 PromOp.getNode()->op_end()); 10783 10784 // If this node has constant inputs, then they'll need to be promoted here. 10785 for (unsigned i = 0; i < 2; ++i) { 10786 if (!isa<ConstantSDNode>(Ops[C+i])) 10787 continue; 10788 if (Ops[C+i].getValueType() == N->getValueType(0)) 10789 continue; 10790 10791 if (N->getOpcode() == ISD::SIGN_EXTEND) 10792 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 10793 else if (N->getOpcode() == ISD::ZERO_EXTEND) 10794 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 10795 else 10796 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 10797 } 10798 10799 // If we've promoted the comparison inputs of a SELECT or SELECT_CC, 10800 // truncate them again to the original value type. 
10801 if (PromOp.getOpcode() == ISD::SELECT || 10802 PromOp.getOpcode() == ISD::SELECT_CC) { 10803 auto SI0 = SelectTruncOp[0].find(PromOp.getNode()); 10804 if (SI0 != SelectTruncOp[0].end()) 10805 Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]); 10806 auto SI1 = SelectTruncOp[1].find(PromOp.getNode()); 10807 if (SI1 != SelectTruncOp[1].end()) 10808 Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]); 10809 } 10810 10811 DAG.ReplaceAllUsesOfValueWith(PromOp, 10812 DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops)); 10813 } 10814 10815 // Now we're left with the initial extension itself. 10816 if (!ReallyNeedsExt) 10817 return N->getOperand(0); 10818 10819 // To zero extend, just mask off everything except for the first bit (in the 10820 // i1 case). 10821 if (N->getOpcode() == ISD::ZERO_EXTEND) 10822 return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0), 10823 DAG.getConstant(APInt::getLowBitsSet( 10824 N->getValueSizeInBits(0), PromBits), 10825 dl, N->getValueType(0))); 10826 10827 assert(N->getOpcode() == ISD::SIGN_EXTEND && 10828 "Invalid extension type"); 10829 EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout()); 10830 SDValue ShiftCst = 10831 DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy); 10832 return DAG.getNode( 10833 ISD::SRA, dl, N->getValueType(0), 10834 DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst), 10835 ShiftCst); 10836 } 10837 10838 /// \brief Reduces the number of fp-to-int conversions when building a vector. 10839 /// 10840 /// If this vector is built out of floating-point to integer conversions, 10841 /// transform it to a vector built out of floating-point values followed by a 10842 /// single floating-point to integer conversion of the vector. 10843 /// Namely (build_vector (fptosi $A), (fptosi $B), ...) 10844 /// becomes (fptosi (build_vector ($A, $B, ...))) 10845 SDValue PPCTargetLowering:: 10846 combineElementTruncationToVectorTruncation(SDNode *N, 10847 DAGCombinerInfo &DCI) const { 10848 assert(N->getOpcode() == ISD::BUILD_VECTOR && 10849 "Should be called with a BUILD_VECTOR node"); 10850 10851 SelectionDAG &DAG = DCI.DAG; 10852 SDLoc dl(N); 10853 10854 SDValue FirstInput = N->getOperand(0); 10855 assert(FirstInput.getOpcode() == PPCISD::MFVSR && 10856 "The input operand must be an fp-to-int conversion."); 10857 10858 // This combine happens after legalization so the fp_to_[su]i nodes are 10859 // already converted to PPCISD nodes. 10860 unsigned FirstConversion = FirstInput.getOperand(0).getOpcode(); 10861 if (FirstConversion == PPCISD::FCTIDZ || 10862 FirstConversion == PPCISD::FCTIDUZ || 10863 FirstConversion == PPCISD::FCTIWZ || 10864 FirstConversion == PPCISD::FCTIWUZ) { 10865 bool IsSplat = true; 10866 bool Is32Bit = FirstConversion == PPCISD::FCTIWZ || 10867 FirstConversion == PPCISD::FCTIWUZ; 10868 EVT SrcVT = FirstInput.getOperand(0).getValueType(); 10869 SmallVector<SDValue, 4> Ops; 10870 EVT TargetVT = N->getValueType(0); 10871 for (int i = 0, e = N->getNumOperands(); i < e; ++i) { 10872 if (N->getOperand(i).getOpcode() != PPCISD::MFVSR) 10873 return SDValue(); 10874 unsigned NextConversion = N->getOperand(i).getOperand(0).getOpcode(); 10875 if (NextConversion != FirstConversion) 10876 return SDValue(); 10877 if (N->getOperand(i) != FirstInput) 10878 IsSplat = false; 10879 } 10880 10881 // If this is a splat, we leave it as-is since there will be only a single 10882 // fp-to-int conversion followed by a splat of the integer.
This is better 10883 // for 32-bit and smaller ints and neutral for 64-bit ints. 10884 if (IsSplat) 10885 return SDValue(); 10886 10887 // Now that we know we have the right type of node, get its operands 10888 for (int i = 0, e = N->getNumOperands(); i < e; ++i) { 10889 SDValue In = N->getOperand(i).getOperand(0); 10890 // For 32-bit values, we need to add an FP_ROUND node. 10891 if (Is32Bit) { 10892 if (In.isUndef()) 10893 Ops.push_back(DAG.getUNDEF(SrcVT)); 10894 else { 10895 SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl, 10896 MVT::f32, In.getOperand(0), 10897 DAG.getIntPtrConstant(1, dl)); 10898 Ops.push_back(Trunc); 10899 } 10900 } else 10901 Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0)); 10902 } 10903 10904 unsigned Opcode; 10905 if (FirstConversion == PPCISD::FCTIDZ || 10906 FirstConversion == PPCISD::FCTIWZ) 10907 Opcode = ISD::FP_TO_SINT; 10908 else 10909 Opcode = ISD::FP_TO_UINT; 10910 10911 EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32; 10912 SDValue BV = DAG.getBuildVector(NewVT, dl, Ops); 10913 return DAG.getNode(Opcode, dl, TargetVT, BV); 10914 } 10915 return SDValue(); 10916 } 10917 10918 /// \brief Reduce the number of loads when building a vector. 10919 /// 10920 /// Building a vector out of multiple loads can be converted to a load 10921 /// of the vector type if the loads are consecutive. If the loads are 10922 /// consecutive but in descending order, a shuffle is added at the end 10923 /// to reorder the vector. 10924 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) { 10925 assert(N->getOpcode() == ISD::BUILD_VECTOR && 10926 "Should be called with a BUILD_VECTOR node"); 10927 10928 SDLoc dl(N); 10929 bool InputsAreConsecutiveLoads = true; 10930 bool InputsAreReverseConsecutive = true; 10931 unsigned ElemSize = N->getValueType(0).getScalarSizeInBits() / 8; 10932 SDValue FirstInput = N->getOperand(0); 10933 bool IsRoundOfExtLoad = false; 10934 10935 if (FirstInput.getOpcode() == ISD::FP_ROUND && 10936 FirstInput.getOperand(0).getOpcode() == ISD::LOAD) { 10937 LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0)); 10938 IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD; 10939 } 10940 // Not a build vector of (possibly fp_rounded) loads. 10941 if (!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) 10942 return SDValue(); 10943 10944 for (int i = 1, e = N->getNumOperands(); i < e; ++i) { 10945 // If any inputs are fp_round(extload), they all must be. 10946 if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND) 10947 return SDValue(); 10948 10949 SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) : 10950 N->getOperand(i); 10951 if (NextInput.getOpcode() != ISD::LOAD) 10952 return SDValue(); 10953 10954 SDValue PreviousInput = 10955 IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1); 10956 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput); 10957 LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput); 10958 10959 // If any inputs are fp_round(extload), they all must be. 10960 if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD) 10961 return SDValue(); 10962 10963 if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG)) 10964 InputsAreConsecutiveLoads = false; 10965 if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG)) 10966 InputsAreReverseConsecutive = false; 10967 10968 // Exit early if the loads are neither consecutive nor reverse consecutive. 
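  // E.g. (build_vector (load p), (load p+4), (load p+8), (load p+12)) with
  // f32 elements satisfies every forward isConsecutiveLS query above
  // (ElemSize is 4 here) and can become a single v4f32 load of p; with the
  // addresses in descending order the reverse query holds instead, and a
  // reversing shuffle is emitted after the wide load. (Illustrative
  // addresses only.)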
10969 if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive) 10970 return SDValue(); 10971 } 10972 10973 assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) && 10974 "The loads cannot be both consecutive and reverse consecutive."); 10975 10976 SDValue FirstLoadOp = 10977 IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput; 10978 SDValue LastLoadOp = 10979 IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) : 10980 N->getOperand(N->getNumOperands()-1); 10981 10982 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp); 10983 LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp); 10984 if (InputsAreConsecutiveLoads) { 10985 assert(LD1 && "Input needs to be a LoadSDNode."); 10986 return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(), 10987 LD1->getBasePtr(), LD1->getPointerInfo(), 10988 LD1->getAlignment()); 10989 } 10990 if (InputsAreReverseConsecutive) { 10991 assert(LDL && "Input needs to be a LoadSDNode."); 10992 SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(), 10993 LDL->getBasePtr(), LDL->getPointerInfo(), 10994 LDL->getAlignment()); 10995 SmallVector<int, 16> Ops; 10996 for (int i = N->getNumOperands() - 1; i >= 0; i--) 10997 Ops.push_back(i); 10998 10999 return DAG.getVectorShuffle(N->getValueType(0), dl, Load, 11000 DAG.getUNDEF(N->getValueType(0)), Ops); 11001 } 11002 return SDValue(); 11003 } 11004 11005 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N, 11006 DAGCombinerInfo &DCI) const { 11007 assert(N->getOpcode() == ISD::BUILD_VECTOR && 11008 "Should be called with a BUILD_VECTOR node"); 11009 11010 SelectionDAG &DAG = DCI.DAG; 11011 SDLoc dl(N); 11012 11013 if (!Subtarget.hasVSX()) 11014 return SDValue(); 11015 11016 // The target independent DAG combiner will leave a build_vector of 11017 // float-to-int conversions intact. We can generate MUCH better code for 11018 // a float-to-int conversion of a vector of floats. 11019 SDValue FirstInput = N->getOperand(0); 11020 if (FirstInput.getOpcode() == PPCISD::MFVSR) { 11021 SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI); 11022 if (Reduced) 11023 return Reduced; 11024 } 11025 11026 // If we're building a vector out of consecutive loads, just load that 11027 // vector type. 
11028 SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG); 11029 if (Reduced) 11030 return Reduced; 11031 11032 if (N->getValueType(0) != MVT::v2f64) 11033 return SDValue(); 11034 11035 // Looking for: 11036 // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1)) 11037 if (FirstInput.getOpcode() != ISD::SINT_TO_FP && 11038 FirstInput.getOpcode() != ISD::UINT_TO_FP) 11039 return SDValue(); 11040 if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP && 11041 N->getOperand(1).getOpcode() != ISD::UINT_TO_FP) 11042 return SDValue(); 11043 if (FirstInput.getOpcode() != N->getOperand(1).getOpcode()) 11044 return SDValue(); 11045 11046 SDValue Ext1 = FirstInput.getOperand(0); 11047 SDValue Ext2 = N->getOperand(1).getOperand(0); 11048 if(Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || 11049 Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT) 11050 return SDValue(); 11051 11052 ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1)); 11053 ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1)); 11054 if (!Ext1Op || !Ext2Op) 11055 return SDValue(); 11056 if (Ext1.getValueType() != MVT::i32 || 11057 Ext2.getValueType() != MVT::i32) 11058 if (Ext1.getOperand(0) != Ext2.getOperand(0)) 11059 return SDValue(); 11060 11061 int FirstElem = Ext1Op->getZExtValue(); 11062 int SecondElem = Ext2Op->getZExtValue(); 11063 int SubvecIdx; 11064 if (FirstElem == 0 && SecondElem == 1) 11065 SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0; 11066 else if (FirstElem == 2 && SecondElem == 3) 11067 SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1; 11068 else 11069 return SDValue(); 11070 11071 SDValue SrcVec = Ext1.getOperand(0); 11072 auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ? 11073 PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP; 11074 return DAG.getNode(NodeType, dl, MVT::v2f64, 11075 SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl)); 11076 } 11077 11078 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N, 11079 DAGCombinerInfo &DCI) const { 11080 assert((N->getOpcode() == ISD::SINT_TO_FP || 11081 N->getOpcode() == ISD::UINT_TO_FP) && 11082 "Need an int -> FP conversion node here"); 11083 11084 if (useSoftFloat() || !Subtarget.has64BitSupport()) 11085 return SDValue(); 11086 11087 SelectionDAG &DAG = DCI.DAG; 11088 SDLoc dl(N); 11089 SDValue Op(N, 0); 11090 11091 SDValue FirstOperand(Op.getOperand(0)); 11092 bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD && 11093 (FirstOperand.getValueType() == MVT::i8 || 11094 FirstOperand.getValueType() == MVT::i16); 11095 if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) { 11096 bool Signed = N->getOpcode() == ISD::SINT_TO_FP; 11097 bool DstDouble = Op.getValueType() == MVT::f64; 11098 unsigned ConvOp = Signed ? 11099 (DstDouble ? PPCISD::FCFID : PPCISD::FCFIDS) : 11100 (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS); 11101 SDValue WidthConst = 11102 DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2, 11103 dl, false); 11104 LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode()); 11105 SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst }; 11106 SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl, 11107 DAG.getVTList(MVT::f64, MVT::Other), 11108 Ops, MVT::i8, LDN->getMemOperand()); 11109 11110 // For signed conversion, we need to sign-extend the value in the VSR 11111 if (Signed) { 11112 SDValue ExtOps[] = { Ld, WidthConst }; 11113 SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps); 11114 return DAG.getNode(ConvOp, dl, DstDouble ? 
MVT::f64 : MVT::f32, Ext); 11115 } else 11116 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld); 11117 } 11118 11119 // Don't handle ppc_fp128 or i1 conversions here. 11120 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 11121 return SDValue(); 11122 if (Op.getOperand(0).getValueType() == MVT::i1) 11123 return SDValue(); 11124 11125 // For i32 intermediate values, unfortunately, the conversion functions 11126 // leave the upper 32 bits of the value undefined. Within the set of 11127 // scalar instructions, we have no method for zero- or sign-extending the 11128 // value. Thus, we cannot handle i32 intermediate values here. 11129 if (Op.getOperand(0).getValueType() == MVT::i32) 11130 return SDValue(); 11131 11132 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) && 11133 "UINT_TO_FP is supported only with FPCVT"); 11134 11135 // If we have FCFIDS, then use it when converting to single-precision. 11136 // Otherwise, convert to double-precision and then round. 11137 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 11138 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS 11139 : PPCISD::FCFIDS) 11140 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU 11141 : PPCISD::FCFID); 11142 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 11143 ? MVT::f32 11144 : MVT::f64; 11145 11146 // If we're converting from a float to an int and back to a float again, 11147 // then we don't need the store/load pair at all. 11148 if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT && 11149 Subtarget.hasFPCVT()) || 11150 (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) { 11151 SDValue Src = Op.getOperand(0).getOperand(0); 11152 if (Src.getValueType() == MVT::f32) { 11153 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 11154 DCI.AddToWorklist(Src.getNode()); 11155 } else if (Src.getValueType() != MVT::f64) { 11156 // Make sure that we don't pick up a ppc_fp128 source value. 11157 return SDValue(); 11158 } 11159 11160 unsigned FCTOp = 11161 Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 11162 PPCISD::FCTIDUZ; 11163 11164 SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src); 11165 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp); 11166 11167 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) { 11168 FP = DAG.getNode(ISD::FP_ROUND, dl, 11169 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 11170 DCI.AddToWorklist(FP.getNode()); 11171 } 11172 11173 return FP; 11174 } 11175 11176 return SDValue(); 11177 } 11178 11179 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for 11180 // builtins) into loads with swaps. 11181 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N, 11182 DAGCombinerInfo &DCI) const { 11183 SelectionDAG &DAG = DCI.DAG; 11184 SDLoc dl(N); 11185 SDValue Chain; 11186 SDValue Base; 11187 MachineMemOperand *MMO; 11188 11189 switch (N->getOpcode()) { 11190 default: 11191 llvm_unreachable("Unexpected opcode for little endian VSX load"); 11192 case ISD::LOAD: { 11193 LoadSDNode *LD = cast<LoadSDNode>(N); 11194 Chain = LD->getChain(); 11195 Base = LD->getBasePtr(); 11196 MMO = LD->getMemOperand(); 11197 // If the MMO suggests this isn't a load of a full vector, leave 11198 // things alone. For a built-in, we have to make the change for 11199 // correctness, so if there is a size problem, that will be a bug.
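  // Sketch of the expansion built further down for a little-endian v4i32
  // load (node shapes illustrative): (v4i32 load p) becomes
  //   t1: v2f64,ch = LXVD2X p      ; endian-neutral doubleword load
  //   t2: v2f64,ch = XXSWAPD t1    ; undo the doubleword swap
  //   t3: v4i32    = bitcast t2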
11200 if (MMO->getSize() < 16) 11201 return SDValue(); 11202 break; 11203 } 11204 case ISD::INTRINSIC_W_CHAIN: { 11205 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 11206 Chain = Intrin->getChain(); 11207 // Similarly to the store case below, Intrin->getBasePtr() doesn't get 11208 // us what we want. Get operand 2 instead. 11209 Base = Intrin->getOperand(2); 11210 MMO = Intrin->getMemOperand(); 11211 break; 11212 } 11213 } 11214 11215 MVT VecTy = N->getValueType(0).getSimpleVT(); 11216 11217 // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is 11218 // aligned and the type is a vector with elements of at most 4 bytes. 11219 if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment() % 16) 11220 && VecTy.getScalarSizeInBits() <= 32) { 11221 return SDValue(); 11222 } 11223 11224 SDValue LoadOps[] = { Chain, Base }; 11225 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl, 11226 DAG.getVTList(MVT::v2f64, MVT::Other), 11227 LoadOps, MVT::v2f64, MMO); 11228 11229 DCI.AddToWorklist(Load.getNode()); 11230 Chain = Load.getValue(1); 11231 SDValue Swap = DAG.getNode( 11232 PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load); 11233 DCI.AddToWorklist(Swap.getNode()); 11234 11235 // Add a bitcast if the resulting load type doesn't match v2f64. 11236 if (VecTy != MVT::v2f64) { 11237 SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap); 11238 DCI.AddToWorklist(N.getNode()); 11239 // Package {bitcast value, swap's chain} to match Load's shape. 11240 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other), 11241 N, Swap.getValue(1)); 11242 } 11243 11244 return Swap; 11245 } 11246 11247 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for 11248 // builtins) into stores with swaps. 11249 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N, 11250 DAGCombinerInfo &DCI) const { 11251 SelectionDAG &DAG = DCI.DAG; 11252 SDLoc dl(N); 11253 SDValue Chain; 11254 SDValue Base; 11255 unsigned SrcOpnd; 11256 MachineMemOperand *MMO; 11257 11258 switch (N->getOpcode()) { 11259 default: 11260 llvm_unreachable("Unexpected opcode for little endian VSX store"); 11261 case ISD::STORE: { 11262 StoreSDNode *ST = cast<StoreSDNode>(N); 11263 Chain = ST->getChain(); 11264 Base = ST->getBasePtr(); 11265 MMO = ST->getMemOperand(); 11266 SrcOpnd = 1; 11267 // If the MMO suggests this isn't a store of a full vector, leave 11268 // things alone. For a built-in, we have to make the change for 11269 // correctness, so if there is a size problem, that will be a bug. 11270 if (MMO->getSize() < 16) 11271 return SDValue(); 11272 break; 11273 } 11274 case ISD::INTRINSIC_VOID: { 11275 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 11276 Chain = Intrin->getChain(); 11277 // Intrin->getBasePtr() oddly does not get what we want. 11278 Base = Intrin->getOperand(3); 11279 MMO = Intrin->getMemOperand(); 11280 SrcOpnd = 2; 11281 break; 11282 } 11283 } 11284 11285 SDValue Src = N->getOperand(SrcOpnd); 11286 MVT VecTy = Src.getValueType().getSimpleVT(); 11287 11288 // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is 11289 // aligned and the type is a vector with elements of at most 4 bytes. 11290 if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment() % 16) 11291 && VecTy.getScalarSizeInBits() <= 32) { 11292 return SDValue(); 11293 } 11294 11295 // All stores are done as v2f64 with a possible bit cast.
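  // Mirror image of the load case (sketch): a v4i32 store of src becomes
  //   t1: v2f64    = bitcast src
  //   t2: v2f64,ch = XXSWAPD t1
  //   STXVD2X t2, p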
11296 if (VecTy != MVT::v2f64) { 11297 Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src); 11298 DCI.AddToWorklist(Src.getNode()); 11299 } 11300 11301 SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl, 11302 DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src); 11303 DCI.AddToWorklist(Swap.getNode()); 11304 Chain = Swap.getValue(1); 11305 SDValue StoreOps[] = { Chain, Swap, Base }; 11306 SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl, 11307 DAG.getVTList(MVT::Other), 11308 StoreOps, VecTy, MMO); 11309 DCI.AddToWorklist(Store.getNode()); 11310 return Store; 11311 } 11312 11313 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, 11314 DAGCombinerInfo &DCI) const { 11315 SelectionDAG &DAG = DCI.DAG; 11316 SDLoc dl(N); 11317 switch (N->getOpcode()) { 11318 default: break; 11319 case PPCISD::SHL: 11320 if (isNullConstant(N->getOperand(0))) // 0 << V -> 0. 11321 return N->getOperand(0); 11322 break; 11323 case PPCISD::SRL: 11324 if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0. 11325 return N->getOperand(0); 11326 break; 11327 case PPCISD::SRA: 11328 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 11329 if (C->isNullValue() || // 0 >>s V -> 0. 11330 C->isAllOnesValue()) // -1 >>s V -> -1. 11331 return N->getOperand(0); 11332 } 11333 break; 11334 case ISD::SIGN_EXTEND: 11335 case ISD::ZERO_EXTEND: 11336 case ISD::ANY_EXTEND: 11337 return DAGCombineExtBoolTrunc(N, DCI); 11338 case ISD::TRUNCATE: 11339 case ISD::SETCC: 11340 case ISD::SELECT_CC: 11341 return DAGCombineTruncBoolExt(N, DCI); 11342 case ISD::SINT_TO_FP: 11343 case ISD::UINT_TO_FP: 11344 return combineFPToIntToFP(N, DCI); 11345 case ISD::STORE: { 11346 EVT Op1VT = N->getOperand(1).getValueType(); 11347 bool ValidTypeForStoreFltAsInt = (Op1VT == MVT::i32) || 11348 (Subtarget.hasP9Vector() && (Op1VT == MVT::i8 || Op1VT == MVT::i16)); 11349 11350 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)). 11351 if (Subtarget.hasSTFIWX() && !cast<StoreSDNode>(N)->isTruncatingStore() && 11352 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT && 11353 ValidTypeForStoreFltAsInt && 11354 N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) { 11355 SDValue Val = N->getOperand(1).getOperand(0); 11356 if (Val.getValueType() == MVT::f32) { 11357 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 11358 DCI.AddToWorklist(Val.getNode()); 11359 } 11360 Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val); 11361 DCI.AddToWorklist(Val.getNode()); 11362 11363 if (Op1VT == MVT::i32) { 11364 SDValue Ops[] = { 11365 N->getOperand(0), Val, N->getOperand(2), 11366 DAG.getValueType(N->getOperand(1).getValueType()) 11367 }; 11368 11369 Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 11370 DAG.getVTList(MVT::Other), Ops, 11371 cast<StoreSDNode>(N)->getMemoryVT(), 11372 cast<StoreSDNode>(N)->getMemOperand()); 11373 } else { 11374 unsigned WidthInBytes = 11375 N->getOperand(1).getValueType() == MVT::i8 ? 1 : 2; 11376 SDValue WidthConst = DAG.getIntPtrConstant(WidthInBytes, dl, false); 11377 11378 SDValue Ops[] = { 11379 N->getOperand(0), Val, N->getOperand(2), WidthConst, 11380 DAG.getValueType(N->getOperand(1).getValueType()) 11381 }; 11382 Val = DAG.getMemIntrinsicNode(PPCISD::STXSIX, dl, 11383 DAG.getVTList(MVT::Other), Ops, 11384 cast<StoreSDNode>(N)->getMemoryVT(), 11385 cast<StoreSDNode>(N)->getMemOperand()); 11386 } 11387 11388 DCI.AddToWorklist(Val.getNode()); 11389 return Val; 11390 } 11391 11392 // Turn STORE (BSWAP) -> sthbrx/stwbrx. 
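  // E.g. (store (bswap x:i32), p) becomes a single byte-reversed store,
  // roughly stwbrx(x, p); an i16 value goes through sthbrx after an
  // any-extend, and i64 can use stdbrx when LDBRX-class instructions are
  // available. (Sketch of the cases matched below.)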
11393 if (cast<StoreSDNode>(N)->isUnindexed() && 11394 N->getOperand(1).getOpcode() == ISD::BSWAP && 11395 N->getOperand(1).getNode()->hasOneUse() && 11396 (N->getOperand(1).getValueType() == MVT::i32 || 11397 N->getOperand(1).getValueType() == MVT::i16 || 11398 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && 11399 N->getOperand(1).getValueType() == MVT::i64))) { 11400 SDValue BSwapOp = N->getOperand(1).getOperand(0); 11401 // Do an any-extend to 32-bits if this is a half-word input. 11402 if (BSwapOp.getValueType() == MVT::i16) 11403 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp); 11404 11405 // If the type of the BSWAP operand is wider than the stored memory width, 11406 // it needs to be shifted right before STBRX. 11407 EVT mVT = cast<StoreSDNode>(N)->getMemoryVT(); 11408 if (Op1VT.bitsGT(mVT)) { 11409 int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits(); 11410 BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp, 11411 DAG.getConstant(Shift, dl, MVT::i32)); 11412 // Need to truncate if this is a bswap of i64 stored as i32/i16. 11413 if (Op1VT == MVT::i64) 11414 BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp); 11415 } 11416 11417 SDValue Ops[] = { 11418 N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT) 11419 }; 11420 return 11421 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other), 11422 Ops, cast<StoreSDNode>(N)->getMemoryVT(), 11423 cast<StoreSDNode>(N)->getMemOperand()); 11424 } 11425 11426 // For little endian, VSX stores require generating xxswapd/stxvd2x. 11427 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store. 11428 EVT VT = N->getOperand(1).getValueType(); 11429 if (VT.isSimple()) { 11430 MVT StoreVT = VT.getSimpleVT(); 11431 if (Subtarget.needsSwapsForVSXMemOps() && 11432 (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 || 11433 StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32)) 11434 return expandVSXStoreForLE(N, DCI); 11435 } 11436 break; 11437 } 11438 case ISD::LOAD: { 11439 LoadSDNode *LD = cast<LoadSDNode>(N); 11440 EVT VT = LD->getValueType(0); 11441 11442 // For little endian, VSX loads require generating lxvd2x/xxswapd. 11443 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load. 11444 if (VT.isSimple()) { 11445 MVT LoadVT = VT.getSimpleVT(); 11446 if (Subtarget.needsSwapsForVSXMemOps() && 11447 (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 || 11448 LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32)) 11449 return expandVSXLoadForLE(N, DCI); 11450 } 11451 11452 // We sometimes end up with a 64-bit integer load, from which we extract 11453 // two single-precision floating-point numbers. This happens with 11454 // std::complex<float>, and other similar structures, because of the way we 11455 // canonicalize structure copies. However, if we lack direct moves, 11456 // then the final bitcasts from the extracted integer values to the 11457 // floating-point numbers turn into store/load pairs. Even with direct moves, 11458 // just loading the two floating-point numbers is likely better.
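  // At the source level the pattern handled below arises from, e.g.:
  //   std::complex<float> c = ...;          // copied via one i64 load
  //   float re = c.real(), im = c.imag();   // srl/truncate/bitcast pairs
  // ReplaceTwoFloatLoad rewrites that into two plain f32 loads from the
  // original address and address+4 (endian-adjusted), avoiding the GPR
  // round-trip. (Illustrative C++; the matched DAG shape is shown inside
  // the lambda.)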
11459 auto ReplaceTwoFloatLoad = [&]() { 11460 if (VT != MVT::i64) 11461 return false; 11462 11463 if (LD->getExtensionType() != ISD::NON_EXTLOAD || 11464 LD->isVolatile()) 11465 return false; 11466 11467 // We're looking for a sequence like this: 11468 // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64 11469 // t16: i64 = srl t13, Constant:i32<32> 11470 // t17: i32 = truncate t16 11471 // t18: f32 = bitcast t17 11472 // t19: i32 = truncate t13 11473 // t20: f32 = bitcast t19 11474 11475 if (!LD->hasNUsesOfValue(2, 0)) 11476 return false; 11477 11478 auto UI = LD->use_begin(); 11479 while (UI.getUse().getResNo() != 0) ++UI; 11480 SDNode *Trunc = *UI++; 11481 while (UI.getUse().getResNo() != 0) ++UI; 11482 SDNode *RightShift = *UI; 11483 if (Trunc->getOpcode() != ISD::TRUNCATE) 11484 std::swap(Trunc, RightShift); 11485 11486 if (Trunc->getOpcode() != ISD::TRUNCATE || 11487 Trunc->getValueType(0) != MVT::i32 || 11488 !Trunc->hasOneUse()) 11489 return false; 11490 if (RightShift->getOpcode() != ISD::SRL || 11491 !isa<ConstantSDNode>(RightShift->getOperand(1)) || 11492 RightShift->getConstantOperandVal(1) != 32 || 11493 !RightShift->hasOneUse()) 11494 return false; 11495 11496 SDNode *Trunc2 = *RightShift->use_begin(); 11497 if (Trunc2->getOpcode() != ISD::TRUNCATE || 11498 Trunc2->getValueType(0) != MVT::i32 || 11499 !Trunc2->hasOneUse()) 11500 return false; 11501 11502 SDNode *Bitcast = *Trunc->use_begin(); 11503 SDNode *Bitcast2 = *Trunc2->use_begin(); 11504 11505 if (Bitcast->getOpcode() != ISD::BITCAST || 11506 Bitcast->getValueType(0) != MVT::f32) 11507 return false; 11508 if (Bitcast2->getOpcode() != ISD::BITCAST || 11509 Bitcast2->getValueType(0) != MVT::f32) 11510 return false; 11511 11512 if (Subtarget.isLittleEndian()) 11513 std::swap(Bitcast, Bitcast2); 11514 11515 // Bitcast has the second float (in memory-layout order) and Bitcast2 11516 // has the first one. 11517 11518 SDValue BasePtr = LD->getBasePtr(); 11519 if (LD->isIndexed()) { 11520 assert(LD->getAddressingMode() == ISD::PRE_INC && 11521 "Non-pre-inc AM on PPC?"); 11522 BasePtr = 11523 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 11524 LD->getOffset()); 11525 } 11526 11527 auto MMOFlags = 11528 LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile; 11529 SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr, 11530 LD->getPointerInfo(), LD->getAlignment(), 11531 MMOFlags, LD->getAAInfo()); 11532 SDValue AddPtr = 11533 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), 11534 BasePtr, DAG.getIntPtrConstant(4, dl)); 11535 SDValue FloatLoad2 = DAG.getLoad( 11536 MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr, 11537 LD->getPointerInfo().getWithOffset(4), 11538 MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo()); 11539 11540 if (LD->isIndexed()) { 11541 // Note that DAGCombine should re-form any pre-increment load(s) from 11542 // what is produced here if that makes sense. 11543 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr); 11544 } 11545 11546 DCI.CombineTo(Bitcast2, FloatLoad); 11547 DCI.CombineTo(Bitcast, FloatLoad2); 11548 11549 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 
2 : 1), 11550 SDValue(FloatLoad2.getNode(), 1)); 11551 return true; 11552 }; 11553 11554 if (ReplaceTwoFloatLoad()) 11555 return SDValue(N, 0); 11556 11557 EVT MemVT = LD->getMemoryVT(); 11558 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext()); 11559 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty); 11560 Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext()); 11561 unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy); 11562 if (LD->isUnindexed() && VT.isVector() && 11563 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) && 11564 // P8 and later hardware should just use LOAD. 11565 !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 || 11566 VT == MVT::v4i32 || VT == MVT::v4f32)) || 11567 (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) && 11568 LD->getAlignment() >= ScalarABIAlignment)) && 11569 LD->getAlignment() < ABIAlignment) { 11570 // This is a type-legal unaligned Altivec or QPX load. 11571 SDValue Chain = LD->getChain(); 11572 SDValue Ptr = LD->getBasePtr(); 11573 bool isLittleEndian = Subtarget.isLittleEndian(); 11574 11575 // This implements the loading of unaligned vectors as described in 11576 // the venerable Apple Velocity Engine overview. Specifically: 11577 // https://developer.apple.com/hardwaredrivers/ve/alignment.html 11578 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html 11579 // 11580 // The general idea is to expand a sequence of one or more unaligned 11581 // loads into an alignment-based permutation-control instruction (lvsl 11582 // or lvsr), a series of regular vector loads (which always truncate 11583 // their input address to an aligned address), and a series of 11584 // permutations. The results of these permutations are the requested 11585 // loaded values. The trick is that the last "extra" load is not taken 11586 // from the address you might suspect (sizeof(vector) bytes after the 11587 // last requested load), but rather sizeof(vector) - 1 bytes after the 11588 // last requested vector. The point of this is to avoid a page fault if 11589 // the base address happened to be aligned. This works because if the 11590 // base address is aligned, then adding less than a full vector length 11591 // will cause the last vector in the sequence to be (re)loaded. 11592 // Otherwise, the next vector will be fetched as you might suspect was 11593 // necessary. 11594 11595 // We might be able to reuse the permutation generation from 11596 // a different base address offset from this one by an aligned amount. 11597 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this 11598 // optimization later. 11599 Intrinsic::ID Intr, IntrLD, IntrPerm; 11600 MVT PermCntlTy, PermTy, LDTy; 11601 if (Subtarget.hasAltivec()) { 11602 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr : 11603 Intrinsic::ppc_altivec_lvsl; 11604 IntrLD = Intrinsic::ppc_altivec_lvx; 11605 IntrPerm = Intrinsic::ppc_altivec_vperm; 11606 PermCntlTy = MVT::v16i8; 11607 PermTy = MVT::v4i32; 11608 LDTy = MVT::v4i32; 11609 } else { 11610 Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld : 11611 Intrinsic::ppc_qpx_qvlpcls; 11612 IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd : 11613 Intrinsic::ppc_qpx_qvlfs; 11614 IntrPerm = Intrinsic::ppc_qpx_qvfperm; 11615 PermCntlTy = MVT::v4f64; 11616 PermTy = MVT::v4f64; 11617 LDTy = MemVT.getSimpleVT(); 11618 } 11619 11620 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy); 11621 11622 // Create the new MMO for the new base load. 
It is like the original MMO, 11623 // but represents an area in memory almost twice the vector size centered 11624 // on the original address. If the address is unaligned, we might start 11625 // reading up to (sizeof(vector)-1) bytes below the address of the 11626 // original unaligned load. 11627 MachineFunction &MF = DAG.getMachineFunction(); 11628 MachineMemOperand *BaseMMO = 11629 MF.getMachineMemOperand(LD->getMemOperand(), 11630 -(long)MemVT.getStoreSize()+1, 11631 2*MemVT.getStoreSize()-1); 11632 11633 // Create the new base load. 11634 SDValue LDXIntID = 11635 DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout())); 11636 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr }; 11637 SDValue BaseLoad = 11638 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 11639 DAG.getVTList(PermTy, MVT::Other), 11640 BaseLoadOps, LDTy, BaseMMO); 11641 11642 // Note that the value of IncOffset (which is provided to the next 11643 // load's pointer info offset value, and thus used to calculate the 11644 // alignment), and the value of IncValue (which is actually used to 11645 // increment the pointer value) are different! This is because we 11646 // require the next load to appear to be aligned, even though it 11647 // is actually offset from the base pointer by a lesser amount. 11648 int IncOffset = VT.getSizeInBits() / 8; 11649 int IncValue = IncOffset; 11650 11651 // Walk (both up and down) the chain looking for another load at the real 11652 // (aligned) offset (the alignment of the other load does not matter in 11653 // this case). If found, then do not use the offset reduction trick, as 11654 // that will prevent the loads from being later combined (as they would 11655 // otherwise be duplicates). 11656 if (!findConsecutiveLoad(LD, DAG)) 11657 --IncValue; 11658 11659 SDValue Increment = 11660 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout())); 11661 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 11662 11663 MachineMemOperand *ExtraMMO = 11664 MF.getMachineMemOperand(LD->getMemOperand(), 11665 1, 2*MemVT.getStoreSize()-1); 11666 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr }; 11667 SDValue ExtraLoad = 11668 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 11669 DAG.getVTList(PermTy, MVT::Other), 11670 ExtraLoadOps, LDTy, ExtraMMO); 11671 11672 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 11673 BaseLoad.getValue(1), ExtraLoad.getValue(1)); 11674 11675 // Because vperm has a big-endian bias, we must reverse the order 11676 // of the input vectors and complement the permute control vector 11677 // when generating little endian code. We have already handled the 11678 // latter by using lvsr instead of lvsl, so just reverse BaseLoad 11679 // and ExtraLoad here. 11680 SDValue Perm; 11681 if (isLittleEndian) 11682 Perm = BuildIntrinsicOp(IntrPerm, 11683 ExtraLoad, BaseLoad, PermCntl, DAG, dl); 11684 else 11685 Perm = BuildIntrinsicOp(IntrPerm, 11686 BaseLoad, ExtraLoad, PermCntl, DAG, dl); 11687 11688 if (VT != PermTy) 11689 Perm = Subtarget.hasAltivec() ? 11690 DAG.getNode(ISD::BITCAST, dl, VT, Perm) : 11691 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX 11692 DAG.getTargetConstant(1, dl, MVT::i64)); 11693 // second argument is 1 because this rounding 11694 // is always exact. 11695 11696 // The output of the permutation is our loaded result, the TokenFactor is 11697 // our new chain. 
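  // Putting the pieces together (sketch, v4i32 Altivec case): for an
  // unaligned load from p this emits
  //   perm   = lvsl/lvsr(p)         ; alignment-based permute control
  //   base   = lvx(p)               ; lvx truncates p to 16-byte alignment
  //   extra  = lvx(p + 15)          ; or p + 16 when a consecutive load exists
  //   result = vperm(base, extra, perm)   ; operand order swapped on LE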
11698 DCI.CombineTo(N, Perm, TF); 11699 return SDValue(N, 0); 11700 } 11701 } 11702 break; 11703 case ISD::INTRINSIC_WO_CHAIN: { 11704 bool isLittleEndian = Subtarget.isLittleEndian(); 11705 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 11706 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr 11707 : Intrinsic::ppc_altivec_lvsl); 11708 if ((IID == Intr || 11709 IID == Intrinsic::ppc_qpx_qvlpcld || 11710 IID == Intrinsic::ppc_qpx_qvlpcls) && 11711 N->getOperand(1)->getOpcode() == ISD::ADD) { 11712 SDValue Add = N->getOperand(1); 11713 11714 int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ? 11715 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */; 11716 11717 if (DAG.MaskedValueIsZero(Add->getOperand(1), 11718 APInt::getAllOnesValue(Bits /* alignment */) 11719 .zext(Add.getScalarValueSizeInBits()))) { 11720 SDNode *BasePtr = Add->getOperand(0).getNode(); 11721 for (SDNode::use_iterator UI = BasePtr->use_begin(), 11722 UE = BasePtr->use_end(); 11723 UI != UE; ++UI) { 11724 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 11725 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) { 11726 // We've found another LVSL/LVSR, and this address is an aligned 11727 // multiple of that one. The results will be the same, so use the 11728 // one we've just found instead. 11729 11730 return SDValue(*UI, 0); 11731 } 11732 } 11733 } 11734 11735 if (isa<ConstantSDNode>(Add->getOperand(1))) { 11736 SDNode *BasePtr = Add->getOperand(0).getNode(); 11737 for (SDNode::use_iterator UI = BasePtr->use_begin(), 11738 UE = BasePtr->use_end(); UI != UE; ++UI) { 11739 if (UI->getOpcode() == ISD::ADD && 11740 isa<ConstantSDNode>(UI->getOperand(1)) && 11741 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() - 11742 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) % 11743 (1ULL << Bits) == 0) { 11744 SDNode *OtherAdd = *UI; 11745 for (SDNode::use_iterator VI = OtherAdd->use_begin(), 11746 VE = OtherAdd->use_end(); VI != VE; ++VI) { 11747 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 11748 cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) { 11749 return SDValue(*VI, 0); 11750 } 11751 } 11752 } 11753 } 11754 } 11755 } 11756 } 11757 11758 break; 11759 case ISD::INTRINSIC_W_CHAIN: 11760 // For little endian, VSX loads require generating lxvd2x/xxswapd. 11761 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load. 11762 if (Subtarget.needsSwapsForVSXMemOps()) { 11763 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 11764 default: 11765 break; 11766 case Intrinsic::ppc_vsx_lxvw4x: 11767 case Intrinsic::ppc_vsx_lxvd2x: 11768 return expandVSXLoadForLE(N, DCI); 11769 } 11770 } 11771 break; 11772 case ISD::INTRINSIC_VOID: 11773 // For little endian, VSX stores require generating xxswapd/stxvd2x. 11774 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store. 11775 if (Subtarget.needsSwapsForVSXMemOps()) { 11776 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 11777 default: 11778 break; 11779 case Intrinsic::ppc_vsx_stxvw4x: 11780 case Intrinsic::ppc_vsx_stxvd2x: 11781 return expandVSXStoreForLE(N, DCI); 11782 } 11783 } 11784 break; 11785 case ISD::BSWAP: 11786 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 
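  // E.g. (bswap (i32 load p)) becomes a single byte-reversed load, roughly
  // lwbrx(p); an i16 source uses lhbrx plus a truncate, and i64 uses ldbrx
  // when available. The combine below also retires the now-dead original
  // load through its chain result. (Sketch of the cases matched next.)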
    if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
        N->getOperand(0).hasOneUse() &&
        (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
          N->getValueType(0) == MVT::i64))) {
      SDValue Load = N->getOperand(0);
      LoadSDNode *LD = cast<LoadSDNode>(Load);
      // Create the byte-swapping load.
      SDValue Ops[] = {
        LD->getChain(),    // Chain
        LD->getBasePtr(),  // Ptr
        DAG.getValueType(N->getValueType(0)) // VT
      };
      SDValue BSLoad =
        DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
                                DAG.getVTList(N->getValueType(0) == MVT::i64 ?
                                              MVT::i64 : MVT::i32, MVT::Other),
                                Ops, LD->getMemoryVT(), LD->getMemOperand());

      // If this is an i16 load, insert the truncate.
      SDValue ResVal = BSLoad;
      if (N->getValueType(0) == MVT::i16)
        ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);

      // First, combine the bswap away. This makes the value produced by the
      // load dead.
      DCI.CombineTo(N, ResVal);

      // Next, combine the load away; we give it a bogus result value but a
      // real chain result. The result value is dead because the bswap is dead.
      DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));

      // Return N so it doesn't get rechecked!
      return SDValue(N, 0);
    }
    break;
  case PPCISD::VCMP:
    // If a VCMPo node already exists with exactly the same operands as this
    // node, use its result instead of this node (VCMPo computes both a CR6 and
    // a normal output).
    if (!N->getOperand(0).hasOneUse() &&
        !N->getOperand(1).hasOneUse() &&
        !N->getOperand(2).hasOneUse()) {

      // Scan all of the users of the LHS, looking for VCMPo's that match.
      SDNode *VCMPoNode = nullptr;

      SDNode *LHSN = N->getOperand(0).getNode();
      for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
           UI != E; ++UI)
        if (UI->getOpcode() == PPCISD::VCMPo &&
            UI->getOperand(1) == N->getOperand(1) &&
            UI->getOperand(2) == N->getOperand(2) &&
            UI->getOperand(0) == N->getOperand(0)) {
          VCMPoNode = *UI;
          break;
        }

      // If there is no VCMPo node, or if the flag value has a single use,
      // don't transform this.
      if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
        break;

      // Look at the (necessarily single) use of the flag value. If it has a
      // chain, this transformation is more complex. Note that multiple things
      // could use the value result, which we should ignore.
      SDNode *FlagUser = nullptr;
      for (SDNode::use_iterator UI = VCMPoNode->use_begin();
           FlagUser == nullptr; ++UI) {
        assert(UI != VCMPoNode->use_end() && "Didn't find user!");
        SDNode *User = *UI;
        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
          if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
            FlagUser = User;
            break;
          }
        }
      }

      // If the user is a MFOCRF instruction, we know this is safe.
      // Otherwise we give up for right now.
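      // A MFOCRF user only copies the CR field that the record-form compare
      // defines into a GPR, so feeding it the existing VCMPo's result cannot
      // change what it observes; for any other glue user we conservatively
      // give up, as noted above.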
      if (FlagUser->getOpcode() == PPCISD::MFOCRF)
        return SDValue(VCMPoNode, 0);
    }
    break;
  case ISD::BRCOND: {
    SDValue Cond = N->getOperand(1);
    SDValue Target = N->getOperand(2);

    if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
          Intrinsic::ppc_is_decremented_ctr_nonzero) {

      // We now need to make the intrinsic dead (it cannot be instruction
      // selected).
      DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
      assert(Cond.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
                         N->getOperand(0), Target);
    }
  }
  break;
  case ISD::BR_CC: {
    // If this is a branch on an altivec predicate comparison, lower this so
    // that we don't have to do a MFOCRF: instead, branch directly on CR6. This
    // lowering is done pre-legalize, because the legalizer lowers the
    // predicate compare down to code that is difficult to reassemble.
    ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);

    // Sometimes the promoted value of the intrinsic is ANDed by some non-zero
    // value. If so, pass through the AND to get to the intrinsic.
    if (LHS.getOpcode() == ISD::AND &&
        LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
          Intrinsic::ppc_is_decremented_ctr_nonzero &&
        isa<ConstantSDNode>(LHS.getOperand(1)) &&
        !isNullConstant(LHS.getOperand(1)))
      LHS = LHS.getOperand(0);

    if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
          Intrinsic::ppc_is_decremented_ctr_nonzero &&
        isa<ConstantSDNode>(RHS)) {
      assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
             "Counter decrement comparison is not EQ or NE");

      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      bool isBDNZ = (CC == ISD::SETEQ && Val) ||
                    (CC == ISD::SETNE && !Val);

      // We now need to make the intrinsic dead (it cannot be instruction
      // selected).
      DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
      assert(LHS.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
                         N->getOperand(0), N->getOperand(4));
    }

    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
      assert(isDot && "Can't compare against a vector result!");

      // If this is a comparison against something other than 0/1, then we know
      // that the condition is never/always true.
      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ)      // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, dl, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

      // Create the PPCISD altivec 'dot' comparison node.
      SDValue Ops[] = {
        LHS.getOperand(2),  // LHS of compare
        LHS.getOperand(3),  // RHS of compare
        DAG.getConstant(CompareOpc, dl, MVT::i32)
      };
      EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
      SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);

      // Unpack the result based on how the target uses it.
      PPC::Predicate CompOpc;
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
                         DAG.getConstant(CompOpc, dl, MVT::i32),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  case ISD::BUILD_VECTOR:
    return DAGCombineBuildVector(N, DCI);
  }

  return SDValue();
}

SDValue
PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 std::vector<SDNode *> *Created) const {
  // fold (sdiv X, pow2)
  EVT VT = N->getValueType(0);
  if (VT == MVT::i64 && !Subtarget.isPPC64())
    return SDValue();
  if ((VT != MVT::i32 && VT != MVT::i64) ||
      !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
    return SDValue();

  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);

  bool IsNegPow2 = (-Divisor).isPowerOf2();
  unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
  SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);

  // SRA_ADDZE is the sra[wd]i + addze pair: the algebraic right shift sets
  // the carry if the dividend is negative and nonzero bits were shifted out,
  // and addze adds that carry back, rounding the quotient toward zero.
  SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
  if (Created)
    Created->push_back(Op.getNode());

  // For a negative power of two, negate the shifted result.
  if (IsNegPow2) {
    Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
    if (Created)
      Created->push_back(Op.getNode());
  }

  return Op;
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  Known.resetAll();
  switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
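    // Operand 2 of LBRX is a VTSDNode holding the memory VT; lhbrx loads a
    // byte-reversed halfword and zero-fills the remainder of the 32-bit
    // result, so the top 16 bits are known zero.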
    if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
      Known.Zero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpequd_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtsd_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
    case Intrinsic::ppc_altivec_vcmpgtud_p:
      Known.Zero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}

unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9: {
    if (!ML)
      break;

    const PPCInstrInfo *TII = Subtarget.getInstrInfo();

    // For small loops (between 5 and 8 instructions), align to a 32-byte
    // boundary so that the entire loop fits in one instruction-cache line.
    uint64_t LoopSize = 0;
    for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
      for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
        LoopSize += TII->getInstSizeInBytes(*J);
        if (LoopSize > 32)
          break;
      }

    // The return value is the log2 of the requested alignment: 5 means a
    // 2^5 = 32-byte boundary.
    if (LoopSize > 16 && LoopSize <= 32)
      return 5;

    break;
  }
  }

  return TargetLowering::getPrefLoopAlignment(ML);
}

/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'd':
    case 'v':
    case 'y':
      return C_RegisterClass;
    case 'Z':
      // FIXME: While Z does indicate a memory constraint, it specifically
      // indicates an r+r address (used in conjunction with the 'y' modifier
      // in the replacement string). Currently, we're forcing the base
      // register to be r0 in the asm printer (which is interpreted as zero)
      // and forming the complete address in the second register. This is
      // suboptimal.
      return C_Memory;
    }
  } else if (Constraint == "wc") { // individual CR bits.
    return C_RegisterClass;
  } else if (Constraint == "wa" || Constraint == "wd" ||
             Constraint == "wf" || Constraint == "ws") {
    return C_RegisterClass; // VSX registers.
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
PPCTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();

  // Look at the constraint type.
  if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
    return CW_Register; // an individual CR bit.
  else if ((StringRef(constraint) == "wa" ||
            StringRef(constraint) == "wd" ||
            StringRef(constraint) == "wf") &&
           type->isVectorTy())
    return CW_Register;
  else if (StringRef(constraint) == "ws" && type->isDoubleTy())
    return CW_Register;

  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'b':
    if (type->isIntegerTy())
      weight = CW_Register;
    break;
  case 'f':
    if (type->isFloatTy())
      weight = CW_Register;
    break;
  case 'd':
    if (type->isDoubleTy())
      weight = CW_Register;
    break;
  case 'v':
    if (type->isVectorTy())
      weight = CW_Register;
    break;
  case 'y':
    weight = CW_Register;
    break;
  case 'Z':
    weight = CW_Memory;
    break;
  }
  return weight;
}

std::pair<unsigned, const TargetRegisterClass *>
PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
      return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
    case 'r':   // R0-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RCRegClass);
      return std::make_pair(0U, &PPC::GPRCRegClass);
    // 'd' and 'f' constraints are both defined to be "the floating point
    // registers", where one is for 32-bit and the other for 64-bit. We don't
    // really care overly much here so just give them all the same reg classes.
    case 'd':
    case 'f':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &PPC::F4RCRegClass);
      if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &PPC::F8RCRegClass);
      if (VT == MVT::v4f64 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QFRCRegClass);
      if (VT == MVT::v4f32 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QSRCRegClass);
      break;
    case 'v':
      if (VT == MVT::v4f64 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QFRCRegClass);
      if (VT == MVT::v4f32 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QSRCRegClass);
      if (Subtarget.hasAltivec())
        return std::make_pair(0U, &PPC::VRRCRegClass);
      // Without Altivec, fall back to the generic handling below rather than
      // falling through into the 'y' (CR register) case.
      break;
    case 'y':   // crrc
      return std::make_pair(0U, &PPC::CRRCRegClass);
    }
  } else if (Constraint == "wc" && Subtarget.useCRBits()) {
    // An individual CR bit.
    return std::make_pair(0U, &PPC::CRBITRCRegClass);
  } else if ((Constraint == "wa" || Constraint == "wd" ||
              Constraint == "wf") && Subtarget.hasVSX()) {
    return std::make_pair(0U, &PPC::VSRCRegClass);
  } else if (Constraint == "ws" && Subtarget.hasVSX()) {
    if (VT == MVT::f32 && Subtarget.hasP8Vector())
      return std::make_pair(0U, &PPC::VSSRCRegClass);
    else
      return std::make_pair(0U, &PPC::VSFRCRegClass);
  }

  std::pair<unsigned, const TargetRegisterClass *> R =
    TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
  // (which we call X[0-9]+). If a 64-bit value has been requested, and a
  // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
  // register.
  // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
  // the AsmName field from *RegisterInfo.td, then this would not be necessary.
  if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
      PPC::GPRCRegClass.contains(R.first))
    return std::make_pair(TRI->getMatchingSuperReg(R.first,
                            PPC::sub_32, &PPC::G8RCRegClass),
                          &PPC::G8RCRegClass);

  // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
  if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
    R.first = PPC::CR0;
    R.second = &PPC::CRRCRegClass;
  }

  return R;
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints.
  if (Constraint.length() > 1) return;

  char Letter = Constraint[0];
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
    if (!CST) return; // Must be an immediate to match.
    SDLoc dl(Op);
    int64_t Value = CST->getSExtValue();
    EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
                         // numbers are printed as such.
    switch (Letter) {
    default: llvm_unreachable("Unknown constraint letter!");
    case 'I':  // "I" is a signed 16-bit constant.
      if (isInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
      if (isShiftedUInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
      if (isShiftedInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
      if (isUInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'M':  // "M" is a constant that is greater than 31.
      if (Value > 31)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'N':  // "N" is a positive constant that is an exact power of two.
      if (Value > 0 && isPowerOf2_64(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'O':  // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      if (isInt<16>(-Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS) const {
  // PPC does not allow r+i addressing modes for vectors!
  if (Ty->isVectorTy() && AM.BaseOffs != 0)
    return false;

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r addressing:
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = Subtarget.isPPC64();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
        DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
                        isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Just load the return address off the stack.
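  // Depth 0: read the current function's return address from the LR save
  // slot, addressed via the frame index that getReturnAddrFrameIndex sets up.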
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
                     MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;

  // Naked functions never have a frame pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned FrameReg;
  if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
    FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
  else
    FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;

  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
                                         PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo());
  return FrameAddr;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
unsigned PPCTargetLowering::getRegisterByName(const char* RegName, EVT VT,
                                              SelectionDAG &DAG) const {
  bool isPPC64 = Subtarget.isPPC64();
  bool isDarwinABI = Subtarget.isDarwinABI();

  if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) ||
      (!isPPC64 && VT != MVT::i32))
    report_fatal_error("Invalid register global variable type");

  bool is64Bit = isPPC64 && VT == MVT::i64;
  unsigned Reg = StringSwitch<unsigned>(RegName)
                   .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
                   .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2)
                   .Case("r13", (!isPPC64 && isDarwinABI) ? 0 :
                                  (is64Bit ? PPC::X13 : PPC::R13))
                   .Default(0);

  if (Reg)
    return Reg;
  report_fatal_error("Invalid register name global variable");
}

bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
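  // Returning true here would permit folding a GlobalAddress plus a constant
  // offset into a single target global-address node.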
  return false;
}

bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::ppc_qpx_qvlfd:
  case Intrinsic::ppc_qpx_qvlfs:
  case Intrinsic::ppc_qpx_qvlfcd:
  case Intrinsic::ppc_qpx_qvlfcs:
  case Intrinsic::ppc_qpx_qvlfiwa:
  case Intrinsic::ppc_qpx_qvlfiwz:
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    // As with the lvx load combine above, model a region almost twice the
    // vector size centered on the pointer, since a (possibly unaligned)
    // access may begin below the given address.
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = 1;
    Info.vol = false;
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::ppc_qpx_qvlfda:
  case Intrinsic::ppc_qpx_qvlfsa:
  case Intrinsic::ppc_qpx_qvlfcda:
  case Intrinsic::ppc_qpx_qvlfcsa:
  case Intrinsic::ppc_qpx_qvlfiwaa:
  case Intrinsic::ppc_qpx_qvlfiwza: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvlfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = 1;
    Info.vol = false;
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfd:
  case Intrinsic::ppc_qpx_qvstfs:
  case Intrinsic::ppc_qpx_qvstfcd:
  case Intrinsic::ppc_qpx_qvstfcs:
  case Intrinsic::ppc_qpx_qvstfiw:
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = 1;
    Info.vol = false;
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfda:
  case Intrinsic::ppc_qpx_qvstfsa:
  case Intrinsic::ppc_qpx_qvstfcda:
  case Intrinsic::ppc_qpx_qvstfcsa:
  case Intrinsic::ppc_qpx_qvstfiwa: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = 1;
    Info.vol = false;
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  default:
    break;
  }

  return false;
}

/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero, that means the destination alignment can
/// satisfy any constraint. Similarly, if SrcAlign is zero it means there
/// isn't a need to check it against the alignment requirement, probably
/// because the source does not need to be loaded. If 'IsMemset' is true,
/// that means it's expanding a memset. If 'ZeroMemset' is true, that means
/// it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy source
/// is constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
                                           unsigned DstAlign, unsigned SrcAlign,
                                           bool IsMemset, bool ZeroMemset,
                                           bool MemcpyStrSrc,
                                           MachineFunction &MF) const {
  if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
    const Function *F = MF.getFunction();
    // When expanding a memset, require at least two QPX instructions to cover
    // the cost of loading the value to be stored from the constant pool.
    if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
        (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
        !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
      return MVT::v4f64;
    }

    // We should use Altivec/VSX loads and stores when available. For unaligned
    // addresses, unaligned VSX loads are only fast starting with the P8.
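    // MVT::v4i32 stands in for any 16-byte vector operation here: either
    // both sides are known 16-byte aligned, or the subtarget handles the
    // unaligned accesses acceptably (P8 vectors, or VSX in the memset case).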
    if (Subtarget.hasAltivec() && Size >= 16 &&
        (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
         ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
      return MVT::v4i32;
  }

  if (Subtarget.isPPC64()) {
    return MVT::i64;
  }

  return MVT::i32;
}

/// \brief Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  return !(BitSize == 0 || BitSize > 64);
}

bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Generally speaking, zexts are not free, but they are free when they can be
  // folded with other operations.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  // FIXME: Add other cases...
  //  - 32-bit shifts with a zext to i64
  //  - zext after ctlz, bswap, etc.
  //  - zext after and by a constant mask

  return TargetLowering::isZExtFree(Val, VT2);
}

bool PPCTargetLowering::isFPExtFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return true;
}

bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                       unsigned,
                                                       unsigned,
                                                       bool *Fast) const {
  if (DisablePPCUnaligned)
    return false;

  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual expansion,
  // and generally only traps for software emulation when crossing page
  // boundaries.
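  // Vector types are accepted only when VSX is available, and then only for
  // the 128-bit types VSX can access directly; ppcf128 is always rejected.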
  if (!VT.isSimple())
    return false;

  if (VT.getSimpleVT().isVector()) {
    if (Subtarget.hasVSX()) {
      if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
          VT != MVT::v4f32 && VT != MVT::v4i32)
        return false;
    } else {
      return false;
    }
  }

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  default:
    break;
  }

  return false;
}

const MCPhysReg *
PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
  // LR is a callee-save register, but we must treat it as clobbered by any
  // call site. Hence we include LR in the scratch registers, which are in
  // turn added as implicit-defs for stackmaps and patchpoints. The same
  // reasoning applies to CTR, which is used by any indirect call.
  static const MCPhysReg ScratchRegs[] = {
    PPC::X12, PPC::LR8, PPC::CTR8, 0
  };

  return ScratchRegs;
}

unsigned PPCTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
}

unsigned PPCTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
}

bool
PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
    EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves

  if (Subtarget.hasVSX() || Subtarget.hasQPX())
    return true;

  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref || Subtarget.enableMachineScheduler())
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}

// Create a fast isel object.
FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) const {
  return PPC::createFastISel(FuncInfo, LibInfo);
}

void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  // Split callee-saved-register handling is enabled only for 64-bit
  // non-Darwin targets.
  if (Subtarget.isDarwinABI()) return;
  if (!Subtarget.isPPC64()) return;

  // Update IsSplitCSR in PPCFunctionInfo.
  PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>();
  PFI->setIsSplitCSR(true);
}

void PPCTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (PPC::G8RCRegClass.contains(*I))
      RC = &PPC::G8RCRegClass;
    else if (PPC::F8RCRegClass.contains(*I))
      RC = &PPC::F8RCRegClass;
    else if (PPC::CRRCRegClass.contains(*I))
      RC = &PPC::CRRCRegClass;
    else if (PPC::VRRCRegClass.contains(*I))
      RC = &PPC::VRRCRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    unsigned NewVR = MRI->createVirtualRegister(RC);
    // Create a copy from the CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions. It works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction()->hasFnAttribute(
             Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool PPCTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}

bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  if (!VT.isSimple() || !Subtarget.hasVSX())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  default:
    // For FP types that are currently not supported by the PPC backend,
    // return false. Examples: f16, f80.
    return false;
  case MVT::f32:
  case MVT::f64:
  case MVT::ppcf128:
    return Imm.isPosZero();
  }
}