//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPCCallingConv.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCTargetMachine.h"
#include "PPCTargetObjectFile.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!Subtarget.useSoftFloat()) {
    addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
    addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
  }

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
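  // (A PRE_INC indexed access computes base+offset, uses it as the address,
  // and writes it back into the base register, matching the PPC update-form
  // instructions such as lwzu/stwu; marking these Legal lets the DAG
  // combiner fold address updates into the memory operation.)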
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
                         isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // This is used in the ppcf128->int sequence. Note it has different semantics
  // from FP_ROUND: that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
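  // (With all four marked Expand below, legalization instead rewrites
  // srem/urem as a divide, a multiply, and a subtract: a % b == a - (a / b) * b.)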
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN   , MVT::f64, Expand);
  setOperationAction(ISD::FCOS   , MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM   , MVT::f64, Expand);
  setOperationAction(ISD::FPOW   , MVT::f64, Expand);
  setOperationAction(ISD::FMA    , MVT::f64, Legal);
  setOperationAction(ISD::FSIN   , MVT::f32, Expand);
  setOperationAction(ISD::FCOS   , MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM   , MVT::f32, Expand);
  setOperationAction(ISD::FPOW   , MVT::f32, Expand);
  setOperationAction(ISD::FMA    , MVT::f32, Legal);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, CTPOP or CTTZ.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ , MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ , MVT::i64, Expand);

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have Select.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
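  // (fsel computes "FRT = FRA >= 0.0 ? FRC : FRB", so the custom lowering
  // first has to rewrite the comparison as a sign test of a difference,
  // which is only valid under suitable no-NaN assumptions.)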
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling; it is a lightweight setjmp/longjmp replacement
  // used to support continuations, user-level threading, and the like. As a
  // result, no other SjLj exception interfaces are implemented; please don't
  // build your own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
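      // (Under the 64-bit SVR4 ABI, va_list is a simple pointer into the
      // parameter save area, where every slot is 8 bytes; promoting the
      // narrow integer types to i64 lets the default pointer-bump expansion
      // handle them.)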
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  if (Subtarget.isSVR4ABI() && !isPPC64)
    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  // Comparisons that require checking two conditions.
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // 64-bit implementations also have instructions for converting between
    // i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
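  // (FPCVT refers to the newer conversion instructions, e.g. fctiwuz and the
  // single-precision forms fcfids/fcfidus, which together cover all four
  // int<->fp conversion directions in registers.)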
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // Vector instructions introduced in P8.
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType (ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR , VT, Promote);
      AddPromotedToType (ISD::OR , VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType (ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType (ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL , VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FPOWI, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND , MVT::v4i32, Legal);
    setOperationAction(ISD::OR  , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
      setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Vector operation legalization checks the result type of
      // SIGN_EXTEND_INREG, overall legalization checks the inner type.
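      // (That is why both the wide v2i64 form and the narrower "inner" types
      // v2i32/v2i16/v2i8 get explicit actions here.)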
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f64, Custom);
    setOperationAction(ISD::STORE, MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_ROUND_INREG, MVT::v4f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG, MVT::v4f64, Legal);
    setOperationAction(ISD::FABS, MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f32, Custom);
    setOperationAction(ISD::STORE, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOWI, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND, MVT::v4i1, Legal);
    setOperationAction(ISD::OR , MVT::v4i1, Legal);
    setOperationAction(ISD::XOR, MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
    setOperationAction(ISD::STORE, MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
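    // (Unlike nearbyint, rint must raise FE_INEXACT when it rounds, which
    // the vector rounding instructions cannot express, so the FRINT nodes
    // are expanded instead.)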
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
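  // (setHasMultipleConditionRegisters() suppresses that sinking, and
  // setJumpIsExpensive() biases lowering toward combining conditions with
  // logical operations instead of emitting extra conditional branches.)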
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(2);
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(4);

  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
    setPrefFunctionAlignment(4);
    setPrefLoopAlignment(4);
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary. Everything
  // else is passed on an 8-byte boundary on PPC64 and a 4-byte boundary on
  // PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
  return Align;
}

bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER: break;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
  case PPCISD::FRE: return "PPCISD::FRE";
  case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::CMPB: return "PPCISD::CMPB";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
  case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
  case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL: return "PPCISD::SRL";
  case PPCISD::SRA: return "PPCISD::SRA";
  case PPCISD::SHL: return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL: return "PPCISD::CALL";
  case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR: return "PPCISD::MTCTR";
  case PPCISD::BCTRL: return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
  case PPCISD::MFVSR: return "PPCISD::MFVSR";
  case PPCISD::MTVSRA: return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ";
  case PPCISD::ANDIo_1_EQ_BIT: return "PPCISD::ANDIo_1_EQ_BIT";
  case PPCISD::ANDIo_1_GT_BIT: return "PPCISD::ANDIo_1_GT_BIT";
  case PPCISD::VCMP: return "PPCISD::VCMP";
  case PPCISD::VCMPo: return "PPCISD::VCMPo";
  case PPCISD::LBRX: return "PPCISD::LBRX";
  case PPCISD::STBRX: return "PPCISD::STBRX";
  case PPCISD::LFIWAX: return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX: return "PPCISD::LFIWZX";
  case PPCISD::LXVD2X: return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X: return "PPCISD::STXVD2X";
  case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ: return "PPCISD::BDNZ";
  case PPCISD::BDZ: return "PPCISD::BDZ";
  case PPCISD::MFFS: return "PPCISD::MFFS";
  case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET: return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L";
"PPCISD::LD_GOT_TPREL_L"; 1063 case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS"; 1064 case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA"; 1065 case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L"; 1066 case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR"; 1067 case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR"; 1068 case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA"; 1069 case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L"; 1070 case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR"; 1071 case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR"; 1072 case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA"; 1073 case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L"; 1074 case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT"; 1075 case PPCISD::SC: return "PPCISD::SC"; 1076 case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB"; 1077 case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE"; 1078 case PPCISD::RFEBB: return "PPCISD::RFEBB"; 1079 case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD"; 1080 case PPCISD::QVFPERM: return "PPCISD::QVFPERM"; 1081 case PPCISD::QVGPCI: return "PPCISD::QVGPCI"; 1082 case PPCISD::QVALIGNI: return "PPCISD::QVALIGNI"; 1083 case PPCISD::QVESPLATI: return "PPCISD::QVESPLATI"; 1084 case PPCISD::QBFLT: return "PPCISD::QBFLT"; 1085 case PPCISD::QVLFSb: return "PPCISD::QVLFSb"; 1086 } 1087 return nullptr; 1088 } 1089 1090 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C, 1091 EVT VT) const { 1092 if (!VT.isVector()) 1093 return Subtarget.useCRBits() ? MVT::i1 : MVT::i32; 1094 1095 if (Subtarget.hasQPX()) 1096 return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements()); 1097 1098 return VT.changeVectorElementTypeToInteger(); 1099 } 1100 1101 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const { 1102 assert(VT.isFloatingPoint() && "Non-floating-point FMA?"); 1103 return true; 1104 } 1105 1106 //===----------------------------------------------------------------------===// 1107 // Node matching predicates, for use by the tblgen matching code. 1108 //===----------------------------------------------------------------------===// 1109 1110 /// isFloatingPointZero - Return true if this is 0.0 or -0.0. 1111 static bool isFloatingPointZero(SDValue Op) { 1112 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 1113 return CFP->getValueAPF().isZero(); 1114 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 1115 // Maybe this has already been legalized into the constant pool? 1116 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1))) 1117 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 1118 return CFP->getValueAPF().isZero(); 1119 } 1120 return false; 1121 } 1122 1123 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return 1124 /// true if Op is undef or if it matches the specified value. 1125 static bool isConstantOrUndef(int Op, int Val) { 1126 return Op < 0 || Op == Val; 1127 } 1128 1129 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a 1130 /// VPKUHUM instruction. 1131 /// The ShuffleKind distinguishes between big-endian operations with 1132 /// two different inputs (0), either-endian operations with two identical 1133 /// inputs (1), and little-endian operations with two different inputs (2). 1134 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
        return false;
  }
  return true;
}

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
/// current subtarget.
///
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
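// For example, with ShuffleKind 0 on a big-endian target, vpkudum keeps the
// low-order word of each doubleword, so the expected v16i8 mask is
// <4,5,6,7, 12,13,14,15, 20,21,22,23, 28,29,30,31>.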
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  const PPCSubtarget& Subtarget =
      static_cast<const PPCSubtarget&>(DAG.getSubtarget());
  if (!Subtarget.hasP8Vector())
    return false;

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i   ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1 ), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2 ), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3 ), i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8 ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9 ), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
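// For example, a big-endian vmrglb merge of two different inputs
// (ShuffleKind 0) interleaves the low halves of the two vectors, so the
// expected v16i8 mask is
// <8,24, 9,25, 10,26, 11,27, 12,28, 13,29, 14,30, 15,31>.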
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}

/**
 * \brief Common function used to match vmrgew and vmrgow shuffles
 *
 * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target machine.
 *   - Little Endian:
 *     - Use offset of 0 to check for odd elements
 *     - Use offset of 4 to check for even elements
 *   - Big Endian:
 *     - Use offset of 0 to check for even elements
 *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 * Targeting your applications - what little endian and big endian IBM XL C/C++
 * compiler differences mean to you
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements
 * are numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, thus each vector will contain 16
 * byte-sized elements. More info on the shuffle vector can be found in the
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
 * Language Reference.
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 *   - If the instruction uses the same vector for both inputs, the range of
 *     the indices will be 0 to 15. In this case, the RHSStart value passed
 *     should be 0.
 *   - If the instruction has two different vectors then the range of the
 *     indices will be 0 to 31.
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] IndexOffset Specifies whether to look for even or odd elements
 * \param[in] RHSStartValue Specifies the starting index for the right-hand
 *            input vector to the shuffle_vector instruction
 * \return true iff this shuffle vector represents an even or odd word merge
 */
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
      if (!isConstantOrUndef(N->getMaskElt(i*4+j),
                             i*RHSStartValue+j+IndexOffset) ||
          !isConstantOrUndef(N->getMaskElt(i*4+j+8),
                             i*RHSStartValue+j+IndexOffset+8))
        return false;
  return true;
}

/**
 * \brief Determine if the specified shuffle mask is suitable for the vmrgew or
 * vmrgow instructions.
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
 * \param[in] ShuffleKind Identify the type of merge:
 *   - 0 = big-endian merge with two different inputs;
 *   - 1 = either-endian merge with two identical inputs;
 *   - 2 = little-endian merge with two different inputs (inputs are swapped
 *     for little-endian merges).
 * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask is suitable for a vmrgew or vmrgow merge
 */
bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                              unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    unsigned indexOffset = CheckEven ? 4 : 0;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  } else {
    unsigned indexOffset = CheckEven ? 0 : 4;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 0) // Normal
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  }
}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
/// The ShuffleKind distinguishes between big-endian operations with two
/// different inputs (0), either-endian operations with two identical inputs
/// (1), and little-endian operations with two different inputs (2). For the
/// latter, the input operands are swapped (see PPCInstrAltivec.td).
int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                             SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::v16i8)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1; // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
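  // (For instance, on a big-endian target the mask <3,4,5,...,17,18> is
  // consecutive starting from 3 and corresponds to a vsldoi of 3 bytes;
  // shown for illustration, not as an exhaustive specification.)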
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;

  ShiftAmt -= i;
  bool isLE = DAG.getDataLayout().isLittleEndian();

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else if (ShuffleKind == 1) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  } else
    return -1;

  if (isLE)
    ShiftAmt = 16 - ShiftAmt;

  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // The consecutive indices need to specify an element, not part of two
  // different elements. So abandon ship early if this isn't the case.
  if (N->getMaskElt(0) % EltSize != 0)
    return false;

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte
  // element splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
                                SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  if (DAG.getDataLayout().isLittleEndian())
    return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  else
    return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted. The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(nullptr, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;  // Number of BV entries per splat value.
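    // (Example, for illustration: a v8i16 build_vector checked against
    // ByteSize == 4 (vspltisw) gives EltSize == 2 and Multiple == 2, so each
    // splatted i32 must be formed from two adjacent i16 entries.)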
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across each chunk.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).isUndef()) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk. See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1. If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue; // Must have been undefs.

      LeadingZero &= isNullConstant(UniquedVals[i]);
      LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16)                               // 0,0,0,4 -> vspltisw(4)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }
    if (LeadingOnes) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16)                             // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).isUndef()) continue;
    if (!OpVal.getNode())
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat. The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, check if it consists
  // of a repeated bit pattern of size ByteSize.
  if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
    return SDValue();

  // Properly sign extend the value.
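  // (e.g. for ByteSize == 1, a replicated byte value of 0xF0 sign-extends to
  // -16, which still fits the 5-bit signed immediate range checked below.)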
  int MaskVal = SignExtend32(Value, ByteSize * 8);

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5 bit sext field, return it.
  if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
  return SDValue();
}

/// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isQVALIGNIShuffleMask(SDNode *N) {
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 4) return -1; // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  // Check the rest of the elements to see if they are consecutive.
  for (++i; i != 4; ++i)
    if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
      return -1;

  return ShiftAmt;
}

//===----------------------------------------------------------------------===//
//  Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value. If so, this returns true and the
/// immediate.
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
static bool isIntS16Immediate(SDValue Op, short &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}

/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation. Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index,
                                            SelectionDAG &DAG) const {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i can fold it if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
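    // (Illustration: in (%x << 4) | 7 the low four bits of the LHS are known
    // zero and the RHS only sets those bits, so OR and ADD are equivalent.)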
    APInt LHSKnownZero, LHSKnownOne;
    APInt RHSKnownZero, RHSKnownOne;
    DAG.computeKnownBits(N.getOperand(0),
                         LHSKnownZero, LHSKnownOne);

    if (LHSKnownZero.getBoolValue()) {
      DAG.computeKnownBits(N.getOperand(1),
                           RHSKnownZero, RHSKnownOne);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnownZero | RHSKnownZero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

// If we happen to be doing an i64 load or store into a stack slot that has
// less than a 4-byte alignment, then the frame-index elimination may need to
// use an indexed load or store instruction (because the offset may not be a
// multiple of 4). The extra register needed to hold the offset comes from the
// register scavenger, and it is possible that the scavenger will need to use
// an emergency spill slot. As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
  // FIXME: This does not handle the LWA case.
  if (VT != MVT::i64)
    return;

  // NOTE: We'll exclude negative FIs here, which come from argument
  // lowering, because there are no known test cases triggering this problem
  // using packed structures (or similar). We can remove this exclusion if
  // we find such a test case. The reason why this is so test-case driven is
  // because this entire 'fixup' is only to prevent crashes (from the
  // register scavenger) on not-really-valid inputs. For example, if we have:
  //   %a = alloca i1
  //   %b = bitcast i1* %a to i64*
  //   store i64 0, i64* %b
  // then the store should really be marked as 'align 1', but is not. If it
  // were marked as 'align 1' then the indexed form would have been
  // instruction-selected initially, and the problem this 'fixup' is preventing
  // won't happen regardless.
  if (FrameIdx < 0)
    return;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  unsigned Align = MFI->getObjectAlignment(FrameIdx);
  if (Align >= 4)
    return;

  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setHasNonRISpills();
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg. If Aligned is true, only accept displacements
/// suitable for STD and friends, i.e. multiples of 4.
bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
                                            SDValue &Base,
                                            SelectionDAG &DAG,
                                            bool Aligned) const {
  // FIXME dl should come from parent load or store, not from address
  SDLoc dl(N);
  // If this can be more profitably realized as r+r, fail.
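  // (e.g. an access such as lwz r3, 8(r4) uses the D-form [r+imm] encoding,
  // which avoids materializing the offset in a register as the X-form
  // lwzx r3, r4, r5 would require; mnemonics shown for illustration.)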
1755 if (SelectAddressRegReg(N, Disp, Base, DAG)) 1756 return false; 1757 1758 if (N.getOpcode() == ISD::ADD) { 1759 short imm = 0; 1760 if (isIntS16Immediate(N.getOperand(1), imm) && 1761 (!Aligned || (imm & 3) == 0)) { 1762 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 1763 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 1764 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1765 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 1766 } else { 1767 Base = N.getOperand(0); 1768 } 1769 return true; // [r+i] 1770 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 1771 // Match LOAD (ADD (X, Lo(G))). 1772 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() 1773 && "Cannot handle constant offsets yet!"); 1774 Disp = N.getOperand(1).getOperand(0); // The global address. 1775 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 1776 Disp.getOpcode() == ISD::TargetGlobalTLSAddress || 1777 Disp.getOpcode() == ISD::TargetConstantPool || 1778 Disp.getOpcode() == ISD::TargetJumpTable); 1779 Base = N.getOperand(0); 1780 return true; // [&g+r] 1781 } 1782 } else if (N.getOpcode() == ISD::OR) { 1783 short imm = 0; 1784 if (isIntS16Immediate(N.getOperand(1), imm) && 1785 (!Aligned || (imm & 3) == 0)) { 1786 // If this is an or of disjoint bitfields, we can codegen this as an add 1787 // (for better address arithmetic) if the LHS and RHS of the OR are 1788 // provably disjoint. 1789 APInt LHSKnownZero, LHSKnownOne; 1790 DAG.computeKnownBits(N.getOperand(0), LHSKnownZero, LHSKnownOne); 1791 1792 if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 1793 // If all of the bits are known zero on the LHS or RHS, the add won't 1794 // carry. 1795 if (FrameIndexSDNode *FI = 1796 dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 1797 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1798 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 1799 } else { 1800 Base = N.getOperand(0); 1801 } 1802 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 1803 return true; 1804 } 1805 } 1806 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 1807 // Loading from a constant address. 1808 1809 // If this address fits entirely in a 16-bit sext immediate field, codegen 1810 // this as "d, 0" 1811 short Imm; 1812 if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) { 1813 Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0)); 1814 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, 1815 CN->getValueType(0)); 1816 return true; 1817 } 1818 1819 // Handle 32-bit sext immediates with LIS + addr mode. 1820 if ((CN->getValueType(0) == MVT::i32 || 1821 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) && 1822 (!Aligned || (CN->getZExtValue() & 3) == 0)) { 1823 int Addr = (int)CN->getZExtValue(); 1824 1825 // Otherwise, break this down into an LIS + disp. 1826 Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32); 1827 1828 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl, 1829 MVT::i32); 1830 unsigned Opc = CN->getValueType(0) == MVT::i32 ? 
                     PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
    fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
  } else
    Base = N;
  return true;      // [r+0]
}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address. This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the operand is an addition, always emit this as [r+r], since this is
  // better (for code size, and execution, as the memop does the add for free)
  // than emitting an explicit add.
  if (N.getOpcode() == ISD::ADD) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Index = N;
  return true;
}

/// getPreIndexedAddressParts - Returns true by value, and sets the base
/// pointer, the offset pointer, and the addressing mode by reference, if the
/// node's address can be legally represented as a pre-indexed load / store
/// address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  unsigned Alignment;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlignment();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlignment();
    isLoad = false;
  } else
    return false;

  // PowerPC doesn't have preinc load/store instructions for vectors (except
  // for QPX, which does have preinc r+r forms).
  if (VT.isVector()) {
    if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
      return false;
    } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
      AM = ISD::PRE_INC;
      return true;
    }
  }

  if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {

    // Common code will reject creating a pre-inc form if the base pointer
    // is a frame index, or if N is a store and the base pointer is either
    // the same as or a predecessor of the value being stored. Check for
    // those situations here, and try with swapped Base/Offset instead.
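    // (The update forms write the effective address back into the base
    // register, e.g. lwzux rD, rA, rB updates rA, so rA can be neither a
    // frame index nor the value being stored; example for illustration.)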
1914 bool Swap = false; 1915 1916 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base)) 1917 Swap = true; 1918 else if (!isLoad) { 1919 SDValue Val = cast<StoreSDNode>(N)->getValue(); 1920 if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode())) 1921 Swap = true; 1922 } 1923 1924 if (Swap) 1925 std::swap(Base, Offset); 1926 1927 AM = ISD::PRE_INC; 1928 return true; 1929 } 1930 1931 // LDU/STU can only handle immediates that are a multiple of 4. 1932 if (VT != MVT::i64) { 1933 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, false)) 1934 return false; 1935 } else { 1936 // LDU/STU need an address with at least 4-byte alignment. 1937 if (Alignment < 4) 1938 return false; 1939 1940 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, true)) 1941 return false; 1942 } 1943 1944 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1945 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 1946 // sext i32 to i64 when addr mode is r+i. 1947 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 && 1948 LD->getExtensionType() == ISD::SEXTLOAD && 1949 isa<ConstantSDNode>(Offset)) 1950 return false; 1951 } 1952 1953 AM = ISD::PRE_INC; 1954 return true; 1955 } 1956 1957 //===----------------------------------------------------------------------===// 1958 // LowerOperation implementation 1959 //===----------------------------------------------------------------------===// 1960 1961 /// GetLabelAccessInfo - Return true if we should reference labels using a 1962 /// PICBase, set the HiOpFlags and LoOpFlags to the target MO flags. 1963 static bool GetLabelAccessInfo(const TargetMachine &TM, 1964 const PPCSubtarget &Subtarget, 1965 unsigned &HiOpFlags, unsigned &LoOpFlags, 1966 const GlobalValue *GV = nullptr) { 1967 HiOpFlags = PPCII::MO_HA; 1968 LoOpFlags = PPCII::MO_LO; 1969 1970 // Don't use the pic base if not in PIC relocation model. 1971 bool isPIC = TM.getRelocationModel() == Reloc::PIC_; 1972 1973 if (isPIC) { 1974 HiOpFlags |= PPCII::MO_PIC_FLAG; 1975 LoOpFlags |= PPCII::MO_PIC_FLAG; 1976 } 1977 1978 // If this is a reference to a global value that requires a non-lazy-ptr, make 1979 // sure that instruction lowering adds it. 1980 if (GV && Subtarget.hasLazyResolverStub(GV)) { 1981 HiOpFlags |= PPCII::MO_NLP_FLAG; 1982 LoOpFlags |= PPCII::MO_NLP_FLAG; 1983 1984 if (GV->hasHiddenVisibility()) { 1985 HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 1986 LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 1987 } 1988 } 1989 1990 return isPIC; 1991 } 1992 1993 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, 1994 SelectionDAG &DAG) { 1995 SDLoc DL(HiPart); 1996 EVT PtrVT = HiPart.getValueType(); 1997 SDValue Zero = DAG.getConstant(0, DL, PtrVT); 1998 1999 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); 2000 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); 2001 2002 // With PIC, the first instruction is actually "GR+hi(&G)". 2003 if (isPIC) 2004 Hi = DAG.getNode(ISD::ADD, DL, PtrVT, 2005 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); 2006 2007 // Generate non-pic code that has direct accesses to the constant pool. 2008 // The address of the global is just (hi(&g)+lo(&g)). 
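  // (Roughly: lis rD, g@ha followed by addi rD, rD, g@l in ELF assembly;
  // shown for illustration.)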
2009 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); 2010 } 2011 2012 static void setUsesTOCBasePtr(MachineFunction &MF) { 2013 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2014 FuncInfo->setUsesTOCBasePtr(); 2015 } 2016 2017 static void setUsesTOCBasePtr(SelectionDAG &DAG) { 2018 setUsesTOCBasePtr(DAG.getMachineFunction()); 2019 } 2020 2021 static SDValue getTOCEntry(SelectionDAG &DAG, SDLoc dl, bool Is64Bit, 2022 SDValue GA) { 2023 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 2024 SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) : 2025 DAG.getNode(PPCISD::GlobalBaseReg, dl, VT); 2026 2027 SDValue Ops[] = { GA, Reg }; 2028 return DAG.getMemIntrinsicNode( 2029 PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT, 2030 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0, false, true, 2031 false, 0); 2032 } 2033 2034 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 2035 SelectionDAG &DAG) const { 2036 EVT PtrVT = Op.getValueType(); 2037 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 2038 const Constant *C = CP->getConstVal(); 2039 2040 // 64-bit SVR4 ABI code is always position-independent. 2041 // The actual address of the GlobalValue is stored in the TOC. 2042 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2043 setUsesTOCBasePtr(DAG); 2044 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0); 2045 return getTOCEntry(DAG, SDLoc(CP), true, GA); 2046 } 2047 2048 unsigned MOHiFlag, MOLoFlag; 2049 bool isPIC = 2050 GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag); 2051 2052 if (isPIC && Subtarget.isSVR4ABI()) { 2053 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 2054 PPCII::MO_PIC_FLAG); 2055 return getTOCEntry(DAG, SDLoc(CP), false, GA); 2056 } 2057 2058 SDValue CPIHi = 2059 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag); 2060 SDValue CPILo = 2061 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag); 2062 return LowerLabelRef(CPIHi, CPILo, isPIC, DAG); 2063 } 2064 2065 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 2066 EVT PtrVT = Op.getValueType(); 2067 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 2068 2069 // 64-bit SVR4 ABI code is always position-independent. 2070 // The actual address of the GlobalValue is stored in the TOC. 2071 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2072 setUsesTOCBasePtr(DAG); 2073 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 2074 return getTOCEntry(DAG, SDLoc(JT), true, GA); 2075 } 2076 2077 unsigned MOHiFlag, MOLoFlag; 2078 bool isPIC = 2079 GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag); 2080 2081 if (isPIC && Subtarget.isSVR4ABI()) { 2082 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 2083 PPCII::MO_PIC_FLAG); 2084 return getTOCEntry(DAG, SDLoc(GA), false, GA); 2085 } 2086 2087 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 2088 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 2089 return LowerLabelRef(JTIHi, JTILo, isPIC, DAG); 2090 } 2091 2092 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 2093 SelectionDAG &DAG) const { 2094 EVT PtrVT = Op.getValueType(); 2095 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); 2096 const BlockAddress *BA = BASDN->getBlockAddress(); 2097 2098 // 64-bit SVR4 ABI code is always position-independent. 2099 // The actual BlockAddress is stored in the TOC. 
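  // (It is reached through a TOC load such as ld rD, entry@toc(r2), where
  // r2 holds the TOC base pointer; small-code-model form, for illustration.)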
2100 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2101 setUsesTOCBasePtr(DAG); 2102 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); 2103 return getTOCEntry(DAG, SDLoc(BASDN), true, GA); 2104 } 2105 2106 unsigned MOHiFlag, MOLoFlag; 2107 bool isPIC = 2108 GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag); 2109 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 2110 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 2111 return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG); 2112 } 2113 2114 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 2115 SelectionDAG &DAG) const { 2116 2117 // FIXME: TLS addresses currently use medium model code sequences, 2118 // which is the most useful form. Eventually support for small and 2119 // large models could be added if users need it, at the cost of 2120 // additional complexity. 2121 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2122 if (DAG.getTarget().Options.EmulatedTLS) 2123 return LowerToTLSEmulatedModel(GA, DAG); 2124 2125 SDLoc dl(GA); 2126 const GlobalValue *GV = GA->getGlobal(); 2127 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2128 bool is64bit = Subtarget.isPPC64(); 2129 const Module *M = DAG.getMachineFunction().getFunction()->getParent(); 2130 PICLevel::Level picLevel = M->getPICLevel(); 2131 2132 TLSModel::Model Model = getTargetMachine().getTLSModel(GV); 2133 2134 if (Model == TLSModel::LocalExec) { 2135 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2136 PPCII::MO_TPREL_HA); 2137 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2138 PPCII::MO_TPREL_LO); 2139 SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2, 2140 is64bit ? MVT::i64 : MVT::i32); 2141 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 2142 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 2143 } 2144 2145 if (Model == TLSModel::InitialExec) { 2146 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2147 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2148 PPCII::MO_TLS); 2149 SDValue GOTPtr; 2150 if (is64bit) { 2151 setUsesTOCBasePtr(DAG); 2152 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2153 GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, 2154 PtrVT, GOTReg, TGA); 2155 } else 2156 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); 2157 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, 2158 PtrVT, TGA, GOTPtr); 2159 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 2160 } 2161 2162 if (Model == TLSModel::GeneralDynamic) { 2163 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2164 SDValue GOTPtr; 2165 if (is64bit) { 2166 setUsesTOCBasePtr(DAG); 2167 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2168 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 2169 GOTReg, TGA); 2170 } else { 2171 if (picLevel == PICLevel::Small) 2172 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2173 else 2174 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2175 } 2176 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT, 2177 GOTPtr, TGA, TGA); 2178 } 2179 2180 if (Model == TLSModel::LocalDynamic) { 2181 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2182 SDValue GOTPtr; 2183 if (is64bit) { 2184 setUsesTOCBasePtr(DAG); 2185 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2186 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 2187 GOTReg, TGA); 2188 } else { 2189 if (picLevel == PICLevel::Small) 2190 GOTPtr = 
                 DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
      else
        GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
    }
    SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
                                  PtrVT, GOTPtr, TGA, TGA);
    SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
                                      PtrVT, TLSAddr, TGA);
    return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
  }

  llvm_unreachable("Unknown TLS model!");
}

SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  SDLoc DL(GSDN);
  const GlobalValue *GV = GSDN->getGlobal();

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
    return getTOCEntry(DAG, DL, true, GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC =
      GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag, GV);

  if (isPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
                                            GSDN->getOffset(),
                                            PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, DL, false, GA);
  }

  SDValue GAHi =
    DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
  SDValue GALo =
    DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);

  SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG);

  // If the global reference is actually to a non-lazy-pointer, we have to do
  // an extra load to get the address of the global.
  if (MOHiFlag & PPCII::MO_NLP_FLAG)
    Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(),
                      false, false, false, 0);
  return Ptr;
}

SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc dl(Op);

  if (Op.getValueType() == MVT::v2i64) {
    // When the operands themselves are v2i64 values, we need to do something
    // special because VSX has no underlying comparison operations for these.
    if (Op.getOperand(0).getValueType() == MVT::v2i64) {
      // Equality can be handled by casting to the legal type for Altivec
      // comparisons, everything else needs to be expanded.
      if (CC == ISD::SETEQ || CC == ISD::SETNE) {
        return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
                 DAG.getSetCC(dl, MVT::v4i32,
                   DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
                   DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
                   CC));
      }

      return SDValue();
    }

    // We handle most of these in the usual way.
    return Op;
  }

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
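  // (For an i32 x, (x == 0) lowers to roughly cntlzw rD, rS ; srwi rD, rD, 5:
  // the leading-zero count is 32 only when x is zero, so the shifted result
  // is 1 iff x == 0. Instruction sequence shown for illustration.)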
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    if (C->isNullValue() && CC == ISD::SETEQ) {
      EVT VT = Op.getOperand(0).getValueType();
      SDValue Zext = Op.getOperand(0);
      if (VT.bitsLT(MVT::i32)) {
        VT = MVT::i32;
        Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
      }
      unsigned Log2b = Log2_32(VT.getSizeInBits());
      SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
      SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
                                DAG.getConstant(Log2b, dl, MVT::i32));
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
    }
    // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized. FIXME: revisit this when we can custom lower all setcc
    // optimizations.
    if (C->isAllOnesValue() || C->isNullValue())
      return SDValue();
  }

  // If we have an integer seteq/setne, turn it into a compare against zero
  // by xor'ing the rhs with the lhs, which is faster than setting a
  // condition register, reading it back out, and masking the correct bit. The
  // normal approach here uses sub to do this instead of xor. Using xor exposes
  // the result to other bit-twiddling opportunities.
  EVT LHSVT = Op.getOperand(0).getValueType();
  if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    EVT VT = Op.getValueType();
    SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
                              Op.getOperand(1));
    return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
  }
  return SDValue();
}

SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
                                      const PPCSubtarget &Subtarget) const {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);

  assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");

  // gpr_index
  SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
                                    VAListPtr, MachinePointerInfo(SV), MVT::i8,
                                    false, false, false, 0);
  InChain = GprIndex.getValue(1);

  if (VT == MVT::i64) {
    // Check whether GprIndex is odd.
    SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
                                 DAG.getConstant(1, dl, MVT::i32));
    SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
                                DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
    SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
                                          DAG.getConstant(1, dl, MVT::i32));
    // If GprIndex is odd, round it up to the next even index so the i64 is
    // read from an aligned register pair.
    GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
                           GprIndex);
  }

  // fpr index is 1 byte after gpr
  SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                               DAG.getConstant(1, dl, MVT::i32));

  // fpr
  SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
                                    FprPtr, MachinePointerInfo(SV), MVT::i8,
                                    false, false, false, 0);
  InChain = FprIndex.getValue(1);

  SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                                       DAG.getConstant(8, dl, MVT::i32));

  SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                                        DAG.getConstant(4, dl, MVT::i32));

  // areas
  SDValue OverflowArea =
      DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr,
                  MachinePointerInfo(), false, false,
                  false, 0);
  InChain = OverflowArea.getValue(1);

  SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr,
                                    MachinePointerInfo(), false, false,
                                    false, 0);
  InChain = RegSaveArea.getValue(1);

  // select overflow_area if index >= 8
  SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
                            DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);

  // adjustment constant gpr_index * 4/8
  SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
                                    VT.isInteger() ? GprIndex : FprIndex,
                                    DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
                                                    MVT::i32));

  // OurReg = RegSaveArea + RegConstant
  SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
                               RegConstant);

  // Floating types are 32 bytes into RegSaveArea
  if (VT.isFloatingPoint())
    OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
                         DAG.getConstant(32, dl, MVT::i32));

  // increase {f,g}pr_index by 1 (or 2 if VT is i64)
  SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
                                   VT.isInteger() ? GprIndex : FprIndex,
                                   DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
                                                   MVT::i32));

  InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
                              VT.isInteger() ? VAListPtr : FprPtr,
                              MachinePointerInfo(SV),
                              MVT::i8, false, false, 0);

  // determine if we should load from reg_save_area or overflow_area
  SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg,
                               OverflowArea);

  // increase overflow_area by 4/8 if the gpr/fpr index is 8 or more
  SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
                                          DAG.getConstant(VT.isInteger() ?
                                                          4 : 8,
                                                          dl, MVT::i32));

  OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
                             OverflowAreaPlusN);

  InChain = DAG.getTruncStore(InChain, dl, OverflowArea,
                              OverflowAreaPtr,
                              MachinePointerInfo(),
                              MVT::i32, false, false, 0);

  return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(),
                     false, false, false, 0);
}

SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG,
                                       const PPCSubtarget &Subtarget) const {
  assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");

  // We have to copy the entire va_list struct:
  // 2*sizeof(char) + 2 bytes of padding + 2*sizeof(char*) = 12 bytes.
  return DAG.getMemcpy(Op.getOperand(0), Op,
                       Op.getOperand(1), Op.getOperand(2),
                       DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true,
                       false, MachinePointerInfo(), MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
                                                  SelectionDAG &DAG) const {
  return Op.getOperand(0);
}

SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  SDLoc dl(Op);

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  bool isPPC64 = (PtrVT == MVT::i64);
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = Trmp; Args.push_back(Entry);

  // TrampSize == (isPPC64 ? 48 : 40);
  Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
                               isPPC64 ? MVT::i64 : MVT::i32);
  Args.push_back(Entry);

  Entry.Node = FPtr; Args.push_back(Entry);
  Entry.Node = Nest; Args.push_back(Entry);

  // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain)
    .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
               DAG.getExternalSymbol("__trampoline_setup", PtrVT),
               std::move(Args), 0);

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
}

SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
                                        const PPCSubtarget &Subtarget) const {
  MachineFunction &MF = DAG.getMachineFunction();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  SDLoc dl(Op);

  if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
    SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
    const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
    return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                        MachinePointerInfo(SV),
                        false, false, 0);
  }

  // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
  // We suppose the given va_list is already allocated.
2491 // 2492 // typedef struct { 2493 // char gpr; /* index into the array of 8 GPRs 2494 // * stored in the register save area 2495 // * gpr=0 corresponds to r3, 2496 // * gpr=1 to r4, etc. 2497 // */ 2498 // char fpr; /* index into the array of 8 FPRs 2499 // * stored in the register save area 2500 // * fpr=0 corresponds to f1, 2501 // * fpr=1 to f2, etc. 2502 // */ 2503 // char *overflow_arg_area; 2504 // /* location on stack that holds 2505 // * the next overflow argument 2506 // */ 2507 // char *reg_save_area; 2508 // /* where r3:r10 and f1:f8 (if saved) 2509 // * are stored 2510 // */ 2511 // } va_list[1]; 2512 2513 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32); 2514 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32); 2515 2516 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 2517 2518 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 2519 PtrVT); 2520 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 2521 PtrVT); 2522 2523 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 2524 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT); 2525 2526 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 2527 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT); 2528 2529 uint64_t FPROffset = 1; 2530 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT); 2531 2532 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2533 2534 // Store first byte : number of int regs 2535 SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, 2536 Op.getOperand(1), 2537 MachinePointerInfo(SV), 2538 MVT::i8, false, false, 0); 2539 uint64_t nextOffset = FPROffset; 2540 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 2541 ConstFPROffset); 2542 2543 // Store second byte : number of float regs 2544 SDValue secondStore = 2545 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 2546 MachinePointerInfo(SV, nextOffset), MVT::i8, 2547 false, false, 0); 2548 nextOffset += StackOffset; 2549 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 2550 2551 // Store second word : arguments given on stack 2552 SDValue thirdStore = 2553 DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 2554 MachinePointerInfo(SV, nextOffset), 2555 false, false, 0); 2556 nextOffset += FrameOffset; 2557 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 2558 2559 // Store third word : arguments given in registers 2560 return DAG.getStore(thirdStore, dl, FR, nextPtr, 2561 MachinePointerInfo(SV, nextOffset), 2562 false, false, 0); 2563 2564 } 2565 2566 #include "PPCGenCallingConv.inc" 2567 2568 // Function whose sole purpose is to kill compiler warnings 2569 // stemming from unused functions included from PPCGenCallingConv.inc. 2570 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const { 2571 return Flag ? 
CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS; 2572 } 2573 2574 bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 2575 CCValAssign::LocInfo &LocInfo, 2576 ISD::ArgFlagsTy &ArgFlags, 2577 CCState &State) { 2578 return true; 2579 } 2580 2581 bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, 2582 MVT &LocVT, 2583 CCValAssign::LocInfo &LocInfo, 2584 ISD::ArgFlagsTy &ArgFlags, 2585 CCState &State) { 2586 static const MCPhysReg ArgRegs[] = { 2587 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2588 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2589 }; 2590 const unsigned NumArgRegs = array_lengthof(ArgRegs); 2591 2592 unsigned RegNum = State.getFirstUnallocated(ArgRegs); 2593 2594 // Skip one register if the first unallocated register has an even register 2595 // number and there are still argument registers available which have not been 2596 // allocated yet. RegNum is actually an index into ArgRegs, which means we 2597 // need to skip a register if RegNum is odd. 2598 if (RegNum != NumArgRegs && RegNum % 2 == 1) { 2599 State.AllocateReg(ArgRegs[RegNum]); 2600 } 2601 2602 // Always return false here, as this function only makes sure that the first 2603 // unallocated register has an odd register number and does not actually 2604 // allocate a register for the current argument. 2605 return false; 2606 } 2607 2608 bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT, 2609 MVT &LocVT, 2610 CCValAssign::LocInfo &LocInfo, 2611 ISD::ArgFlagsTy &ArgFlags, 2612 CCState &State) { 2613 static const MCPhysReg ArgRegs[] = { 2614 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 2615 PPC::F8 2616 }; 2617 2618 const unsigned NumArgRegs = array_lengthof(ArgRegs); 2619 2620 unsigned RegNum = State.getFirstUnallocated(ArgRegs); 2621 2622 // If there is only one Floating-point register left we need to put both f64 2623 // values of a split ppc_fp128 value on the stack. 2624 if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) { 2625 State.AllocateReg(ArgRegs[RegNum]); 2626 } 2627 2628 // Always return false here, as this function only makes sure that the two f64 2629 // values a ppc_fp128 value is split into are both passed in registers or both 2630 // passed on the stack and does not actually allocate a register for the 2631 // current argument. 2632 return false; 2633 } 2634 2635 /// FPR - The set of FP registers that should be allocated for arguments, 2636 /// on Darwin. 2637 static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, 2638 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10, 2639 PPC::F11, PPC::F12, PPC::F13}; 2640 2641 /// QFPR - The set of QPX registers that should be allocated for arguments. 2642 static const MCPhysReg QFPR[] = { 2643 PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7, 2644 PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13}; 2645 2646 /// CalculateStackSlotSize - Calculates the size reserved for this argument on 2647 /// the stack. 2648 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 2649 unsigned PtrByteSize) { 2650 unsigned ArgSize = ArgVT.getStoreSize(); 2651 if (Flags.isByVal()) 2652 ArgSize = Flags.getByValSize(); 2653 2654 // Round up to multiples of the pointer size, except for array members, 2655 // which are always packed. 
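  // (e.g. under the 64-bit ABI a 3-byte by-value struct still occupies one
  // full 8-byte slot: (3 + 8 - 1) / 8 * 8 == 8; illustrative numbers.)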
2656 if (!Flags.isInConsecutiveRegs()) 2657 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2658 2659 return ArgSize; 2660 } 2661 2662 /// CalculateStackSlotAlignment - Calculates the alignment of this argument 2663 /// on the stack. 2664 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, 2665 ISD::ArgFlagsTy Flags, 2666 unsigned PtrByteSize) { 2667 unsigned Align = PtrByteSize; 2668 2669 // Altivec parameters are padded to a 16 byte boundary. 2670 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 2671 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 2672 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 2673 ArgVT == MVT::v1i128) 2674 Align = 16; 2675 // QPX vector types stored in double-precision are padded to a 32 byte 2676 // boundary. 2677 else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1) 2678 Align = 32; 2679 2680 // ByVal parameters are aligned as requested. 2681 if (Flags.isByVal()) { 2682 unsigned BVAlign = Flags.getByValAlign(); 2683 if (BVAlign > PtrByteSize) { 2684 if (BVAlign % PtrByteSize != 0) 2685 llvm_unreachable( 2686 "ByVal alignment is not a multiple of the pointer size"); 2687 2688 Align = BVAlign; 2689 } 2690 } 2691 2692 // Array members are always packed to their original alignment. 2693 if (Flags.isInConsecutiveRegs()) { 2694 // If the array member was split into multiple registers, the first 2695 // needs to be aligned to the size of the full type. (Except for 2696 // ppcf128, which is only aligned as its f64 components.) 2697 if (Flags.isSplit() && OrigVT != MVT::ppcf128) 2698 Align = OrigVT.getStoreSize(); 2699 else 2700 Align = ArgVT.getStoreSize(); 2701 } 2702 2703 return Align; 2704 } 2705 2706 /// CalculateStackSlotUsed - Return whether this argument will use its 2707 /// stack slot (instead of being passed in registers). ArgOffset, 2708 /// AvailableFPRs, and AvailableVRs must hold the current argument 2709 /// position, and will be updated to account for this argument. 2710 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, 2711 ISD::ArgFlagsTy Flags, 2712 unsigned PtrByteSize, 2713 unsigned LinkageSize, 2714 unsigned ParamAreaSize, 2715 unsigned &ArgOffset, 2716 unsigned &AvailableFPRs, 2717 unsigned &AvailableVRs, bool HasQPX) { 2718 bool UseMemory = false; 2719 2720 // Respect alignment of argument on the stack. 2721 unsigned Align = 2722 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 2723 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 2724 // If there's no space left in the argument save area, we must 2725 // use memory (this check also catches zero-sized arguments). 2726 if (ArgOffset >= LinkageSize + ParamAreaSize) 2727 UseMemory = true; 2728 2729 // Allocate argument on the stack. 2730 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 2731 if (Flags.isInConsecutiveRegsLast()) 2732 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2733 // If we overran the argument save area, we must use memory 2734 // (this check catches arguments passed partially in memory) 2735 if (ArgOffset > LinkageSize + ParamAreaSize) 2736 UseMemory = true; 2737 2738 // However, if the argument is actually passed in an FPR or a VR, 2739 // we don't use memory after all. 2740 if (!Flags.isByVal()) { 2741 if (ArgVT == MVT::f32 || ArgVT == MVT::f64 || 2742 // QPX registers overlap with the scalar FP registers. 
2743 (HasQPX && (ArgVT == MVT::v4f32 || 2744 ArgVT == MVT::v4f64 || 2745 ArgVT == MVT::v4i1))) 2746 if (AvailableFPRs > 0) { 2747 --AvailableFPRs; 2748 return false; 2749 } 2750 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 2751 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 2752 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 2753 ArgVT == MVT::v1i128) 2754 if (AvailableVRs > 0) { 2755 --AvailableVRs; 2756 return false; 2757 } 2758 } 2759 2760 return UseMemory; 2761 } 2762 2763 /// EnsureStackAlignment - Round stack frame size up from NumBytes to 2764 /// ensure minimum alignment required for target. 2765 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, 2766 unsigned NumBytes) { 2767 unsigned TargetAlign = Lowering->getStackAlignment(); 2768 unsigned AlignMask = TargetAlign - 1; 2769 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 2770 return NumBytes; 2771 } 2772 2773 SDValue 2774 PPCTargetLowering::LowerFormalArguments(SDValue Chain, 2775 CallingConv::ID CallConv, bool isVarArg, 2776 const SmallVectorImpl<ISD::InputArg> 2777 &Ins, 2778 SDLoc dl, SelectionDAG &DAG, 2779 SmallVectorImpl<SDValue> &InVals) 2780 const { 2781 if (Subtarget.isSVR4ABI()) { 2782 if (Subtarget.isPPC64()) 2783 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, 2784 dl, DAG, InVals); 2785 else 2786 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, 2787 dl, DAG, InVals); 2788 } else { 2789 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, 2790 dl, DAG, InVals); 2791 } 2792 } 2793 2794 SDValue 2795 PPCTargetLowering::LowerFormalArguments_32SVR4( 2796 SDValue Chain, 2797 CallingConv::ID CallConv, bool isVarArg, 2798 const SmallVectorImpl<ISD::InputArg> 2799 &Ins, 2800 SDLoc dl, SelectionDAG &DAG, 2801 SmallVectorImpl<SDValue> &InVals) const { 2802 2803 // 32-bit SVR4 ABI Stack Frame Layout: 2804 // +-----------------------------------+ 2805 // +--> | Back chain | 2806 // | +-----------------------------------+ 2807 // | | Floating-point register save area | 2808 // | +-----------------------------------+ 2809 // | | General register save area | 2810 // | +-----------------------------------+ 2811 // | | CR save word | 2812 // | +-----------------------------------+ 2813 // | | VRSAVE save word | 2814 // | +-----------------------------------+ 2815 // | | Alignment padding | 2816 // | +-----------------------------------+ 2817 // | | Vector register save area | 2818 // | +-----------------------------------+ 2819 // | | Local variable space | 2820 // | +-----------------------------------+ 2821 // | | Parameter list area | 2822 // | +-----------------------------------+ 2823 // | | LR save word | 2824 // | +-----------------------------------+ 2825 // SP--> +--- | Back chain | 2826 // +-----------------------------------+ 2827 // 2828 // Specifications: 2829 // System V Application Binary Interface PowerPC Processor Supplement 2830 // AltiVec Technology Programming Interface Manual 2831 2832 MachineFunction &MF = DAG.getMachineFunction(); 2833 MachineFrameInfo *MFI = MF.getFrameInfo(); 2834 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2835 2836 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 2837 // Potential tail calls could cause overwriting of argument stack slots. 2838 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 2839 (CallConv == CallingConv::Fast)); 2840 unsigned PtrByteSize = 4; 2841 2842 // Assign locations to all of the incoming arguments. 
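  // (CC_PPC32_SVR4, generated via TableGen from the calling-convention
  // descriptions and pulled in through the PPCGenCallingConv.inc include
  // earlier in this file, fills ArgLocs with either a register or a stack
  // location for each formal argument.)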
2843 SmallVector<CCValAssign, 16> ArgLocs; 2844 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 2845 *DAG.getContext()); 2846 2847 // Reserve space for the linkage area on the stack. 2848 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 2849 CCInfo.AllocateStack(LinkageSize, PtrByteSize); 2850 2851 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 2852 2853 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2854 CCValAssign &VA = ArgLocs[i]; 2855 2856 // Arguments stored in registers. 2857 if (VA.isRegLoc()) { 2858 const TargetRegisterClass *RC; 2859 EVT ValVT = VA.getValVT(); 2860 2861 switch (ValVT.getSimpleVT().SimpleTy) { 2862 default: 2863 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 2864 case MVT::i1: 2865 case MVT::i32: 2866 RC = &PPC::GPRCRegClass; 2867 break; 2868 case MVT::f32: 2869 if (Subtarget.hasP8Vector()) 2870 RC = &PPC::VSSRCRegClass; 2871 else 2872 RC = &PPC::F4RCRegClass; 2873 break; 2874 case MVT::f64: 2875 if (Subtarget.hasVSX()) 2876 RC = &PPC::VSFRCRegClass; 2877 else 2878 RC = &PPC::F8RCRegClass; 2879 break; 2880 case MVT::v16i8: 2881 case MVT::v8i16: 2882 case MVT::v4i32: 2883 RC = &PPC::VRRCRegClass; 2884 break; 2885 case MVT::v4f32: 2886 RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass; 2887 break; 2888 case MVT::v2f64: 2889 case MVT::v2i64: 2890 RC = &PPC::VSHRCRegClass; 2891 break; 2892 case MVT::v4f64: 2893 RC = &PPC::QFRCRegClass; 2894 break; 2895 case MVT::v4i1: 2896 RC = &PPC::QBRCRegClass; 2897 break; 2898 } 2899 2900 // Transform the arguments stored in physical registers into virtual ones. 2901 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2902 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, 2903 ValVT == MVT::i1 ? MVT::i32 : ValVT); 2904 2905 if (ValVT == MVT::i1) 2906 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); 2907 2908 InVals.push_back(ArgValue); 2909 } else { 2910 // Argument stored in memory. 2911 assert(VA.isMemLoc()); 2912 2913 unsigned ArgSize = VA.getLocVT().getStoreSize(); 2914 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), 2915 isImmutable); 2916 2917 // Create load nodes to retrieve arguments from the stack. 2918 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2919 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2920 MachinePointerInfo(), 2921 false, false, false, 0)); 2922 } 2923 } 2924 2925 // Assign locations to all of the incoming aggregate by value arguments. 2926 // Aggregates passed by value are stored in the local variable space of the 2927 // caller's stack frame, right above the parameter list area. 2928 SmallVector<CCValAssign, 16> ByValArgLocs; 2929 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 2930 ByValArgLocs, *DAG.getContext()); 2931 2932 // Reserve stack space for the allocations in CCInfo. 2933 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 2934 2935 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 2936 2937 // Area that is at least reserved in the caller of this function. 2938 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 2939 MinReservedArea = std::max(MinReservedArea, LinkageSize); 2940 2941 // Set the size that is at least reserved in caller of this function. Tail 2942 // call optimized function's reserved stack space needs to be aligned so that 2943 // taking the difference between two stack areas will result in an aligned 2944 // stack. 
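  // For example, with a 16-byte target stack alignment, a MinReservedArea of
  // 52 bytes rounds up to 64 in EnsureStackAlignment above, via
  // (NumBytes + AlignMask) & ~AlignMask.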
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  SmallVector<SDValue, 8> MemOps;

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    static const MCPhysReg GPArgRegs[] = {
      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
      PPC::R7, PPC::R8, PPC::R9, PPC::R10,
    };
    const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);

    static const MCPhysReg FPArgRegs[] = {
      PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
      PPC::F8
    };
    unsigned NumFPArgRegs = array_lengthof(FPArgRegs);

    if (Subtarget.useSoftFloat())
      NumFPArgRegs = 0;

    FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
    FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));

    // Make room for NumGPArgRegs and NumFPArgRegs.
    int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
                NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;

    FuncInfo->setVarArgsStackOffset(
      MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
                             CCInfo.getNextStackOffset(), true));

    FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // The fixed integer arguments of a variadic function are stored to the
    // VarArgsFrameIndex on the stack so that they may be loaded by
    // dereferencing the result of va_next.
    for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
      // Get an existing live-in vreg, or add a new one.
      unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
      if (!VReg)
        VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                   MachinePointerInfo(), false, false, 0);
      MemOps.push_back(Store);
      // Increment the address by four for the next argument to store
      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }

    // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
    // is set.
    // The double arguments are stored to the VarArgsFrameIndex
    // on the stack.
    for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
      // Get an existing live-in vreg, or add a new one.
      unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
      if (!VReg)
        VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
      SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                   MachinePointerInfo(), false, false, 0);
      MemOps.push_back(Store);
      // Increment the address by eight for the next argument to store
      SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
                                       PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

// PPC64 passes i8, i16, and i32 values in i64 registers.
Promote 3029 // value to MVT::i64 and then truncate to the correct register size. 3030 SDValue 3031 PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT, 3032 SelectionDAG &DAG, SDValue ArgVal, 3033 SDLoc dl) const { 3034 if (Flags.isSExt()) 3035 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 3036 DAG.getValueType(ObjectVT)); 3037 else if (Flags.isZExt()) 3038 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 3039 DAG.getValueType(ObjectVT)); 3040 3041 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal); 3042 } 3043 3044 SDValue 3045 PPCTargetLowering::LowerFormalArguments_64SVR4( 3046 SDValue Chain, 3047 CallingConv::ID CallConv, bool isVarArg, 3048 const SmallVectorImpl<ISD::InputArg> 3049 &Ins, 3050 SDLoc dl, SelectionDAG &DAG, 3051 SmallVectorImpl<SDValue> &InVals) const { 3052 // TODO: add description of PPC stack frame format, or at least some docs. 3053 // 3054 bool isELFv2ABI = Subtarget.isELFv2ABI(); 3055 bool isLittleEndian = Subtarget.isLittleEndian(); 3056 MachineFunction &MF = DAG.getMachineFunction(); 3057 MachineFrameInfo *MFI = MF.getFrameInfo(); 3058 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3059 3060 assert(!(CallConv == CallingConv::Fast && isVarArg) && 3061 "fastcc not supported on varargs functions"); 3062 3063 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 3064 // Potential tail calls could cause overwriting of argument stack slots. 3065 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3066 (CallConv == CallingConv::Fast)); 3067 unsigned PtrByteSize = 8; 3068 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3069 3070 static const MCPhysReg GPR[] = { 3071 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3072 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3073 }; 3074 static const MCPhysReg VR[] = { 3075 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3076 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3077 }; 3078 static const MCPhysReg VSRH[] = { 3079 PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8, 3080 PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13 3081 }; 3082 3083 const unsigned Num_GPR_Regs = array_lengthof(GPR); 3084 const unsigned Num_FPR_Regs = 13; 3085 const unsigned Num_VR_Regs = array_lengthof(VR); 3086 const unsigned Num_QFPR_Regs = Num_FPR_Regs; 3087 3088 // Do a first pass over the arguments to determine whether the ABI 3089 // guarantees that our caller has allocated the parameter save area 3090 // on its stack frame. In the ELFv1 ABI, this is always the case; 3091 // in the ELFv2 ABI, it is true if this is a vararg function or if 3092 // any parameter is located in a stack slot. 3093 3094 bool HasParameterArea = !isELFv2ABI || isVarArg; 3095 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize; 3096 unsigned NumBytes = LinkageSize; 3097 unsigned AvailableFPRs = Num_FPR_Regs; 3098 unsigned AvailableVRs = Num_VR_Regs; 3099 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 3100 if (Ins[i].Flags.isNest()) 3101 continue; 3102 3103 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags, 3104 PtrByteSize, LinkageSize, ParamAreaSize, 3105 NumBytes, AvailableFPRs, AvailableVRs, 3106 Subtarget.hasQPX())) 3107 HasParameterArea = true; 3108 } 3109 3110 // Add DAG nodes to load the arguments or copy them out of registers. On 3111 // entry to a function on PPC, the arguments start after the linkage area, 3112 // although the first ones are often in registers. 
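  // For example, under ELFv2 a function such as long f(long a, long b)
  // receives both arguments in GPRs; the first pass above then leaves
  // HasParameterArea false, and the caller need not have allocated a
  // parameter save area at all.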
  unsigned ArgOffset = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  unsigned &QFPR_idx = FPR_idx;
  SmallVector<SDValue, 8> MemOps;
  Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    SDValue ArgVal;
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    EVT OrigVT = Ins[ArgNo].ArgVT;
    unsigned ObjSize = ObjectVT.getStoreSize();
    unsigned ArgSize = ObjSize;
    ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
    if (Ins[ArgNo].isOrigArg()) {
      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
    }
    // We re-align the argument offset for each argument, except when using the
    // fast calling convention, when we need to make sure we do that only when
    // we'll actually use a stack slot.
    unsigned CurArgOffset, Align;
    auto ComputeArgOffset = [&]() {
      /* Respect alignment of argument on the stack. */
      Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
      ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
      CurArgOffset = ArgOffset;
    };

    if (CallConv != CallingConv::Fast) {
      ComputeArgOffset();

      /* Compute GPR index associated with argument offset. */
      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
    }

    // FIXME the codegen can be much improved in some cases.
    // We do not have to keep everything in memory.
    if (Flags.isByVal()) {
      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");

      if (CallConv == CallingConv::Fast)
        ComputeArgOffset();

      // ObjSize is the true size, ArgSize rounded up to multiple of registers.
      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      // Empty aggregate parameters do not take up registers.  Examples:
      //   struct { } a;
      //   union  { } b;
      //   int c[0];
      // etc.  However, we have to provide a place-holder in InVals, so
      // pretend we have an 8-byte item at the current address for that
      // purpose.
      if (!ObjSize) {
        int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
        SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
        InVals.push_back(FIN);
        continue;
      }

      // Create a stack object covering all stack doublewords occupied
      // by the argument.  If the argument is (fully or partially) on
      // the stack, or if the argument is fully in registers but the
      // caller has allocated the parameter save area anyway, we can refer
      // directly to the caller's stack frame.  Otherwise, create a
      // local copy in our own frame.
      int FI;
      if (HasParameterArea ||
          ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
        FI = MFI->CreateFixedObject(ArgSize, ArgOffset, false, true);
      else
        FI = MFI->CreateStackObject(ArgSize, Align, false);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);

      // Handle aggregates smaller than 8 bytes.
      if (ObjSize < PtrByteSize) {
        // The value of the object is its address, which differs from the
        // address of the enclosing doubleword on big-endian systems.
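        // For example, a 2-byte aggregate in the doubleword at offset 32 is
        // addressed at offset 38 (32 + 8 - 2) on big-endian systems, but at
        // offset 32 on little-endian systems.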
3194 SDValue Arg = FIN; 3195 if (!isLittleEndian) { 3196 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT); 3197 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff); 3198 } 3199 InVals.push_back(Arg); 3200 3201 if (GPR_idx != Num_GPR_Regs) { 3202 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3203 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3204 SDValue Store; 3205 3206 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 3207 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 3208 (ObjSize == 2 ? MVT::i16 : MVT::i32)); 3209 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg, 3210 MachinePointerInfo(&*FuncArg), ObjType, 3211 false, false, 0); 3212 } else { 3213 // For sizes that don't fit a truncating store (3, 5, 6, 7), 3214 // store the whole register as-is to the parameter save area 3215 // slot. 3216 Store = 3217 DAG.getStore(Val.getValue(1), dl, Val, FIN, 3218 MachinePointerInfo(&*FuncArg), false, false, 0); 3219 } 3220 3221 MemOps.push_back(Store); 3222 } 3223 // Whether we copied from a register or not, advance the offset 3224 // into the parameter save area by a full doubleword. 3225 ArgOffset += PtrByteSize; 3226 continue; 3227 } 3228 3229 // The value of the object is its address, which is the address of 3230 // its first stack doubleword. 3231 InVals.push_back(FIN); 3232 3233 // Store whatever pieces of the object are in registers to memory. 3234 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3235 if (GPR_idx == Num_GPR_Regs) 3236 break; 3237 3238 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3239 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3240 SDValue Addr = FIN; 3241 if (j) { 3242 SDValue Off = DAG.getConstant(j, dl, PtrVT); 3243 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off); 3244 } 3245 SDValue Store = 3246 DAG.getStore(Val.getValue(1), dl, Val, Addr, 3247 MachinePointerInfo(&*FuncArg, j), false, false, 0); 3248 MemOps.push_back(Store); 3249 ++GPR_idx; 3250 } 3251 ArgOffset += ArgSize; 3252 continue; 3253 } 3254 3255 switch (ObjectVT.getSimpleVT().SimpleTy) { 3256 default: llvm_unreachable("Unhandled argument type!"); 3257 case MVT::i1: 3258 case MVT::i32: 3259 case MVT::i64: 3260 if (Flags.isNest()) { 3261 // The 'nest' parameter, if any, is passed in R11. 3262 unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass); 3263 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3264 3265 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3266 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3267 3268 break; 3269 } 3270 3271 // These can be scalar arguments or elements of an integer array type 3272 // passed directly. Clang may use those instead of "byval" aggregate 3273 // types to avoid forcing arguments to memory unnecessarily. 3274 if (GPR_idx != Num_GPR_Regs) { 3275 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3276 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3277 3278 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3279 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3280 // value to MVT::i64 and then truncate to the correct register size. 
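          // For example, a signed i32 that arrived in a 64-bit GPR is wrapped
          // in AssertSext by extendArgForPPC64 before being truncated back to
          // i32.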
        ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();

        needsLoad = true;
        ArgSize = PtrByteSize;
      }
      if (CallConv != CallingConv::Fast || needsLoad)
        ArgOffset += 8;
      break;

    case MVT::f32:
    case MVT::f64:
      // These can be scalar arguments or elements of a float array type
      // passed directly.  The latter are used to implement ELFv2 homogeneous
      // float aggregates.
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;

        if (ObjectVT == MVT::f32)
          VReg = MF.addLiveIn(FPR[FPR_idx],
                              Subtarget.hasP8Vector()
                                ? &PPC::VSSRCRegClass
                                : &PPC::F4RCRegClass);
        else
          VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
                                              ? &PPC::VSFRCRegClass
                                              : &PPC::F8RCRegClass);

        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++FPR_idx;
      } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
        // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
        // once we support fp <-> gpr moves.

        // This can only ever happen in the presence of f32 array types,
        // since otherwise we never run out of FPRs before running out
        // of GPRs.
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::f32) {
          if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
            ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
                                 DAG.getConstant(32, dl, MVT::i32));
          ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
        }

        ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();

        needsLoad = true;
      }

      // When passing an array of floats, the array occupies consecutive
      // space in the argument area; only round up to the next doubleword
      // at the end of the array.  Otherwise, each float takes 8 bytes.
      if (CallConv != CallingConv::Fast || needsLoad) {
        ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
        ArgOffset += ArgSize;
        if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      }
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
    case MVT::v2f64:
    case MVT::v2i64:
    case MVT::v1i128:
      if (!Subtarget.hasQPX()) {
        // These can be scalar arguments or elements of a vector array type
        // passed directly.  The latter are used to implement ELFv2 homogeneous
        // vector aggregates.
        if (VR_idx != Num_VR_Regs) {
          unsigned VReg = (ObjectVT == MVT::v2f64 || ObjectVT == MVT::v2i64) ?
                          MF.addLiveIn(VSRH[VR_idx], &PPC::VSHRCRegClass) :
                          MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
          ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
          ++VR_idx;
        } else {
          if (CallConv == CallingConv::Fast)
            ComputeArgOffset();

          needsLoad = true;
        }
        if (CallConv != CallingConv::Fast || needsLoad)
          ArgOffset += 16;
        break;
      } // not QPX

      assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 &&
             "Invalid QPX parameter type");
      /* fall through */

    case MVT::v4f64:
    case MVT::v4i1:
      // QPX vectors are treated like their scalar floating-point subregisters
      // (except that they're larger).
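      // For example, a v4f64 argument occupies a single QFPR when passed in a
      // register and 32 bytes of argument space when in memory (Sz below).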
      unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32;
      if (QFPR_idx != Num_QFPR_Regs) {
        const TargetRegisterClass *RC;
        switch (ObjectVT.getSimpleVT().SimpleTy) {
        case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
        case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
        default:         RC = &PPC::QBRCRegClass; break;
        }

        unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++QFPR_idx;
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();
        needsLoad = true;
      }
      if (CallConv != CallingConv::Fast || needsLoad)
        ArgOffset += Sz;
      break;
    }

    // We need to load the argument to a virtual register if we determined
    // above that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      if (ObjSize < ArgSize && !isLittleEndian)
        CurArgOffset += ArgSize - ObjSize;
      int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
                           false, false, false, 0);
    }

    InVals.push_back(ArgVal);
  }

  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea;
  if (HasParameterArea)
    MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
  else
    MinReservedArea = LinkageSize;

  // Set the size that is at least reserved in caller of this function.  Tail
  // call optimized functions' reserved stack space needs to be aligned so that
  // taking the difference between two stack areas will result in an aligned
  // stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    int Depth = ArgOffset;

    FuncInfo->setVarArgsFrameIndex(
      MFI->CreateFixedObject(PtrByteSize, Depth, true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
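    // Illustrative example: for long f(long a, ...) under ELFv1, a arrives in
    // X3, so the loop below spills X4 through X10 to their home doublewords in
    // the parameter save area for va_arg to walk.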
    for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
         GPR_idx < Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                   MachinePointerInfo(), false, false, 0);
      MemOps.push_back(Store);
      // Increment the address by four for the next argument to store
      SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

SDValue
PPCTargetLowering::LowerFormalArguments_Darwin(
                                      SDValue Chain,
                                      CallingConv::ID CallConv, bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg>
                                        &Ins,
                                      SDLoc dl, SelectionDAG &DAG,
                                      SmallVectorImpl<SDValue> &InVals) const {
  // TODO: add description of PPC stack frame format, or at least some docs.
  //
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = isPPC64 ? 8 : 4;
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned ArgOffset = LinkageSize;
  // Area that is at least reserved in caller of this function.
  unsigned MinReservedArea = ArgOffset;

  static const MCPhysReg GPR_32[] = {           // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const MCPhysReg GPR_64[] = {           // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
  const unsigned Num_FPR_Regs = 13;
  const unsigned Num_VR_Regs  = array_lengthof(VR);

  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;

  // In 32-bit non-varargs functions, the stack space for vectors is after the
  // stack space for non-vectors.  We do not use this space unless we have
  // too many vectors to fit in registers, something that only occurs in
  // constructed examples, but we have to walk the arg list to figure
  // that out.  For the pathological case, compute VecArgOffset as the
  // start of the vector parameter area; computing VecArgOffset is the
  // entire point of the following loop.
  unsigned VecArgOffset = ArgOffset;
  if (!isVarArg && !isPPC64) {
    for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
         ++ArgNo) {
      EVT ObjectVT = Ins[ArgNo].VT;
      ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;

      if (Flags.isByVal()) {
        // ObjSize is the true size, ArgSize rounded up to multiple of regs.
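        // For example, a 10-byte byval aggregate has ObjSize == 10 and, with
        // 4-byte pointers, an ArgSize of 12.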
3528 unsigned ObjSize = Flags.getByValSize(); 3529 unsigned ArgSize = 3530 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3531 VecArgOffset += ArgSize; 3532 continue; 3533 } 3534 3535 switch(ObjectVT.getSimpleVT().SimpleTy) { 3536 default: llvm_unreachable("Unhandled argument type!"); 3537 case MVT::i1: 3538 case MVT::i32: 3539 case MVT::f32: 3540 VecArgOffset += 4; 3541 break; 3542 case MVT::i64: // PPC64 3543 case MVT::f64: 3544 // FIXME: We are guaranteed to be !isPPC64 at this point. 3545 // Does MVT::i64 apply? 3546 VecArgOffset += 8; 3547 break; 3548 case MVT::v4f32: 3549 case MVT::v4i32: 3550 case MVT::v8i16: 3551 case MVT::v16i8: 3552 // Nothing to do, we're only looking at Nonvector args here. 3553 break; 3554 } 3555 } 3556 } 3557 // We've found where the vector parameter area in memory is. Skip the 3558 // first 12 parameters; these don't use that memory. 3559 VecArgOffset = ((VecArgOffset+15)/16)*16; 3560 VecArgOffset += 12*16; 3561 3562 // Add DAG nodes to load the arguments or copy them out of registers. On 3563 // entry to a function on PPC, the arguments start after the linkage area, 3564 // although the first ones are often in registers. 3565 3566 SmallVector<SDValue, 8> MemOps; 3567 unsigned nAltivecParamsAtEnd = 0; 3568 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 3569 unsigned CurArgIdx = 0; 3570 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3571 SDValue ArgVal; 3572 bool needsLoad = false; 3573 EVT ObjectVT = Ins[ArgNo].VT; 3574 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 3575 unsigned ArgSize = ObjSize; 3576 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3577 if (Ins[ArgNo].isOrigArg()) { 3578 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 3579 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 3580 } 3581 unsigned CurArgOffset = ArgOffset; 3582 3583 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 3584 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 3585 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 3586 if (isVarArg || isPPC64) { 3587 MinReservedArea = ((MinReservedArea+15)/16)*16; 3588 MinReservedArea += CalculateStackSlotSize(ObjectVT, 3589 Flags, 3590 PtrByteSize); 3591 } else nAltivecParamsAtEnd++; 3592 } else 3593 // Calculate min reserved area. 3594 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 3595 Flags, 3596 PtrByteSize); 3597 3598 // FIXME the codegen can be much improved in some cases. 3599 // We do not have to keep everything in memory. 3600 if (Flags.isByVal()) { 3601 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 3602 3603 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3604 ObjSize = Flags.getByValSize(); 3605 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3606 // Objects of size 1 and 2 are right justified, everything else is 3607 // left justified. This means the memory address is adjusted forwards. 3608 if (ObjSize==1 || ObjSize==2) { 3609 CurArgOffset = CurArgOffset + (4 - ObjSize); 3610 } 3611 // The value of the object is its address. 
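      // For example, a 2-byte aggregate whose slot begins at offset 24 is
      // given the address 26, right-justified within its 4-byte slot.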
3612 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, false, true); 3613 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3614 InVals.push_back(FIN); 3615 if (ObjSize==1 || ObjSize==2) { 3616 if (GPR_idx != Num_GPR_Regs) { 3617 unsigned VReg; 3618 if (isPPC64) 3619 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3620 else 3621 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3622 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3623 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16; 3624 SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 3625 MachinePointerInfo(&*FuncArg), 3626 ObjType, false, false, 0); 3627 MemOps.push_back(Store); 3628 ++GPR_idx; 3629 } 3630 3631 ArgOffset += PtrByteSize; 3632 3633 continue; 3634 } 3635 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3636 // Store whatever pieces of the object are in registers 3637 // to memory. ArgOffset will be the address of the beginning 3638 // of the object. 3639 if (GPR_idx != Num_GPR_Regs) { 3640 unsigned VReg; 3641 if (isPPC64) 3642 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3643 else 3644 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3645 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 3646 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3647 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3648 SDValue Store = 3649 DAG.getStore(Val.getValue(1), dl, Val, FIN, 3650 MachinePointerInfo(&*FuncArg, j), false, false, 0); 3651 MemOps.push_back(Store); 3652 ++GPR_idx; 3653 ArgOffset += PtrByteSize; 3654 } else { 3655 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 3656 break; 3657 } 3658 } 3659 continue; 3660 } 3661 3662 switch (ObjectVT.getSimpleVT().SimpleTy) { 3663 default: llvm_unreachable("Unhandled argument type!"); 3664 case MVT::i1: 3665 case MVT::i32: 3666 if (!isPPC64) { 3667 if (GPR_idx != Num_GPR_Regs) { 3668 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3669 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 3670 3671 if (ObjectVT == MVT::i1) 3672 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal); 3673 3674 ++GPR_idx; 3675 } else { 3676 needsLoad = true; 3677 ArgSize = PtrByteSize; 3678 } 3679 // All int arguments reserve stack space in the Darwin ABI. 3680 ArgOffset += PtrByteSize; 3681 break; 3682 } 3683 // FALLTHROUGH 3684 case MVT::i64: // PPC64 3685 if (GPR_idx != Num_GPR_Regs) { 3686 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3687 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3688 3689 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3690 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3691 // value to MVT::i64 and then truncate to the correct register size. 3692 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3693 3694 ++GPR_idx; 3695 } else { 3696 needsLoad = true; 3697 ArgSize = PtrByteSize; 3698 } 3699 // All int arguments reserve stack space in the Darwin ABI. 3700 ArgOffset += 8; 3701 break; 3702 3703 case MVT::f32: 3704 case MVT::f64: 3705 // Every 4 bytes of argument space consumes one of the GPRs available for 3706 // argument passing. 
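      // For example, on 32-bit Darwin a double passed in F1 also consumes R3
      // and R4, while a float consumes only a single GPR.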
      if (GPR_idx != Num_GPR_Regs) {
        ++GPR_idx;
        if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
          ++GPR_idx;
      }
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;

        if (ObjectVT == MVT::f32)
          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
        else
          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);

        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++FPR_idx;
      } else {
        needsLoad = true;
      }

      // All FP arguments reserve stack space in the Darwin ABI.
      ArgOffset += isPPC64 ? 8 : ObjSize;
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      // Note that vector arguments in registers don't reserve stack space,
      // except in varargs functions.
      if (VR_idx != Num_VR_Regs) {
        unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        if (isVarArg) {
          while ((ArgOffset % 16) != 0) {
            ArgOffset += PtrByteSize;
            if (GPR_idx != Num_GPR_Regs)
              GPR_idx++;
          }
          ArgOffset += 16;
          GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
        }
        ++VR_idx;
      } else {
        if (!isVarArg && !isPPC64) {
          // Vectors go after all the nonvectors.
          CurArgOffset = VecArgOffset;
          VecArgOffset += 16;
        } else {
          // Vectors are aligned.
          ArgOffset = ((ArgOffset+15)/16)*16;
          CurArgOffset = ArgOffset;
          ArgOffset += 16;
        }
        needsLoad = true;
      }
      break;
    }

    // We need to load the argument to a virtual register if we determined above
    // that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      int FI = MFI->CreateFixedObject(ObjSize,
                                      CurArgOffset + (ArgSize - ObjSize),
                                      isImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
                           false, false, false, 0);
    }

    InVals.push_back(ArgVal);
  }

  // Allow for Altivec parameters at the end, if needed.
  if (nAltivecParamsAtEnd) {
    MinReservedArea = ((MinReservedArea+15)/16)*16;
    MinReservedArea += 16*nAltivecParamsAtEnd;
  }

  // Area that is at least reserved in the caller of this function.
  MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);

  // Set the size that is at least reserved in caller of this function.  Tail
  // call optimized functions' reserved stack space needs to be aligned so that
  // taking the difference between two stack areas will result in an aligned
  // stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    int Depth = ArgOffset;

    FuncInfo->setVarArgsFrameIndex(
      MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
                             Depth, true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
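    // Illustrative example: for int f(int a, ...) on 32-bit Darwin, a consumed
    // R3, so the loop below spills R4 through R10 to the vararg save area.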
    for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg;

      if (isPPC64)
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      else
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                   MachinePointerInfo(), false, false, 0);
      MemOps.push_back(Store);
      // Increment the address by four for the next argument to store
      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
/// adjusted to accommodate the arguments for the tailcall.
static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
                                   unsigned ParamSize) {

  if (!isTailCall) return 0;

  PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
  unsigned CallerMinReservedArea = FI->getMinReservedArea();
  int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
  // Remember only if the new adjustment is bigger.
  if (SPDiff < FI->getTailCallSPDelta())
    FI->setTailCallSPDelta(SPDiff);

  return SPDiff;
}

static bool isFunctionGlobalAddress(SDValue Callee);

static bool
resideInSameModule(SDValue Callee, Reloc::Model RelMod) {
  // If !G, Callee can be an external symbol.
  GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
  if (!G) return false;

  const GlobalValue *GV = G->getGlobal();

  if (GV->isDeclaration()) return false;

  switch(GV->getLinkage()) {
  default: llvm_unreachable("unknown linkage type");
  case GlobalValue::AvailableExternallyLinkage:
  case GlobalValue::ExternalWeakLinkage:
    return false;

  // A callee with weak linkage is allowed if it has hidden or protected
  // visibility.
  case GlobalValue::LinkOnceAnyLinkage:
  case GlobalValue::LinkOnceODRLinkage: // e.g. C++ inline functions
  case GlobalValue::WeakAnyLinkage:
  case GlobalValue::WeakODRLinkage:     // e.g. C++ template instantiations
    if (GV->hasDefaultVisibility())
      return false;

  case GlobalValue::ExternalLinkage:
  case GlobalValue::InternalLinkage:
  case GlobalValue::PrivateLinkage:
    break;
  }

  // With '-fPIC', a call to a default-visibility function must be followed by
  // a 'nop' regardless of whether the function resides in the same module, so
  // we treat such a callee as residing in a different module.
  if (RelMod == Reloc::PIC_ && GV->hasDefaultVisibility())
    return false;

  return true;
}

static bool
needStackSlotPassParameters(const PPCSubtarget &Subtarget,
                            const SmallVectorImpl<ISD::OutputArg> &Outs) {
  assert(Subtarget.isSVR4ABI() && Subtarget.isPPC64());

  const unsigned PtrByteSize = 8;
  const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned NumGPRs = array_lengthof(GPR);
  const unsigned NumFPRs = 13;
  const unsigned NumVRs = array_lengthof(VR);
  const unsigned ParamAreaSize = NumGPRs * PtrByteSize;

  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = NumFPRs;
  unsigned AvailableVRs = NumVRs;

  for (const ISD::OutputArg& Param : Outs) {
    if (Param.Flags.isNest()) continue;

    if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
                               PtrByteSize, LinkageSize, ParamAreaSize,
                               NumBytes, AvailableFPRs, AvailableVRs,
                               Subtarget.hasQPX()))
      return true;
  }
  return false;
}

static bool
hasSameArgumentList(const Function *CallerFn, ImmutableCallSite *CS) {
  if (CS->arg_size() != CallerFn->getArgumentList().size())
    return false;

  ImmutableCallSite::arg_iterator CalleeArgIter = CS->arg_begin();
  ImmutableCallSite::arg_iterator CalleeArgEnd = CS->arg_end();
  Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();

  for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
    const Value* CalleeArg = *CalleeArgIter;
    const Value* CallerArg = &(*CallerArgIter);
    if (CalleeArg == CallerArg)
      continue;

    // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
    //        tail call @callee([4 x i64] undef, [4 x i64] %b)
    //      }
    // The callee's first argument is undef and has the same type as the
    // caller's first argument.
    if (CalleeArg->getType() == CallerArg->getType() &&
        isa<UndefValue>(CalleeArg))
      continue;

    return false;
  }

  return true;
}

bool
PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
                                    SDValue Callee,
                                    CallingConv::ID CalleeCC,
                                    ImmutableCallSite *CS,
                                    bool isVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SelectionDAG& DAG) const {
  bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;

  if (DisableSCO && !TailCallOpt) return false;

  // Variadic argument functions are not supported.
  if (isVarArg) return false;

  MachineFunction &MF = DAG.getMachineFunction();
  CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();

  // Tail or sibling call optimization (TCO/SCO) requires that the callee and
  // caller have the same calling convention.
  if (CallerCC != CalleeCC) return false;

  // SCO supports only the C and fast calling conventions.
  if (CalleeCC != CallingConv::Fast && CalleeCC != CallingConv::C)
    return false;

  // Functions containing byval parameters are not supported.
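  // Illustrative example: a call like tail call void @g(%struct.S* byval %s)
  // is rejected here, since byval arguments are passed via stack copies.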
  if (std::any_of(Ins.begin(), Ins.end(),
                  [](const ISD::InputArg& IA) { return IA.Flags.isByVal(); }))
    return false;

  // No TCO/SCO on indirect calls, because the caller has to restore its TOC.
  if (!isFunctionGlobalAddress(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee))
    return false;

  // Check if the callee resides in the same module, because for now the PPC64
  // SVR4 ABI (ELFv1/ELFv2) doesn't allow tail calls to a symbol that resides
  // in another module.
  // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
  if (!resideInSameModule(Callee, getTargetMachine().getRelocationModel()))
    return false;

  // TCO allows altering the callee's ABI, so we don't have to check further.
  if (CalleeCC == CallingConv::Fast && TailCallOpt)
    return true;

  if (DisableSCO) return false;

  // If the callee uses the same argument list as the caller, we can apply SCO
  // directly.  Otherwise, we need to check whether the callee needs stack
  // slots for passing arguments.
  if (!hasSameArgumentList(MF.getFunction(), CS) &&
      needStackSlotPassParameters(Subtarget, Outs)) {
    return false;
  }

  return true;
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool
PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                                     CallingConv::ID CalleeCC,
                                                     bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                                     SelectionDAG& DAG) const {
  if (!getTargetMachine().Options.GuaranteedTailCallOpt)
    return false;

  // Variable argument functions are not supported.
  if (isVarArg)
    return false;

  MachineFunction &MF = DAG.getMachineFunction();
  CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
  if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
    // Functions containing byval parameters are not supported.
    for (unsigned i = 0; i != Ins.size(); i++) {
      ISD::ArgFlagsTy Flags = Ins[i].Flags;
      if (Flags.isByVal()) return false;
    }

    // Non-PIC/GOT tail calls are supported.
    if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
      return true;

    // At the moment we can only do local tail calls (in the same module,
    // hidden or protected) if we are generating PIC.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
      return G->getGlobal()->hasHiddenVisibility()
          || G->getGlobal()->hasProtectedVisibility();
  }

  return false;
}

/// isBLACompatibleAddress - Return the immediate to use if the specified
/// 32-bit value is representable in the immediate field of a BxA instruction.
static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
  if (!C) return nullptr;

  int Addr = C->getZExtValue();
  if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
      SignExtend32<26>(Addr) != Addr)
    return nullptr;  // Top 6 bits have to be sext of immediate.
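  // For example, Addr == 0x2000 passes both checks and is encoded below as
  // 0x800 (0x2000 >> 2), while Addr == 0x2001 fails the low-bits check.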
4068 4069 return DAG.getConstant((int)C->getZExtValue() >> 2, SDLoc(Op), 4070 DAG.getTargetLoweringInfo().getPointerTy( 4071 DAG.getDataLayout())).getNode(); 4072 } 4073 4074 namespace { 4075 4076 struct TailCallArgumentInfo { 4077 SDValue Arg; 4078 SDValue FrameIdxOp; 4079 int FrameIdx; 4080 4081 TailCallArgumentInfo() : FrameIdx(0) {} 4082 }; 4083 } 4084 4085 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. 4086 static void 4087 StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, 4088 SDValue Chain, 4089 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs, 4090 SmallVectorImpl<SDValue> &MemOpChains, 4091 SDLoc dl) { 4092 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 4093 SDValue Arg = TailCallArgs[i].Arg; 4094 SDValue FIN = TailCallArgs[i].FrameIdxOp; 4095 int FI = TailCallArgs[i].FrameIdx; 4096 // Store relative to framepointer. 4097 MemOpChains.push_back(DAG.getStore( 4098 Chain, dl, Arg, FIN, 4099 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), false, 4100 false, 0)); 4101 } 4102 } 4103 4104 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 4105 /// the appropriate stack slot for the tail call optimized function call. 4106 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, 4107 MachineFunction &MF, 4108 SDValue Chain, 4109 SDValue OldRetAddr, 4110 SDValue OldFP, 4111 int SPDiff, 4112 bool isPPC64, 4113 bool isDarwinABI, 4114 SDLoc dl) { 4115 if (SPDiff) { 4116 // Calculate the new stack slot for the return address. 4117 int SlotSize = isPPC64 ? 8 : 4; 4118 const PPCFrameLowering *FL = 4119 MF.getSubtarget<PPCSubtarget>().getFrameLowering(); 4120 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset(); 4121 int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize, 4122 NewRetAddrLoc, true); 4123 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 4124 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 4125 Chain = DAG.getStore( 4126 Chain, dl, OldRetAddr, NewRetAddrFrIdx, 4127 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), NewRetAddr), 4128 false, false, 0); 4129 4130 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack 4131 // slot as the FP is never overwritten. 4132 if (isDarwinABI) { 4133 int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset(); 4134 int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc, 4135 true); 4136 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 4137 Chain = DAG.getStore( 4138 Chain, dl, OldFP, NewFramePtrIdx, 4139 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), NewFPIdx), 4140 false, false, 0); 4141 } 4142 } 4143 return Chain; 4144 } 4145 4146 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate 4147 /// the position of the argument. 4148 static void 4149 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 4150 SDValue Arg, int SPDiff, unsigned ArgOffset, 4151 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) { 4152 int Offset = ArgOffset + SPDiff; 4153 uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8; 4154 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 4155 EVT VT = isPPC64 ? 
                                                        MVT::i64 : MVT::i32;
  SDValue FIN = DAG.getFrameIndex(FI, VT);
  TailCallArgumentInfo Info;
  Info.Arg = Arg;
  Info.FrameIdxOp = FIN;
  Info.FrameIdx = FI;
  TailCallArguments.push_back(Info);
}

/// EmitTailCallLoadFPAndRetAddr - Emit a load from the frame pointer and
/// return address stack slots. Returns the chain as result and the loaded
/// frame pointers in LROpOut/FPOpOut. Used when tail calling.
SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
                                                        int SPDiff,
                                                        SDValue Chain,
                                                        SDValue &LROpOut,
                                                        SDValue &FPOpOut,
                                                        bool isDarwinABI,
                                                        SDLoc dl) const {
  if (SPDiff) {
    // Load the LR and FP stack slot for later adjusting.
    EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
    LROpOut = getReturnAddrFrameIndex(DAG);
    LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(),
                          false, false, false, 0);
    Chain = SDValue(LROpOut.getNode(), 1);

    // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
    // slot as the FP is never overwritten.
    if (isDarwinABI) {
      FPOpOut = getFramePointerFrameIndex(DAG);
      FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(),
                            false, false, false, 0);
      Chain = SDValue(FPOpOut.getNode(), 1);
    }
  }
  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
/// specified by "Src" to address "Dst".  The size and alignment of the copy
/// are given by the byval parameter attribute.  The copy will be passed as
/// a byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part that
/// does not fit in registers.
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                          SDLoc dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       false, false, false, MachinePointerInfo(),
                       MachinePointerInfo());
}

/// LowerMemOpCallTo - Store the argument to the stack or remember it in case
/// of tail calls.
static void
LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain,
                 SDValue Arg, SDValue PtrOff, int SPDiff,
                 unsigned ArgOffset, bool isPPC64, bool isTailCall,
                 bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
                 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments,
                 SDLoc dl) {
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  if (!isTailCall) {
    if (isVector) {
      SDValue StackPtr;
      if (isPPC64)
        StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
      else
        StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
                           DAG.getConstant(ArgOffset, dl, PtrVT));
    }
    MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
                                       MachinePointerInfo(), false, false, 0));
  // Calculate and remember argument location.
4233 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 4234 TailCallArguments); 4235 } 4236 4237 static 4238 void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 4239 SDLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes, 4240 SDValue LROp, SDValue FPOp, bool isDarwinABI, 4241 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 4242 MachineFunction &MF = DAG.getMachineFunction(); 4243 4244 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 4245 // might overwrite each other in case of tail call optimization. 4246 SmallVector<SDValue, 8> MemOpChains2; 4247 // Do not flag preceding copytoreg stuff together with the following stuff. 4248 InFlag = SDValue(); 4249 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 4250 MemOpChains2, dl); 4251 if (!MemOpChains2.empty()) 4252 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); 4253 4254 // Store the return address to the appropriate stack slot. 4255 Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff, 4256 isPPC64, isDarwinABI, dl); 4257 4258 // Emit callseq_end just before tailcall node. 4259 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4260 DAG.getIntPtrConstant(0, dl, true), InFlag, dl); 4261 InFlag = Chain.getValue(1); 4262 } 4263 4264 // Is this global address that of a function that can be called by name? (as 4265 // opposed to something that must hold a descriptor for an indirect call). 4266 static bool isFunctionGlobalAddress(SDValue Callee) { 4267 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 4268 if (Callee.getOpcode() == ISD::GlobalTLSAddress || 4269 Callee.getOpcode() == ISD::TargetGlobalTLSAddress) 4270 return false; 4271 4272 return G->getGlobal()->getValueType()->isFunctionTy(); 4273 } 4274 4275 return false; 4276 } 4277 4278 static 4279 unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, 4280 SDValue &Chain, SDValue CallSeqStart, SDLoc dl, int SPDiff, 4281 bool isTailCall, bool IsPatchPoint, bool hasNest, 4282 SmallVectorImpl<std::pair<unsigned, SDValue> > &RegsToPass, 4283 SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys, 4284 ImmutableCallSite *CS, const PPCSubtarget &Subtarget) { 4285 4286 bool isPPC64 = Subtarget.isPPC64(); 4287 bool isSVR4ABI = Subtarget.isSVR4ABI(); 4288 bool isELFv2ABI = Subtarget.isELFv2ABI(); 4289 4290 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4291 NodeTys.push_back(MVT::Other); // Returns a chain 4292 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. 4293 4294 unsigned CallOpc = PPCISD::CALL; 4295 4296 bool needIndirectCall = true; 4297 if (!isSVR4ABI || !isPPC64) 4298 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { 4299 // If this is an absolute destination address, use the munged value. 4300 Callee = SDValue(Dest, 0); 4301 needIndirectCall = false; 4302 } 4303 4304 if (isFunctionGlobalAddress(Callee)) { 4305 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee); 4306 // A call to a TLS address is actually an indirect call to a 4307 // thread-specific pointer. 
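    // (For this reason isFunctionGlobalAddress(), defined above, rejects
    // GlobalTLSAddress nodes, so TLS calls take the needIndirectCall path
    // instead of this block.)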
    unsigned OpFlags = 0;
    if ((DAG.getTarget().getRelocationModel() != Reloc::Static &&
         (Subtarget.getTargetTriple().isMacOSX() &&
          Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5)) &&
         !G->getGlobal()->isStrongDefinitionForLinker()) ||
        (Subtarget.isTargetELF() && !isPPC64 &&
         !G->getGlobal()->hasLocalLinkage() &&
         DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
      // PC-relative references to external symbols should go through $stub,
      // unless we're building with the Leopard linker or later, which
      // automatically synthesizes these stubs.
      OpFlags = PPCII::MO_PLT_OR_STUB;
    }

    // If the callee is a GlobalAddress/ExternalSymbol node (quite common,
    // every direct call is) turn it into a TargetGlobalAddress /
    // TargetExternalSymbol node so that legalize doesn't hack it.
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
                                        Callee.getValueType(), 0, OpFlags);
    needIndirectCall = false;
  }

  if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    unsigned char OpFlags = 0;

    if ((DAG.getTarget().getRelocationModel() != Reloc::Static &&
         (Subtarget.getTargetTriple().isMacOSX() &&
          Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5))) ||
        (Subtarget.isTargetELF() && !isPPC64 &&
         DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
      // PC-relative references to external symbols should go through $stub,
      // unless we're building with the Leopard linker or later, which
      // automatically synthesizes these stubs.
      OpFlags = PPCII::MO_PLT_OR_STUB;
    }

    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(),
                                         OpFlags);
    needIndirectCall = false;
  }

  if (IsPatchPoint) {
    // We'll form an invalid direct call when lowering a patchpoint; the full
    // sequence for an indirect call is complicated, and many of the
    // instructions introduced might have side effects (and, thus, can't be
    // removed later). The call itself will be removed as soon as the
    // argument/return lowering is complete, so the fact that it has the wrong
    // kind of operands should not really matter.
    needIndirectCall = false;
  }

  if (needIndirectCall) {
    // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair
    // to do the call; we can't use PPCISD::CALL.
    SDValue MTCTROps[] = {Chain, Callee, InFlag};

    if (isSVR4ABI && isPPC64 && !isELFv2ABI) {
      // Function pointers in the 64-bit SVR4 ABI do not point to the function
      // entry point, but to the function descriptor (the function entry point
      // address is part of the function descriptor though).
      // The function descriptor is a three doubleword structure with the
      // following fields: function entry point, TOC base address and
      // environment pointer.
      // Thus for a call through a function pointer, the following actions need
      // to be performed:
      //   1. Save the TOC of the caller in the TOC save area of its stack
      //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
      //   2. Load the address of the function entry point from the function
      //      descriptor.
      //   3. Load the TOC of the callee from the function descriptor into r2.
      //   4. Load the environment pointer from the function descriptor into
      //      r11.
      //   5. Branch to the function entry point address.
      //   6. On return of the callee, the TOC of the caller needs to be
      //      restored (this is done in FinishCall()).
      //
      // The loads are scheduled at the beginning of the call sequence, and the
      // register copies are flagged together to ensure that no other
      // operations can be scheduled in between. E.g. without flagging the
      // copies together, a TOC access in the caller could be scheduled between
      // the assignment of the callee TOC and the branch to the callee, which
      // results in the TOC access going through the TOC of the callee instead
      // of going through the TOC of the caller, which leads to incorrect code.

      // Load the address of the function entry point from the function
      // descriptor.
      SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1);
      if (LDChain.getValueType() == MVT::Glue)
        LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2);

      bool LoadsInv = Subtarget.hasInvariantFunctionDescriptors();

      MachinePointerInfo MPI(CS ? CS->getCalledValue() : nullptr);
      SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI,
                                        false, false, LoadsInv, 8);

      // Load environment pointer into r11.
      SDValue PtrOff = DAG.getIntPtrConstant(16, dl);
      SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff);
      SDValue LoadEnvPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddPtr,
                                       MPI.getWithOffset(16), false, false,
                                       LoadsInv, 8);

      SDValue TOCOff = DAG.getIntPtrConstant(8, dl);
      SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff);
      SDValue TOCPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddTOC,
                                   MPI.getWithOffset(8), false, false,
                                   LoadsInv, 8);

      setUsesTOCBasePtr(DAG);
      SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr,
                                        InFlag);
      Chain = TOCVal.getValue(0);
      InFlag = TOCVal.getValue(1);

      // If the function call has an explicit 'nest' parameter, it takes the
      // place of the environment pointer.
      if (!hasNest) {
        SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr,
                                          InFlag);

        Chain = EnvVal.getValue(0);
        InFlag = EnvVal.getValue(1);
      }

      MTCTROps[0] = Chain;
      MTCTROps[1] = LoadFuncPtr;
      MTCTROps[2] = InFlag;
    }

    Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys,
                        makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2));
    InFlag = Chain.getValue(1);

    NodeTys.clear();
    NodeTys.push_back(MVT::Other);
    NodeTys.push_back(MVT::Glue);
    Ops.push_back(Chain);
    CallOpc = PPCISD::BCTRL;
    Callee.setNode(nullptr);
    // Add use of X11 (holding environment pointer)
    if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest)
      Ops.push_back(DAG.getRegister(PPC::X11, PtrVT));
    // Add CTR register as callee so a bctr can be emitted later.
    if (isTailCall)
      Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT));
  }

  // If this is a direct call, pass the chain and the callee.
  if (Callee.getNode()) {
    Ops.push_back(Chain);
    Ops.push_back(Callee);
  }
  // If this is a tail call add stack pointer delta.
  if (isTailCall)
    Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));

  // Add argument registers to the end of the list so that they are known live
  // into the call.
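  // (Listing each argument register as an explicit operand on the call node
  // is how the DAG records that those physical registers are live across the
  // call; without it the preceding copies into them could be treated as dead.)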
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live
  // into the call.
  if (isSVR4ABI && isPPC64 && !IsPatchPoint) {
    setUsesTOCBasePtr(DAG);
    Ops.push_back(DAG.getRegister(PPC::X2, PtrVT));
  }

  return CallOpc;
}

static
bool isLocalCall(const SDValue &Callee)
{
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    return G->getGlobal()->isStrongDefinitionForLinker();
  return false;
}

SDValue
PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   SDLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());
  CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Val = DAG.getCopyFromReg(Chain, dl,
                                     VA.getLocReg(), VA.getLocVT(), InFlag);
    Chain = Val.getValue(1);
    InFlag = Val.getValue(2);

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::AExt:
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::ZExt:
      Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::SExt:
      Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

SDValue
PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl,
                              bool isTailCall, bool isVarArg, bool IsPatchPoint,
                              bool hasNest, SelectionDAG &DAG,
                              SmallVector<std::pair<unsigned, SDValue>, 8>
                                &RegsToPass,
                              SDValue InFlag, SDValue Chain,
                              SDValue CallSeqStart, SDValue &Callee,
                              int SPDiff, unsigned NumBytes,
                              const SmallVectorImpl<ISD::InputArg> &Ins,
                              SmallVectorImpl<SDValue> &InVals,
                              ImmutableCallSite *CS) const {

  std::vector<EVT> NodeTys;
  SmallVector<SDValue, 8> Ops;
  unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl,
                                 SPDiff, isTailCall, IsPatchPoint, hasNest,
                                 RegsToPass, Ops, NodeTys, CS, Subtarget);

  // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
  if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64())
    Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));

  // When performing tail call optimization the callee pops its arguments off
  // the stack. Account for this here so these bytes can be pushed back on in
  // PPCFrameLowering::eliminateCallFramePseudoInstr.
  int BytesCalleePops =
    (CallConv == CallingConv::Fast &&
     getTargetMachine().Options.GuaranteedTailCallOpt) ?
      NumBytes : 0;

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  // Emit tail call.
  if (isTailCall) {
    assert(((Callee.getOpcode() == ISD::Register &&
             cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
            Callee.getOpcode() == ISD::TargetExternalSymbol ||
            Callee.getOpcode() == ISD::TargetGlobalAddress ||
            isa<ConstantSDNode>(Callee)) &&
           "Expecting a global address, external symbol, absolute value or "
           "register");

    DAG.getMachineFunction().getFrameInfo()->setHasTailCall();
    return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
  }

  // Add a NOP immediately after the branch instruction when using the 64-bit
  // SVR4 ABI. At link time, if caller and callee are in different modules and
  // thus have different TOCs, the call will be replaced with a call to a stub
  // function which saves the current TOC, loads the TOC of the callee, and
  // branches to the callee. The NOP will be replaced with a load instruction
  // which restores the TOC of the caller from the TOC save slot of the current
  // stack frame. If caller and callee belong to the same module (and have the
  // same TOC), the NOP will remain unchanged.

  if (!isTailCall && Subtarget.isSVR4ABI() && Subtarget.isPPC64() &&
      !IsPatchPoint) {
    if (CallOpc == PPCISD::BCTRL) {
      // This is a call through a function pointer.
      // Restore the caller TOC from the save area into R2.
      // See PrepareCall() for more information about calls through function
      // pointers in the 64-bit SVR4 ABI.
      // We are using a target-specific load with r2 hard coded, because the
      // result of a target-independent load would never go directly into r2,
      // since r2 is a reserved register (which prevents the register allocator
      // from allocating it), resulting in an additional register being
      // allocated and an unnecessary move instruction being generated.
      CallOpc = PPCISD::BCTRL_LOAD_TOC;

      EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
      SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
      unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
      SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
      SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);

      // The address needs to go after the chain input but before the flag (or
      // any other variadic arguments).
      Ops.insert(std::next(Ops.begin()), AddTOC);
    } else if ((CallOpc == PPCISD::CALL) &&
               (!isLocalCall(Callee) ||
                DAG.getTarget().getRelocationModel() == Reloc::PIC_))
      // Otherwise insert NOP for non-local calls.
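      // As an illustrative sketch (not code emitted by this function
      // directly), the call site then assembles to
      //   bl callee
      //   nop
      // and, if the linker routes the call through a TOC-switching stub, it
      // rewrites the nop into a TOC restore such as "ld 2, 40(1)", using the
      // ELFv1 TOC save offset of 40 (ELFv2 uses offset 24).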
      CallOpc = PPCISD::CALL_NOP;
  }

  Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(BytesCalleePops, dl, true),
                             InFlag, dl);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
                         Ins, dl, DAG, InVals);
}

SDValue
PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;
  bool IsPatchPoint = CLI.IsPatchPoint;
  ImmutableCallSite *CS = CLI.CS;

  if (isTailCall) {
    if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
      isTailCall =
        IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS,
                                                 isVarArg, Outs, Ins, DAG);
    else
      isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
                                                     Ins, DAG);
    if (isTailCall) {
      ++NumTailCalls;
      if (!getTargetMachine().Options.GuaranteedTailCallOpt)
        ++NumSiblingCalls;

      assert(isa<GlobalAddressSDNode>(Callee) &&
             "Callee should be an llvm::Function object.");
      DEBUG(
        const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
        const unsigned Width = 80 - strlen("TCO caller: ")
                                  - strlen(", callee linkage: 0, 0");
        dbgs() << "TCO caller: "
               << left_justify(DAG.getMachineFunction().getName(), Width)
               << ", callee linkage: "
               << GV->getVisibility() << ", " << GV->getLinkage() << "\n"
      );
    }
  }

  if (!isTailCall && CS && CS->isMustTailCall())
    report_fatal_error("failed to perform tail call elimination on a call "
                       "site marked musttail");

  if (Subtarget.isSVR4ABI()) {
    if (Subtarget.isPPC64())
      return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
                              isTailCall, IsPatchPoint, Outs, OutVals, Ins,
                              dl, DAG, InVals, CS);
    else
      return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
                              isTailCall, IsPatchPoint, Outs, OutVals, Ins,
                              dl, DAG, InVals, CS);
  }

  return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
                          isTailCall, IsPatchPoint, Outs, OutVals, Ins,
                          dl, DAG, InVals, CS);
}

SDValue
PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
                                    CallingConv::ID CallConv, bool isVarArg,
                                    bool isTailCall, bool IsPatchPoint,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SDLoc dl, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &InVals,
                                    ImmutableCallSite *CS) const {
  // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
  // of the 32-bit SVR4 ABI stack frame layout.
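  // (For reference, assuming the usual 32-bit SVR4 layout: the linkage area
  // reserved below is 8 bytes, the back chain word plus the LR save word.)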

  assert((CallConv == CallingConv::C ||
          CallConv == CallingConv::Fast) && "Unknown calling convention!");

  unsigned PtrByteSize = 4;

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a tail call. As a
  // consequence, the frame pointer will be used for dynamic stack allocation
  // and for restoring the caller's stack pointer in this function's epilogue.
  // This is done because the tail-called function might overwrite the value
  // in this function's (MF) stack pointer stack slot 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, parameter list area and the part of the local variable space which
  // contains copies of aggregates which are passed by value.

  // Assign locations to all of the outgoing arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // Reserve space for the linkage area on the stack.
  CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
                       PtrByteSize);

  if (isVarArg) {
    // Handle fixed and variable vector arguments differently.
    // Fixed vector arguments go into registers as long as registers are
    // available. Variable vector arguments always go into memory.
    unsigned NumArgs = Outs.size();

    for (unsigned i = 0; i != NumArgs; ++i) {
      MVT ArgVT = Outs[i].VT;
      ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
      bool Result;

      if (Outs[i].IsFixed) {
        Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
                               CCInfo);
      } else {
        Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
                                      ArgFlags, CCInfo);
      }

      if (Result) {
#ifndef NDEBUG
        errs() << "Call operand #" << i << " has unhandled type "
               << EVT(ArgVT).getEVTString() << "\n";
#endif
        llvm_unreachable(nullptr);
      }
    }
  } else {
    // All arguments are treated the same.
    CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
  }

  // Assign locations to all of the outgoing aggregate by value arguments.
  SmallVector<CCValAssign, 16> ByValArgLocs;
  CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                      ByValArgLocs, *DAG.getContext());

  // Reserve stack space for the allocations in CCInfo.
  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);

  CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);

  // Size of the linkage area, parameter list area and the part of the local
  // variable space where copies of aggregates which are passed by value are
  // stored.
  unsigned NumBytes = CCByValInfo.getNextStackOffset();

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                               dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false,
                                       dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
  SmallVector<SDValue, 8> MemOpChains;

  bool seenFloatArg = false;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, j = 0, e = ArgLocs.size();
       i != e;
       ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;

    if (Flags.isByVal()) {
      // Argument is an aggregate which is passed by value, thus we need to
      // create a copy of it in the local variable space of the current stack
      // frame (which is the stack frame of the caller) and pass the address of
      // this copy to the callee.
      assert((j < ByValArgLocs.size()) && "Index out of bounds!");
      CCValAssign &ByValVA = ByValArgLocs[j++];
      assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");

      // Memory reserved in the local variable space of the caller's stack
      // frame.
      unsigned LocMemOffset = ByValVA.getLocMemOffset();

      SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
      PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
                           StackPtr, PtrOff);

      // Create a copy of the argument in the local area of the current
      // stack frame.
      SDValue MemcpyCall =
        CreateCopyOfByValArgument(Arg, PtrOff,
                                  CallSeqStart.getNode()->getOperand(0),
                                  Flags, DAG, dl);

      // This must go outside the CALLSEQ_START..END.
      SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
                                 CallSeqStart.getNode()->getOperand(1),
                                 SDLoc(MemcpyCall));
      DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
                             NewCallSeqStart.getNode());
      Chain = CallSeqStart = NewCallSeqStart;

      // Pass the address of the aggregate copy on the stack either in a
      // physical register or in the parameter list area of the current stack
      // frame to the callee.
      Arg = PtrOff;
    }

    if (VA.isRegLoc()) {
      if (Arg.getValueType() == MVT::i1)
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg);

      seenFloatArg |= VA.getLocVT().isFloatingPoint();
      // Put argument in a physical register.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      // Put argument in the parameter list area of the current stack frame.
      assert(VA.isMemLoc());
      unsigned LocMemOffset = VA.getLocMemOffset();

      if (!isTailCall) {
        SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
                             StackPtr, PtrOff);

        MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
                                           MachinePointerInfo(),
                                           false, false, 0));
      } else {
        // Calculate and remember argument location.
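        // (The store into the callee's frame is deferred to PrepareTailCall(),
        // which runs after all argument values have been loaded, so arguments
        // living in the overlapping stack area are not clobbered.)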
        CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
                                 TailCallArguments);
      }
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // Set CR bit 6 to true if this is a vararg call with floating args passed in
  // registers.
  if (isVarArg) {
    SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Ops[] = { Chain, InFlag };

    Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
                        dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));

    InFlag = Chain.getValue(1);
  }

  if (isTailCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp,
                    false, TailCallArguments);

  return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint,
                    /* unused except on PPC64 ELFv1 */ false, DAG,
                    RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
                    NumBytes, Ins, InVals, CS);
}

// Copy an argument into memory, being careful to do this outside the
// call sequence for the call to which the argument belongs.
SDValue
PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
                                              SDValue CallSeqStart,
                                              ISD::ArgFlagsTy Flags,
                                              SelectionDAG &DAG,
                                              SDLoc dl) const {
  SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
                        CallSeqStart.getNode()->getOperand(0),
                        Flags, DAG, dl);
  // The MEMCPY must go outside the CALLSEQ_START..END.
  SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
                             CallSeqStart.getNode()->getOperand(1),
                             SDLoc(MemcpyCall));
  DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
                         NewCallSeqStart.getNode());
  return NewCallSeqStart;
}

SDValue
PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
                                    CallingConv::ID CallConv, bool isVarArg,
                                    bool isTailCall, bool IsPatchPoint,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SDLoc dl, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &InVals,
                                    ImmutableCallSite *CS) const {

  bool isELFv2ABI = Subtarget.isELFv2ABI();
  bool isLittleEndian = Subtarget.isLittleEndian();
  unsigned NumOps = Outs.size();
  bool hasNest = false;
  bool IsSibCall = false;

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  unsigned PtrByteSize = 8;

  MachineFunction &MF = DAG.getMachineFunction();

  if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
    IsSibCall = true;

  // Mark this function as potentially containing a tail call. As a
  // consequence, the frame pointer will be used for dynamic stack allocation
  // and for restoring the caller's stack pointer in this function's epilogue.
  // This is done because the tail-called function might overwrite the value
  // in this function's (MF) stack pointer stack slot 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  assert(!(CallConv == CallingConv::Fast && isVarArg) &&
         "fastcc not supported on varargs functions");

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes
  // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
  // area is 32 bytes reserved space for [SP][CR][LR][TOC].
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned NumBytes = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  unsigned &QFPR_idx = FPR_idx;

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };
  static const MCPhysReg VSRH[] = {
    PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8,
    PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13
  };

  const unsigned NumGPRs = array_lengthof(GPR);
  const unsigned NumFPRs = 13;
  const unsigned NumVRs = array_lengthof(VR);
  const unsigned NumQFPRs = NumFPRs;

  // When using the fast calling convention, we don't provide backing for
  // arguments that will be in registers.
  unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;

  // Add up all the space actually used.
  for (unsigned i = 0; i != NumOps; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;

    if (Flags.isNest())
      continue;

    if (CallConv == CallingConv::Fast) {
      if (Flags.isByVal())
        NumGPRsUsed += (Flags.getByValSize()+7)/8;
      else
        switch (ArgVT.getSimpleVT().SimpleTy) {
        default: llvm_unreachable("Unexpected ValueType for argument!");
        case MVT::i1:
        case MVT::i32:
        case MVT::i64:
          if (++NumGPRsUsed <= NumGPRs)
            continue;
          break;
        case MVT::v4i32:
        case MVT::v8i16:
        case MVT::v16i8:
        case MVT::v2f64:
        case MVT::v2i64:
        case MVT::v1i128:
          if (++NumVRsUsed <= NumVRs)
            continue;
          break;
        case MVT::v4f32:
          // When using QPX, this is handled like a FP register, otherwise, it
          // is an Altivec register.
          if (Subtarget.hasQPX()) {
            if (++NumFPRsUsed <= NumFPRs)
              continue;
          } else {
            if (++NumVRsUsed <= NumVRs)
              continue;
          }
          break;
        case MVT::f32:
        case MVT::f64:
        case MVT::v4f64: // QPX
        case MVT::v4i1:  // QPX
          if (++NumFPRsUsed <= NumFPRs)
            continue;
          break;
        }
    }

    /* Respect alignment of argument on the stack. */
    unsigned Align =
      CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
    NumBytes = ((NumBytes + Align - 1) / Align) * Align;

    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
    if (Flags.isInConsecutiveRegsLast())
      NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
  }

  unsigned NumBytesActuallyUsed = NumBytes;

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if the callee
  // is a varargs function.
  // Because we cannot tell if this is needed on the caller side, we have to
  // conservatively assume that it is needed. As such, make sure we have at
  // least enough stack space for the caller to store the 8 GPRs.
  // FIXME: On ELFv2, it may be unnecessary to allocate the parameter area.
  NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);

  // Tail call needs the stack to be aligned.
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);

  int SPDiff = 0;

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  if (!IsSibCall)
    SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // To protect arguments on the stack from being clobbered in a tail call,
  // force all the loads to happen before doing any other lowering.
  if (isTailCall)
    Chain = DAG.getStackArgumentTokenFactor(Chain);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain,
                                 DAG.getIntPtrConstant(NumBytes, dl, true), dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
                                       dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);

  // Figure out which arguments are going to go in registers, and which in
  // memory. Also, if this is a vararg function, floating point operations
  // must be stored to our stack, and loaded into integer regs as well, if
  // any integer regs are available for argument passing.
  unsigned ArgOffset = LinkageSize;

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;

  SmallVector<SDValue, 8> MemOpChains;
  for (unsigned i = 0; i != NumOps; ++i) {
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDValue PtrOff;

    // We re-align the argument offset for each argument, except when using the
    // fast calling convention, when we need to make sure we do that only when
    // we'll actually use a stack slot.
    auto ComputePtrOff = [&]() {
      /* Respect alignment of argument on the stack. */
      unsigned Align =
        CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
      ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;

      PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());

      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
    };

    if (CallConv != CallingConv::Fast) {
      ComputePtrOff();

      /* Compute GPR index associated with argument offset. */
      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, NumGPRs);
    }

    // Promote integers to 64-bit values.
    if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
      // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
      unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
      Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
    }

    // FIXME memcpy is used way more than necessary. Correctness first.
    // Note: "by value" is code for passing a structure by value, not
    // basic types.
    if (Flags.isByVal()) {
      // Note: Size includes alignment padding, so
      //   struct x { short a; char b; }
      // will have Size = 4. With #pragma pack(1), it will have Size = 3.
      // These are the proper values we need for right-justifying the
      // aggregate in a parameter register.
      unsigned Size = Flags.getByValSize();

      // An empty aggregate parameter takes up no storage and no
      // registers.
      if (Size == 0)
        continue;

      if (CallConv == CallingConv::Fast)
        ComputePtrOff();

      // All aggregates smaller than 8 bytes must be passed right-justified.
      if (Size==1 || Size==2 || Size==4) {
        EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
        if (GPR_idx != NumGPRs) {
          SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
                                        MachinePointerInfo(), VT,
                                        false, false, false, 0);
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));

          ArgOffset += PtrByteSize;
          continue;
        }
      }

      if (GPR_idx == NumGPRs && Size < 8) {
        SDValue AddPtr = PtrOff;
        if (!isLittleEndian) {
          SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
                                          PtrOff.getValueType());
          AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
        }
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);
        ArgOffset += PtrByteSize;
        continue;
      }
      // Copy entire object into memory. There are cases where gcc-generated
      // code assumes it is there, even if it could be put entirely into
      // registers. (This is not what the doc says.)

      // FIXME: The above statement is likely due to a misunderstanding of the
      // documents. All arguments must be copied into the parameter area BY
      // THE CALLEE in the event that the callee takes the address of any
      // formal argument. That has not yet been implemented. However, it is
      // reasonable to use the stack area as a staging area for the register
      // load.

      // Skip this for small aggregates, as we will use the same slot for a
      // right-justified copy, below.
      if (Size >= 8)
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);

      // When a register is available, pass a small aggregate right-justified.
      if (Size < 8 && GPR_idx != NumGPRs) {
        // The easiest way to get this right-justified in a register
        // is to copy the structure into the rightmost portion of a
        // local variable slot, then load the whole slot into the
        // register.
        // FIXME: The memcpy seems to produce pretty awful code for
        // small aggregates, particularly for packed ones.
        // FIXME: It would be preferable to use the slot in the
        // parameter save area instead of a new local variable.
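        // Illustration: on a big-endian target, a 3-byte aggregate is
        // memcpy'd below to AddPtr = PtrOff + (8 - 3), i.e. into bytes 5..7
        // of the doubleword slot, so the full-slot load that follows leaves
        // the data right-justified in the GPR.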
        SDValue AddPtr = PtrOff;
        if (!isLittleEndian) {
          SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
          AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
        }
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);

        // Load the slot into the register.
        SDValue Load = DAG.getLoad(PtrVT, dl, Chain, PtrOff,
                                   MachinePointerInfo(),
                                   false, false, false, 0);
        MemOpChains.push_back(Load.getValue(1));
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));

        // Done with this argument.
        ArgOffset += PtrByteSize;
        continue;
      }

      // For aggregates larger than PtrByteSize, copy the pieces of the
      // object that fit into registers from the parameter save area.
      for (unsigned j=0; j<Size; j+=PtrByteSize) {
        SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
        SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
        if (GPR_idx != NumGPRs) {
          SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
                                     MachinePointerInfo(),
                                     false, false, false, 0);
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
          break;
        }
      }
      continue;
    }

    switch (Arg.getSimpleValueType().SimpleTy) {
    default: llvm_unreachable("Unexpected ValueType for argument!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (Flags.isNest()) {
        // The 'nest' parameter, if any, is passed in R11.
        RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
        hasNest = true;
        break;
      }

      // These can be scalar arguments or elements of an integer array type
      // passed directly. Clang may use those instead of "byval" aggregate
      // types to avoid forcing arguments to memory unnecessarily.
      if (GPR_idx != NumGPRs) {
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
      } else {
        if (CallConv == CallingConv::Fast)
          ComputePtrOff();

        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);
        if (CallConv == CallingConv::Fast)
          ArgOffset += PtrByteSize;
      }
      if (CallConv != CallingConv::Fast)
        ArgOffset += PtrByteSize;
      break;
    case MVT::f32:
    case MVT::f64: {
      // These can be scalar arguments or elements of a float array type
      // passed directly. The latter are used to implement ELFv2 homogeneous
      // float aggregates.

      // Named arguments go into FPRs first, and once they overflow, the
      // remaining arguments go into GPRs and then the parameter save area.
      // Unnamed arguments for vararg functions always go to GPRs and
      // then the parameter save area. For now, put all arguments to vararg
      // routines always in both locations (FPR *and* GPR or stack slot).
      bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs;
      bool NeededLoad = false;

      // First load the argument into the next available FPR.
      if (FPR_idx != NumFPRs)
        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));

      // Next, load the argument into GPR or stack slot if needed.
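      // (Illustrative example: in a vararg call such as printf("%f\n", X),
      // X travels in an FPR and is also mirrored into a GPR or stack word
      // below, so the callee's va_arg can retrieve it without a prototype.)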
      if (!NeedGPROrStack)
        ;
      else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) {
        // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
        // once we support fp <-> gpr moves.

        // In the non-vararg case, this can only ever happen in the
        // presence of f32 array types, since otherwise we never run
        // out of FPRs before running out of GPRs.
        SDValue ArgVal;

        // Double values are always passed in a single GPR.
        if (Arg.getValueType() != MVT::f32) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);

        // Non-array float values are extended and passed in a GPR.
        } else if (!Flags.isInConsecutiveRegs()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);

        // If we have an array of floats, we collect every odd element
        // together with its predecessor into one GPR.
        } else if (ArgOffset % PtrByteSize != 0) {
          SDValue Lo, Hi;
          Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
          Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          if (!isLittleEndian)
            std::swap(Lo, Hi);
          ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);

        // The final element, if even, goes into the first half of a GPR.
        } else if (Flags.isInConsecutiveRegsLast()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
          if (!isLittleEndian)
            ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
                                 DAG.getConstant(32, dl, MVT::i32));

        // Non-final even elements are skipped; they will be handled
        // together with the subsequent argument on the next go-around.
        } else
          ArgVal = SDValue();

        if (ArgVal.getNode())
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
      } else {
        if (CallConv == CallingConv::Fast)
          ComputePtrOff();

        // Single-precision floating-point values are mapped to the
        // second (rightmost) word of the stack doubleword.
        if (Arg.getValueType() == MVT::f32 &&
            !isLittleEndian && !Flags.isInConsecutiveRegs()) {
          SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
          PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
        }

        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);

        NeededLoad = true;
      }
      // When passing an array of floats, the array occupies consecutive
      // space in the argument area; only round up to the next doubleword
      // at the end of the array. Otherwise, each float takes 8 bytes.
      if (CallConv != CallingConv::Fast || NeededLoad) {
        ArgOffset += (Arg.getValueType() == MVT::f32 &&
                      Flags.isInConsecutiveRegs()) ? 4 : 8;
        if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      }
      break;
    }
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
    case MVT::v2f64:
    case MVT::v2i64:
    case MVT::v1i128:
      if (!Subtarget.hasQPX()) {
        // These can be scalar arguments or elements of a vector array type
        // passed directly. The latter are used to implement ELFv2 homogeneous
        // vector aggregates.
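        // (Illustrative example: under ELFv2 a parameter of a type like
        //   struct { vector int a, b; }
        // may be split into two v4i32 elements and passed in consecutive VRs
        // rather than as a byval aggregate in memory.)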

        // For a varargs call, named arguments go into VRs or on the stack as
        // usual; unnamed arguments always go to the stack or the corresponding
        // GPRs when within range. For now, we always put the value in both
        // locations (or even all three).
        if (isVarArg) {
          // We could elide this store in the case where the object fits
          // entirely in R registers. Maybe later.
          SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
                                       MachinePointerInfo(), false, false, 0);
          MemOpChains.push_back(Store);
          if (VR_idx != NumVRs) {
            SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff,
                                       MachinePointerInfo(),
                                       false, false, false, 0);
            MemOpChains.push_back(Load.getValue(1));

            unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 ||
                             Arg.getSimpleValueType() == MVT::v2i64) ?
                            VSRH[VR_idx] : VR[VR_idx];
            ++VR_idx;

            RegsToPass.push_back(std::make_pair(VReg, Load));
          }
          ArgOffset += 16;
          for (unsigned i=0; i<16; i+=PtrByteSize) {
            if (GPR_idx == NumGPRs)
              break;
            SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
                                     DAG.getConstant(i, dl, PtrVT));
            SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix,
                                       MachinePointerInfo(),
                                       false, false, false, 0);
            MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          }
          break;
        }

        // Non-varargs Altivec params go into VRs or on the stack.
        if (VR_idx != NumVRs) {
          unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 ||
                           Arg.getSimpleValueType() == MVT::v2i64) ?
                          VSRH[VR_idx] : VR[VR_idx];
          ++VR_idx;

          RegsToPass.push_back(std::make_pair(VReg, Arg));
        } else {
          if (CallConv == CallingConv::Fast)
            ComputePtrOff();

          LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                           true, isTailCall, true, MemOpChains,
                           TailCallArguments, dl);
          if (CallConv == CallingConv::Fast)
            ArgOffset += 16;
        }

        if (CallConv != CallingConv::Fast)
          ArgOffset += 16;
        break;
      } // not QPX

      assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 &&
             "Invalid QPX parameter type");

      /* fall through */
    case MVT::v4f64:
    case MVT::v4i1: {
      bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32;
      if (isVarArg) {
        // We could elide this store in the case where the object fits
        // entirely in R registers. Maybe later.
        SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
                                     MachinePointerInfo(), false, false, 0);
        MemOpChains.push_back(Store);
        if (QFPR_idx != NumQFPRs) {
          SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl,
                                     Store, PtrOff, MachinePointerInfo(),
                                     false, false, false, 0);
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load));
        }
        ArgOffset += (IsF32 ? 16 : 32);
        for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) {
          if (GPR_idx == NumGPRs)
            break;
          SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
                                   DAG.getConstant(i, dl, PtrVT));
          SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix,
                                     MachinePointerInfo(),
                                     false, false, false, 0);
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        }
        break;
      }

      // Non-varargs QPX params go into registers or on the stack.
      if (QFPR_idx != NumQFPRs) {
        RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg));
      } else {
        if (CallConv == CallingConv::Fast)
          ComputePtrOff();

        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, isTailCall, true, MemOpChains,
                         TailCallArguments, dl);
        if (CallConv == CallingConv::Fast)
          ArgOffset += (IsF32 ? 16 : 32);
      }

      if (CallConv != CallingConv::Fast)
        ArgOffset += (IsF32 ? 16 : 32);
      break;
    }
    }
  }

  assert(NumBytesActuallyUsed == ArgOffset);
  (void)NumBytesActuallyUsed;

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Check if this is an indirect call (MTCTR/BCTRL).
  // See PrepareCall() for more information about calls through function
  // pointers in the 64-bit SVR4 ABI.
  if (!isTailCall && !IsPatchPoint &&
      !isFunctionGlobalAddress(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee)) {
    // Load r2 into a virtual register and store it to the TOC save area.
    setUsesTOCBasePtr(DAG);
    SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
    // TOC save area offset.
    unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
    SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
    SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
    Chain = DAG.getStore(
        Val.getValue(1), dl, Val, AddPtr,
        MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset),
        false, false, 0);
    // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
    // This does not mean the MTCTR instruction must use R12; it's easier
    // to model this as an extra parameter, so do that.
    if (isELFv2ABI && !IsPatchPoint)
      RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (isTailCall && !IsSibCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, true, SPDiff, NumBytes, LROp,
                    FPOp, true, TailCallArguments);

  return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint, hasNest,
                    DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee,
                    SPDiff, NumBytes, Ins, InVals, CS);
}

SDValue
PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
                                    CallingConv::ID CallConv, bool isVarArg,
                                    bool isTailCall, bool IsPatchPoint,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SDLoc dl, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &InVals,
                                    ImmutableCallSite *CS) const {

  unsigned NumOps = Outs.size();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;
  unsigned PtrByteSize = isPPC64 ? 8 : 4;

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a tail call.
  // As a consequence, the frame pointer will be used for dynamic stack
  // allocation and for restoring the caller's stack pointer in this function's
  // epilogue. This is done because the tail-called function might overwrite
  // the value in this function's (MF) stack pointer stack slot 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, and parameter passing area. We start with 24/48 bytes, which is
  // prereserved space for [SP][CR][LR][3 x unused].
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned NumBytes = LinkageSize;

  // Add up all the space actually used.
  // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
  // they all go in registers, but we must reserve stack space for them for
  // possible use by the caller. In varargs or 64-bit calls, parameters are
  // assigned stack space in order, with padding so Altivec parameters are
  // 16-byte aligned.
  unsigned nAltivecParamsAtEnd = 0;
  for (unsigned i = 0; i != NumOps; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    // Varargs Altivec parameters are padded to a 16-byte boundary.
    if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
        ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
        ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
      if (!isVarArg && !isPPC64) {
        // Non-varargs Altivec parameters go after all the non-Altivec
        // parameters; handle those later so we know how much padding we need.
        nAltivecParamsAtEnd++;
        continue;
      }
      // Varargs and 64-bit Altivec parameters are padded to a 16-byte
      // boundary.
      NumBytes = ((NumBytes+15)/16)*16;
    }
    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
  }

  // Allow for Altivec parameters at the end, if needed.
  if (nAltivecParamsAtEnd) {
    NumBytes = ((NumBytes+15)/16)*16;
    NumBytes += 16*nAltivecParamsAtEnd;
  }

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if the callee
  // is a varargs function.
  // Because we cannot tell if this is needed on the caller side, we have to
  // conservatively assume that it is needed. As such, make sure we have at
  // least enough stack space for the caller to store the 8 GPRs.
  NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);

  // Tail call needs the stack to be aligned.
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // To protect arguments on the stack from being clobbered in a tail call,
  // force all the loads to happen before doing any other lowering.
  if (isTailCall)
    Chain = DAG.getStackArgumentTokenFactor(Chain);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                               dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
                                       dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr;
  if (isPPC64)
    StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
  else
    StackPtr = DAG.getRegister(PPC::R1, MVT::i32);

  // Figure out which arguments are going to go in registers, and which in
  // memory. Also, if this is a vararg function, floating point operations
  // must be stored to our stack, and loaded into integer regs as well, if
  // any integer regs are available for argument passing.
  unsigned ArgOffset = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  static const MCPhysReg GPR_32[] = {           // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const MCPhysReg GPR_64[] = {           // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };
  const unsigned NumGPRs = array_lengthof(GPR_32);
  const unsigned NumFPRs = 13;
  const unsigned NumVRs = array_lengthof(VR);

  const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;

  SmallVector<SDValue, 8> MemOpChains;
  for (unsigned i = 0; i != NumOps; ++i) {
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDValue PtrOff;

    PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());

    PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);

    // On PPC64, promote integers to 64-bit values.
    if (isPPC64 && Arg.getValueType() == MVT::i32) {
      // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
      unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
      Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
    }

    // FIXME memcpy is used way more than necessary. Correctness first.
    // Note: "by value" is code for passing a structure by value, not
    // basic types.
    if (Flags.isByVal()) {
      unsigned Size = Flags.getByValSize();
      // Very small objects are passed right-justified. Everything else is
      // passed left-justified.
      if (Size==1 || Size==2) {
        EVT VT = (Size==1) ?
                 MVT::i8 : MVT::i16;
        if (GPR_idx != NumGPRs) {
          SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
                                        MachinePointerInfo(), VT,
                                        false, false, false, 0);
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));

          ArgOffset += PtrByteSize;
        } else {
          SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
                                          PtrOff.getValueType());
          SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
          Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                            CallSeqStart,
                                                            Flags, DAG, dl);
          ArgOffset += PtrByteSize;
        }
        continue;
      }
      // Copy entire object into memory. There are cases where gcc-generated
      // code assumes it is there, even if it could be put entirely into
      // registers. (This is not what the doc says.)
      Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
                                                        CallSeqStart,
                                                        Flags, DAG, dl);

      // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
      // copy the pieces of the object that fit into registers from the
      // parameter save area.
      for (unsigned j=0; j<Size; j+=PtrByteSize) {
        SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
        SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
        if (GPR_idx != NumGPRs) {
          SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
                                     MachinePointerInfo(),
                                     false, false, false, 0);
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
          break;
        }
      }
      continue;
    }

    switch (Arg.getSimpleValueType().SimpleTy) {
    default: llvm_unreachable("Unexpected ValueType for argument!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (GPR_idx != NumGPRs) {
        if (Arg.getValueType() == MVT::i1)
          Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);

        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
      } else {
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         isPPC64, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);
      }
      ArgOffset += PtrByteSize;
      break;
    case MVT::f32:
    case MVT::f64:
      if (FPR_idx != NumFPRs) {
        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));

        if (isVarArg) {
          SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
                                       MachinePointerInfo(), false, false, 0);
          MemOpChains.push_back(Store);

          // Float varargs are always shadowed in available integer registers.
          if (GPR_idx != NumGPRs) {
            SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
                                       MachinePointerInfo(), false, false,
                                       false, 0);
            MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          }
          if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
            SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
            PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
            SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
                                       MachinePointerInfo(),
                                       false, false, false, 0);
            MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          }
        } else {
          // If we have any FPRs remaining, we may also have GPRs remaining.
5841 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 5842 // GPRs. 5843 if (GPR_idx != NumGPRs) 5844 ++GPR_idx; 5845 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 5846 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 5847 ++GPR_idx; 5848 } 5849 } else 5850 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5851 isPPC64, isTailCall, false, MemOpChains, 5852 TailCallArguments, dl); 5853 if (isPPC64) 5854 ArgOffset += 8; 5855 else 5856 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 5857 break; 5858 case MVT::v4f32: 5859 case MVT::v4i32: 5860 case MVT::v8i16: 5861 case MVT::v16i8: 5862 if (isVarArg) { 5863 // These go aligned on the stack, or in the corresponding R registers 5864 // when within range. The Darwin PPC ABI doc claims they also go in 5865 // V registers; in fact gcc does this only for arguments that are 5866 // prototyped, not for those that match the ... We do it for all 5867 // arguments, seems to work. 5868 while (ArgOffset % 16 !=0) { 5869 ArgOffset += PtrByteSize; 5870 if (GPR_idx != NumGPRs) 5871 GPR_idx++; 5872 } 5873 // We could elide this store in the case where the object fits 5874 // entirely in R registers. Maybe later. 5875 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 5876 DAG.getConstant(ArgOffset, dl, PtrVT)); 5877 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5878 MachinePointerInfo(), false, false, 0); 5879 MemOpChains.push_back(Store); 5880 if (VR_idx != NumVRs) { 5881 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 5882 MachinePointerInfo(), 5883 false, false, false, 0); 5884 MemOpChains.push_back(Load.getValue(1)); 5885 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 5886 } 5887 ArgOffset += 16; 5888 for (unsigned i=0; i<16; i+=PtrByteSize) { 5889 if (GPR_idx == NumGPRs) 5890 break; 5891 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5892 DAG.getConstant(i, dl, PtrVT)); 5893 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 5894 false, false, false, 0); 5895 MemOpChains.push_back(Load.getValue(1)); 5896 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5897 } 5898 break; 5899 } 5900 5901 // Non-varargs Altivec params generally go in registers, but have 5902 // stack space allocated at the end. 5903 if (VR_idx != NumVRs) { 5904 // Doesn't have GPR space allocated. 5905 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 5906 } else if (nAltivecParamsAtEnd==0) { 5907 // We are emitting Altivec params in order. 5908 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5909 isPPC64, isTailCall, true, MemOpChains, 5910 TailCallArguments, dl); 5911 ArgOffset += 16; 5912 } 5913 break; 5914 } 5915 } 5916 // If all Altivec parameters fit in registers, as they usually do, 5917 // they get stack space following the non-Altivec parameters. We 5918 // don't track this here because nobody below needs it. 5919 // If there are more Altivec parameters than fit in registers emit 5920 // the stores here. 5921 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 5922 unsigned j = 0; 5923 // Offset is aligned; skip 1st 12 params which go in V registers. 5924 ArgOffset = ((ArgOffset+15)/16)*16; 5925 ArgOffset += 12*16; 5926 for (unsigned i = 0; i != NumOps; ++i) { 5927 SDValue Arg = OutVals[i]; 5928 EVT ArgType = Outs[i].VT; 5929 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 5930 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 5931 if (++j > NumVRs) { 5932 SDValue PtrOff; 5933 // We are emitting Altivec params in order. 
5934 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5935 isPPC64, isTailCall, true, MemOpChains, 5936 TailCallArguments, dl); 5937 ArgOffset += 16; 5938 } 5939 } 5940 } 5941 } 5942 5943 if (!MemOpChains.empty()) 5944 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5945 5946 // On Darwin, R12 must contain the address of an indirect callee. This does 5947 // not mean the MTCTR instruction must use R12; it's easier to model this as 5948 // an extra parameter, so do that. 5949 if (!isTailCall && 5950 !isFunctionGlobalAddress(Callee) && 5951 !isa<ExternalSymbolSDNode>(Callee) && 5952 !isBLACompatibleAddress(Callee, DAG)) 5953 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 : 5954 PPC::R12), Callee)); 5955 5956 // Build a sequence of copy-to-reg nodes chained together with token chain 5957 // and flag operands which copy the outgoing args into the appropriate regs. 5958 SDValue InFlag; 5959 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 5960 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 5961 RegsToPass[i].second, InFlag); 5962 InFlag = Chain.getValue(1); 5963 } 5964 5965 if (isTailCall) 5966 PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp, 5967 FPOp, true, TailCallArguments); 5968 5969 return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint, 5970 /* unused except on PPC64 ELFv1 */ false, DAG, 5971 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 5972 NumBytes, Ins, InVals, CS); 5973 } 5974 5975 bool 5976 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 5977 MachineFunction &MF, bool isVarArg, 5978 const SmallVectorImpl<ISD::OutputArg> &Outs, 5979 LLVMContext &Context) const { 5980 SmallVector<CCValAssign, 16> RVLocs; 5981 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 5982 return CCInfo.CheckReturn(Outs, RetCC_PPC); 5983 } 5984 5985 SDValue 5986 PPCTargetLowering::LowerReturn(SDValue Chain, 5987 CallingConv::ID CallConv, bool isVarArg, 5988 const SmallVectorImpl<ISD::OutputArg> &Outs, 5989 const SmallVectorImpl<SDValue> &OutVals, 5990 SDLoc dl, SelectionDAG &DAG) const { 5991 5992 SmallVector<CCValAssign, 16> RVLocs; 5993 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 5994 *DAG.getContext()); 5995 CCInfo.AnalyzeReturn(Outs, RetCC_PPC); 5996 5997 SDValue Flag; 5998 SmallVector<SDValue, 4> RetOps(1, Chain); 5999 6000 // Copy the result values into the output registers. 
6001 for (unsigned i = 0; i != RVLocs.size(); ++i) {
6002 CCValAssign &VA = RVLocs[i];
6003 assert(VA.isRegLoc() && "Can only return in registers!");
6004
6005 SDValue Arg = OutVals[i];
6006
6007 switch (VA.getLocInfo()) {
6008 default: llvm_unreachable("Unknown loc info!");
6009 case CCValAssign::Full: break;
6010 case CCValAssign::AExt:
6011 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
6012 break;
6013 case CCValAssign::ZExt:
6014 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
6015 break;
6016 case CCValAssign::SExt:
6017 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
6018 break;
6019 }
6020
6021 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
6022 Flag = Chain.getValue(1);
6023 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
6024 }
6025
6026 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
6027 const MCPhysReg *I =
6028 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
6029 if (I) {
6030 for (; *I; ++I) {
6031
6032 if (PPC::G8RCRegClass.contains(*I))
6033 RetOps.push_back(DAG.getRegister(*I, MVT::i64));
6034 else if (PPC::F8RCRegClass.contains(*I))
6035 RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
6036 else if (PPC::CRRCRegClass.contains(*I))
6037 RetOps.push_back(DAG.getRegister(*I, MVT::i1));
6038 else if (PPC::VRRCRegClass.contains(*I))
6039 RetOps.push_back(DAG.getRegister(*I, MVT::Other));
6040 else
6041 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
6042 }
6043 }
6044
6045 RetOps[0] = Chain; // Update chain.
6046
6047 // Add the flag if we have it.
6048 if (Flag.getNode())
6049 RetOps.push_back(Flag);
6050
6051 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
6052 }
6053
6054 SDValue PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(
6055 SDValue Op, SelectionDAG &DAG, const PPCSubtarget &Subtarget) const {
6056 SDLoc dl(Op);
6057
6058 // Get the correct type for integers.
6059 EVT IntVT = Op.getValueType();
6060
6061 // Get the inputs.
6062 SDValue Chain = Op.getOperand(0);
6063 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
6064 // Build a DYNAREAOFFSET node.
6065 SDValue Ops[2] = {Chain, FPSIdx};
6066 SDVTList VTs = DAG.getVTList(IntVT);
6067 return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
6068 }
6069
6070 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
6071 const PPCSubtarget &Subtarget) const {
6072 // When we pop the dynamic allocation we need to restore the SP link.
6073 SDLoc dl(Op);
6074
6075 // Get the correct type for pointers.
6076 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
6077
6078 // Construct the stack pointer operand.
6079 bool isPPC64 = Subtarget.isPPC64();
6080 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
6081 SDValue StackPtr = DAG.getRegister(SP, PtrVT);
6082
6083 // Get the operands for the STACKRESTORE.
6084 SDValue Chain = Op.getOperand(0);
6085 SDValue SaveSP = Op.getOperand(1);
6086
6087 // Load the old link SP.
6088 SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr,
6089 MachinePointerInfo(),
6090 false, false, false, 0);
6091
6092 // Restore the stack pointer.
6093 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
6094
6095 // Store the old link SP.
6096 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo(),
6097 false, false, 0);
6098 }
6099
6100 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
6101 MachineFunction &MF = DAG.getMachineFunction();
6102 bool isPPC64 = Subtarget.isPPC64();
6103 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
6104
6105 // Get the current return address save index; this refers to the slot in
6106 // which the link register value is saved.
6107 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
6108 int RASI = FI->getReturnAddrSaveIndex();
6109
6110 // If the return address save index hasn't been defined yet, set it up.
6111 if (!RASI) {
6112 // Find out the fixed offset of the return address save area.
6113 int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
6114 // Allocate the frame index for the return address save area.
6115 RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
6116 // Save the result.
6117 FI->setReturnAddrSaveIndex(RASI);
6118 }
6119 return DAG.getFrameIndex(RASI, PtrVT);
6120 }
6121
6122 SDValue
6123 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
6124 MachineFunction &MF = DAG.getMachineFunction();
6125 bool isPPC64 = Subtarget.isPPC64();
6126 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
6127
6128 // Get the current frame pointer save index. The users of this index will be
6129 // primarily DYNALLOC instructions.
6130 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
6131 int FPSI = FI->getFramePointerSaveIndex();
6132
6133 // If the frame pointer save index hasn't been defined yet, set it up.
6134 if (!FPSI) {
6135 // Find out the fixed offset of the frame pointer save area.
6136 int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
6137 // Allocate the frame index for the frame pointer save area.
6138 FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
6139 // Save the result.
6140 FI->setFramePointerSaveIndex(FPSI);
6141 }
6142 return DAG.getFrameIndex(FPSI, PtrVT);
6143 }
6144
6145 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
6146 SelectionDAG &DAG,
6147 const PPCSubtarget &Subtarget) const {
6148 // Get the inputs.
6149 SDValue Chain = Op.getOperand(0);
6150 SDValue Size = Op.getOperand(1);
6151 SDLoc dl(Op);
6152
6153 // Get the correct type for pointers.
6154 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
6155 // Negate the size.
6156 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
6157 DAG.getConstant(0, dl, PtrVT), Size);
6158 // Construct a node for the frame pointer save index.
6159 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
6160 // Build a DYNALLOC node.
6161 SDValue Ops[3] = { Chain, NegSize, FPSIdx };
6162 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
6163 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
6164 }
6165
6166 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
6167 SelectionDAG &DAG) const {
6168 SDLoc DL(Op);
6169 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
6170 DAG.getVTList(MVT::i32, MVT::Other),
6171 Op.getOperand(0), Op.getOperand(1));
6172 }
6173
6174 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
6175 SelectionDAG &DAG) const {
6176 SDLoc DL(Op);
6177 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
6178 Op.getOperand(0), Op.getOperand(1));
6179 }
6180
6181 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
6182 if (Op.getValueType().isVector())
6183 return LowerVectorLoad(Op, DAG);
6184
6185 assert(Op.getValueType() == MVT::i1 &&
6186 "Custom lowering only for i1 loads");
6187
6188 // First, load 8 bits into 32 bits, then truncate to 1 bit.
6189
6190 SDLoc dl(Op);
6191 LoadSDNode *LD = cast<LoadSDNode>(Op);
6192
6193 SDValue Chain = LD->getChain();
6194 SDValue BasePtr = LD->getBasePtr();
6195 MachineMemOperand *MMO = LD->getMemOperand();
6196
6197 SDValue NewLD =
6198 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
6199 BasePtr, MVT::i8, MMO);
6200 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
6201
6202 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
6203 return DAG.getMergeValues(Ops, dl);
6204 }
6205
6206 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
6207 if (Op.getOperand(1).getValueType().isVector())
6208 return LowerVectorStore(Op, DAG);
6209
6210 assert(Op.getOperand(1).getValueType() == MVT::i1 &&
6211 "Custom lowering only for i1 stores");
6212
6213 // First, zero extend to 32 bits, then use a truncating store to 8 bits.
6214
6215 SDLoc dl(Op);
6216 StoreSDNode *ST = cast<StoreSDNode>(Op);
6217
6218 SDValue Chain = ST->getChain();
6219 SDValue BasePtr = ST->getBasePtr();
6220 SDValue Value = ST->getValue();
6221 MachineMemOperand *MMO = ST->getMemOperand();
6222
6223 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
6224 Value);
6225 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
6226 }
6227
6228 // FIXME: Remove this once the ANDI glue bug is fixed:
6229 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
6230 assert(Op.getValueType() == MVT::i1 &&
6231 "Custom lowering only for i1 results");
6232
6233 SDLoc DL(Op);
6234 return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1,
6235 Op.getOperand(0));
6236 }
6237
6238 /// LowerSELECT_CC - Lower floating-point select_cc's into an fsel instruction
6239 /// when possible.
6240 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
6241 // Not FP? Not an fsel.
6242 if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
6243 !Op.getOperand(2).getValueType().isFloatingPoint())
6244 return Op;
6245
6246 // We might be able to do better than this under some circumstances, but in
6247 // general, fsel-based lowering of select is a finite-math-only optimization.
6248 // For more information, see section F.3 of the 2.06 ISA specification.
6249 if (!DAG.getTarget().Options.NoInfsFPMath ||
6250 !DAG.getTarget().Options.NoNaNsFPMath)
6251 return Op;
6252 // TODO: Propagate flags from the select rather than global settings.
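// As a concrete example of the lowering that follows (a sketch; the exact
// node sequence is built below): under no-infs/no-nans,
//   select_cc setge LHS, RHS, TV, FV
// becomes
//   fsel (fsub LHS, RHS), TV, FV
// because fsel selects its second operand when the first is >= 0.0 and its
// third operand otherwise, so the branch disappears entirely.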
6253 SDNodeFlags Flags; 6254 Flags.setNoInfs(true); 6255 Flags.setNoNaNs(true); 6256 6257 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 6258 6259 EVT ResVT = Op.getValueType(); 6260 EVT CmpVT = Op.getOperand(0).getValueType(); 6261 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 6262 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3); 6263 SDLoc dl(Op); 6264 6265 // If the RHS of the comparison is a 0.0, we don't need to do the 6266 // subtraction at all. 6267 SDValue Sel1; 6268 if (isFloatingPointZero(RHS)) 6269 switch (CC) { 6270 default: break; // SETUO etc aren't handled by fsel. 6271 case ISD::SETNE: 6272 std::swap(TV, FV); 6273 case ISD::SETEQ: 6274 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 6275 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 6276 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 6277 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 6278 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 6279 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6280 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV); 6281 case ISD::SETULT: 6282 case ISD::SETLT: 6283 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 6284 case ISD::SETOGE: 6285 case ISD::SETGE: 6286 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 6287 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 6288 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 6289 case ISD::SETUGT: 6290 case ISD::SETGT: 6291 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 6292 case ISD::SETOLE: 6293 case ISD::SETLE: 6294 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 6295 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 6296 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6297 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV); 6298 } 6299 6300 SDValue Cmp; 6301 switch (CC) { 6302 default: break; // SETUO etc aren't handled by fsel. 
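  // General case (RHS not known to be +/-0.0): materialize the difference
  // LHS - RHS (or RHS - LHS for the inverted predicates) and let fsel test
  // its sign. Equality needs two fsels, checking that both Cmp >= 0.0 and
  // -Cmp >= 0.0 hold.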
6303 case ISD::SETNE: 6304 std::swap(TV, FV); 6305 case ISD::SETEQ: 6306 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, &Flags); 6307 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6308 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6309 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6310 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 6311 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 6312 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6313 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 6314 case ISD::SETULT: 6315 case ISD::SETLT: 6316 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, &Flags); 6317 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6318 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6319 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6320 case ISD::SETOGE: 6321 case ISD::SETGE: 6322 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, &Flags); 6323 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6324 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6325 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6326 case ISD::SETUGT: 6327 case ISD::SETGT: 6328 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, &Flags); 6329 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6330 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6331 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6332 case ISD::SETOLE: 6333 case ISD::SETLE: 6334 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, &Flags); 6335 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6336 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6337 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6338 } 6339 return Op; 6340 } 6341 6342 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI, 6343 SelectionDAG &DAG, 6344 SDLoc dl) const { 6345 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6346 SDValue Src = Op.getOperand(0); 6347 if (Src.getValueType() == MVT::f32) 6348 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6349 6350 SDValue Tmp; 6351 switch (Op.getSimpleValueType().SimpleTy) { 6352 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6353 case MVT::i32: 6354 Tmp = DAG.getNode( 6355 Op.getOpcode() == ISD::FP_TO_SINT 6356 ? PPCISD::FCTIWZ 6357 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6358 dl, MVT::f64, Src); 6359 break; 6360 case MVT::i64: 6361 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6362 "i64 FP_TO_UINT is supported only with FPCVT"); 6363 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 6364 PPCISD::FCTIDUZ, 6365 dl, MVT::f64, Src); 6366 break; 6367 } 6368 6369 // Convert the FP value to an int value through memory. 6370 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 6371 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()); 6372 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 6373 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 6374 MachinePointerInfo MPI = 6375 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); 6376 6377 // Emit a store to the stack slot. 
6378 SDValue Chain; 6379 if (i32Stack) { 6380 MachineFunction &MF = DAG.getMachineFunction(); 6381 MachineMemOperand *MMO = 6382 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 6383 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 6384 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 6385 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 6386 } else 6387 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, 6388 MPI, false, false, 0); 6389 6390 // Result is a load from the stack slot. If loading 4 bytes, make sure to 6391 // add in a bias on big endian. 6392 if (Op.getValueType() == MVT::i32 && !i32Stack) { 6393 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 6394 DAG.getConstant(4, dl, FIPtr.getValueType())); 6395 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4); 6396 } 6397 6398 RLI.Chain = Chain; 6399 RLI.Ptr = FIPtr; 6400 RLI.MPI = MPI; 6401 } 6402 6403 /// \brief Custom lowers floating point to integer conversions to use 6404 /// the direct move instructions available in ISA 2.07 to avoid the 6405 /// need for load/store combinations. 6406 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 6407 SelectionDAG &DAG, 6408 SDLoc dl) const { 6409 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6410 SDValue Src = Op.getOperand(0); 6411 6412 if (Src.getValueType() == MVT::f32) 6413 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6414 6415 SDValue Tmp; 6416 switch (Op.getSimpleValueType().SimpleTy) { 6417 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6418 case MVT::i32: 6419 Tmp = DAG.getNode( 6420 Op.getOpcode() == ISD::FP_TO_SINT 6421 ? PPCISD::FCTIWZ 6422 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6423 dl, MVT::f64, Src); 6424 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp); 6425 break; 6426 case MVT::i64: 6427 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6428 "i64 FP_TO_UINT is supported only with FPCVT"); 6429 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 6430 PPCISD::FCTIDUZ, 6431 dl, MVT::f64, Src); 6432 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp); 6433 break; 6434 } 6435 return Tmp; 6436 } 6437 6438 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 6439 SDLoc dl) const { 6440 if (Subtarget.hasDirectMove() && Subtarget.isPPC64()) 6441 return LowerFP_TO_INTDirectMove(Op, DAG, dl); 6442 6443 ReuseLoadInfo RLI; 6444 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6445 6446 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, false, 6447 false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo, 6448 RLI.Ranges); 6449 } 6450 6451 // We're trying to insert a regular store, S, and then a load, L. If the 6452 // incoming value, O, is a load, we might just be able to have our load use the 6453 // address used by O. However, we don't know if anything else will store to 6454 // that address before we can load from it. To prevent this situation, we need 6455 // to insert our load, L, into the chain as a peer of O. To do this, we give L 6456 // the same chain operand as O, we create a token factor from the chain results 6457 // of O and L, and we replace all uses of O's chain result with that token 6458 // factor (see spliceIntoChain below for this last part). 
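// Schematically:
//
//   before:  Ch -> O -> (users of O's chain)
//   after:   Ch -> O --> TokenFactor -> (former users of O's chain)
//            Ch -> L -->
//
// Anything that was chained after O is now chained after L as well, so no
// store that followed O can slip in between O and our load L.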
6459 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT, 6460 ReuseLoadInfo &RLI, 6461 SelectionDAG &DAG, 6462 ISD::LoadExtType ET) const { 6463 SDLoc dl(Op); 6464 if (ET == ISD::NON_EXTLOAD && 6465 (Op.getOpcode() == ISD::FP_TO_UINT || 6466 Op.getOpcode() == ISD::FP_TO_SINT) && 6467 isOperationLegalOrCustom(Op.getOpcode(), 6468 Op.getOperand(0).getValueType())) { 6469 6470 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6471 return true; 6472 } 6473 6474 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op); 6475 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() || 6476 LD->isNonTemporal()) 6477 return false; 6478 if (LD->getMemoryVT() != MemVT) 6479 return false; 6480 6481 RLI.Ptr = LD->getBasePtr(); 6482 if (LD->isIndexed() && !LD->getOffset().isUndef()) { 6483 assert(LD->getAddressingMode() == ISD::PRE_INC && 6484 "Non-pre-inc AM on PPC?"); 6485 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr, 6486 LD->getOffset()); 6487 } 6488 6489 RLI.Chain = LD->getChain(); 6490 RLI.MPI = LD->getPointerInfo(); 6491 RLI.IsInvariant = LD->isInvariant(); 6492 RLI.Alignment = LD->getAlignment(); 6493 RLI.AAInfo = LD->getAAInfo(); 6494 RLI.Ranges = LD->getRanges(); 6495 6496 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1); 6497 return true; 6498 } 6499 6500 // Given the head of the old chain, ResChain, insert a token factor containing 6501 // it and NewResChain, and make users of ResChain now be users of that token 6502 // factor. 6503 void PPCTargetLowering::spliceIntoChain(SDValue ResChain, 6504 SDValue NewResChain, 6505 SelectionDAG &DAG) const { 6506 if (!ResChain) 6507 return; 6508 6509 SDLoc dl(NewResChain); 6510 6511 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 6512 NewResChain, DAG.getUNDEF(MVT::Other)); 6513 assert(TF.getNode() != NewResChain.getNode() && 6514 "A new TF really is required here"); 6515 6516 DAG.ReplaceAllUsesOfValueWith(ResChain, TF); 6517 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain); 6518 } 6519 6520 /// \brief Analyze profitability of direct move 6521 /// prefer float load to int load plus direct move 6522 /// when there is no integer use of int load 6523 static bool directMoveIsProfitable(const SDValue &Op) { 6524 SDNode *Origin = Op.getOperand(0).getNode(); 6525 if (Origin->getOpcode() != ISD::LOAD) 6526 return true; 6527 6528 for (SDNode::use_iterator UI = Origin->use_begin(), 6529 UE = Origin->use_end(); 6530 UI != UE; ++UI) { 6531 6532 // Only look at the users of the loaded value. 6533 if (UI.getUse().get().getResNo() != 0) 6534 continue; 6535 6536 if (UI->getOpcode() != ISD::SINT_TO_FP && 6537 UI->getOpcode() != ISD::UINT_TO_FP) 6538 return true; 6539 } 6540 6541 return false; 6542 } 6543 6544 /// \brief Custom lowers integer to floating point conversions to use 6545 /// the direct move instructions available in ISA 2.07 to avoid the 6546 /// need for load/store combinations. 6547 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op, 6548 SelectionDAG &DAG, 6549 SDLoc dl) const { 6550 assert((Op.getValueType() == MVT::f32 || 6551 Op.getValueType() == MVT::f64) && 6552 "Invalid floating point type as target of conversion"); 6553 assert(Subtarget.hasFPCVT() && 6554 "Int to FP conversions with direct moves require FPCVT"); 6555 SDValue FP; 6556 SDValue Src = Op.getOperand(0); 6557 bool SinglePrec = Op.getValueType() == MVT::f32; 6558 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32; 6559 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP; 6560 unsigned ConvOp = Signed ? (SinglePrec ? 
PPCISD::FCFIDS : PPCISD::FCFID) : 6561 (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU); 6562 6563 if (WordInt) { 6564 FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ, 6565 dl, MVT::f64, Src); 6566 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 6567 } 6568 else { 6569 FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src); 6570 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 6571 } 6572 6573 return FP; 6574 } 6575 6576 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, 6577 SelectionDAG &DAG) const { 6578 SDLoc dl(Op); 6579 6580 if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) { 6581 if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64) 6582 return SDValue(); 6583 6584 SDValue Value = Op.getOperand(0); 6585 // The values are now known to be -1 (false) or 1 (true). To convert this 6586 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 6587 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 6588 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 6589 6590 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 6591 6592 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 6593 6594 if (Op.getValueType() != MVT::v4f64) 6595 Value = DAG.getNode(ISD::FP_ROUND, dl, 6596 Op.getValueType(), Value, 6597 DAG.getIntPtrConstant(1, dl)); 6598 return Value; 6599 } 6600 6601 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 6602 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 6603 return SDValue(); 6604 6605 if (Op.getOperand(0).getValueType() == MVT::i1) 6606 return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0), 6607 DAG.getConstantFP(1.0, dl, Op.getValueType()), 6608 DAG.getConstantFP(0.0, dl, Op.getValueType())); 6609 6610 // If we have direct moves, we can do all the conversion, skip the store/load 6611 // however, without FPCVT we can't do most conversions. 6612 if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) && 6613 Subtarget.isPPC64() && Subtarget.hasFPCVT()) 6614 return LowerINT_TO_FPDirectMove(Op, DAG, dl); 6615 6616 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) && 6617 "UINT_TO_FP is supported only with FPCVT"); 6618 6619 // If we have FCFIDS, then use it when converting to single-precision. 6620 // Otherwise, convert to double-precision and then round. 6621 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 6622 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS 6623 : PPCISD::FCFIDS) 6624 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU 6625 : PPCISD::FCFID); 6626 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 6627 ? MVT::f32 6628 : MVT::f64; 6629 6630 if (Op.getOperand(0).getValueType() == MVT::i64) { 6631 SDValue SINT = Op.getOperand(0); 6632 // When converting to single-precision, we actually need to convert 6633 // to double-precision first and then round to single-precision. 6634 // To avoid double-rounding effects during that operation, we have 6635 // to prepare the input operand. Bits that might be truncated when 6636 // converting to double-precision are replaced by a bit that won't 6637 // be lost at this stage, but is below the single-precision rounding 6638 // position. 6639 // 6640 // However, if -enable-unsafe-fp-math is in effect, accept double 6641 // rounding to avoid the extra overhead. 
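    // Sketch of the bit-twiddling below: (SINT & 2047) + 2047 sets the bit
    // with value 2048 exactly when some of the low 11 bits of SINT are
    // nonzero; OR-ing that into SINT and masking with -2048 then clears the
    // low 11 bits while keeping a record of their nonzero-ness in the 2048
    // bit, so the intermediate double still rounds to the same
    // single-precision result.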
6642 if (Op.getValueType() == MVT::f32 && 6643 !Subtarget.hasFPCVT() && 6644 !DAG.getTarget().Options.UnsafeFPMath) { 6645 6646 // Twiddle input to make sure the low 11 bits are zero. (If this 6647 // is the case, we are guaranteed the value will fit into the 53 bit 6648 // mantissa of an IEEE double-precision value without rounding.) 6649 // If any of those low 11 bits were not zero originally, make sure 6650 // bit 12 (value 2048) is set instead, so that the final rounding 6651 // to single-precision gets the correct result. 6652 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 6653 SINT, DAG.getConstant(2047, dl, MVT::i64)); 6654 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 6655 Round, DAG.getConstant(2047, dl, MVT::i64)); 6656 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 6657 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 6658 Round, DAG.getConstant(-2048, dl, MVT::i64)); 6659 6660 // However, we cannot use that value unconditionally: if the magnitude 6661 // of the input value is small, the bit-twiddling we did above might 6662 // end up visibly changing the output. Fortunately, in that case, we 6663 // don't need to twiddle bits since the original input will convert 6664 // exactly to double-precision floating-point already. Therefore, 6665 // construct a conditional to use the original value if the top 11 6666 // bits are all sign-bit copies, and use the rounded value computed 6667 // above otherwise. 6668 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 6669 SINT, DAG.getConstant(53, dl, MVT::i32)); 6670 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 6671 Cond, DAG.getConstant(1, dl, MVT::i64)); 6672 Cond = DAG.getSetCC(dl, MVT::i32, 6673 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT); 6674 6675 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 6676 } 6677 6678 ReuseLoadInfo RLI; 6679 SDValue Bits; 6680 6681 MachineFunction &MF = DAG.getMachineFunction(); 6682 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 6683 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, false, 6684 false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo, 6685 RLI.Ranges); 6686 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6687 } else if (Subtarget.hasLFIWAX() && 6688 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) { 6689 MachineMemOperand *MMO = 6690 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6691 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6692 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6693 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl, 6694 DAG.getVTList(MVT::f64, MVT::Other), 6695 Ops, MVT::i32, MMO); 6696 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6697 } else if (Subtarget.hasFPCVT() && 6698 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) { 6699 MachineMemOperand *MMO = 6700 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6701 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6702 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6703 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl, 6704 DAG.getVTList(MVT::f64, MVT::Other), 6705 Ops, MVT::i32, MMO); 6706 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6707 } else if (((Subtarget.hasLFIWAX() && 6708 SINT.getOpcode() == ISD::SIGN_EXTEND) || 6709 (Subtarget.hasFPCVT() && 6710 SINT.getOpcode() == ISD::ZERO_EXTEND)) && 6711 SINT.getOperand(0).getValueType() == MVT::i32) { 6712 MachineFrameInfo *FrameInfo = MF.getFrameInfo(); 6713 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 6714 6715 int FrameIdx = 
FrameInfo->CreateStackObject(4, 4, false); 6716 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6717 6718 SDValue Store = DAG.getStore( 6719 DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx, 6720 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx), 6721 false, false, 0); 6722 6723 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 6724 "Expected an i32 store"); 6725 6726 RLI.Ptr = FIdx; 6727 RLI.Chain = Store; 6728 RLI.MPI = 6729 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 6730 RLI.Alignment = 4; 6731 6732 MachineMemOperand *MMO = 6733 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6734 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6735 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6736 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ? 6737 PPCISD::LFIWZX : PPCISD::LFIWAX, 6738 dl, DAG.getVTList(MVT::f64, MVT::Other), 6739 Ops, MVT::i32, MMO); 6740 } else 6741 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 6742 6743 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 6744 6745 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 6746 FP = DAG.getNode(ISD::FP_ROUND, dl, 6747 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 6748 return FP; 6749 } 6750 6751 assert(Op.getOperand(0).getValueType() == MVT::i32 && 6752 "Unhandled INT_TO_FP type in custom expander!"); 6753 // Since we only generate this in 64-bit mode, we can take advantage of 6754 // 64-bit registers. In particular, sign extend the input value into the 6755 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 6756 // then lfd it and fcfid it. 6757 MachineFunction &MF = DAG.getMachineFunction(); 6758 MachineFrameInfo *FrameInfo = MF.getFrameInfo(); 6759 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 6760 6761 SDValue Ld; 6762 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 6763 ReuseLoadInfo RLI; 6764 bool ReusingLoad; 6765 if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI, 6766 DAG))) { 6767 int FrameIdx = FrameInfo->CreateStackObject(4, 4, false); 6768 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6769 6770 SDValue Store = DAG.getStore( 6771 DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 6772 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx), 6773 false, false, 0); 6774 6775 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 6776 "Expected an i32 store"); 6777 6778 RLI.Ptr = FIdx; 6779 RLI.Chain = Store; 6780 RLI.MPI = 6781 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 6782 RLI.Alignment = 4; 6783 } 6784 6785 MachineMemOperand *MMO = 6786 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6787 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6788 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6789 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 6790 PPCISD::LFIWZX : PPCISD::LFIWAX, 6791 dl, DAG.getVTList(MVT::f64, MVT::Other), 6792 Ops, MVT::i32, MMO); 6793 if (ReusingLoad) 6794 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG); 6795 } else { 6796 assert(Subtarget.isPPC64() && 6797 "i32->FP without LFIWAX supported only on PPC64"); 6798 6799 int FrameIdx = FrameInfo->CreateStackObject(8, 8, false); 6800 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6801 6802 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, 6803 Op.getOperand(0)); 6804 6805 // STD the extended value into the stack slot. 
6806 SDValue Store = DAG.getStore(
6807 DAG.getEntryNode(), dl, Ext64, FIdx,
6808 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx),
6809 false, false, 0);
6810
6811 // Load the value as a double.
6812 Ld = DAG.getLoad(
6813 MVT::f64, dl, Store, FIdx,
6814 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx),
6815 false, false, false, 0);
6816 }
6817
6818 // FCFID it and return it.
6819 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
6820 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
6821 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
6822 DAG.getIntPtrConstant(0, dl));
6823 return FP;
6824 }
6825
6826 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
6827 SelectionDAG &DAG) const {
6828 SDLoc dl(Op);
6829 /*
6830 The rounding mode is in bits 30:31 of FPSCR, and has the following
6831 settings:
6832 00 Round to nearest
6833 01 Round to 0
6834 10 Round to +inf
6835 11 Round to -inf
6836
6837 FLT_ROUNDS, on the other hand, expects the following:
6838 -1 Undefined
6839 0 Round to 0
6840 1 Round to nearest
6841 2 Round to +inf
6842 3 Round to -inf
6843
6844 To perform the conversion, we do:
6845 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
6846 */
6847
6848 MachineFunction &MF = DAG.getMachineFunction();
6849 EVT VT = Op.getValueType();
6850 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
6851
6852 // Save FP Control Word to register
6853 EVT NodeTys[] = {
6854 MVT::f64, // return register
6855 MVT::Glue // unused in this context
6856 };
6857 SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);
6858
6859 // Save FP register to stack slot
6860 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false);
6861 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
6862 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain,
6863 StackSlot, MachinePointerInfo(), false, false, 0);
6864
6865 // Load FP Control Word from low 32 bits of stack slot.
6866 SDValue Four = DAG.getConstant(4, dl, PtrVT);
6867 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
6868 SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(),
6869 false, false, false, 0);
6870
6871 // Transform as necessary
6872 SDValue CWD1 =
6873 DAG.getNode(ISD::AND, dl, MVT::i32,
6874 CWD, DAG.getConstant(3, dl, MVT::i32));
6875 SDValue CWD2 =
6876 DAG.getNode(ISD::SRL, dl, MVT::i32,
6877 DAG.getNode(ISD::AND, dl, MVT::i32,
6878 DAG.getNode(ISD::XOR, dl, MVT::i32,
6879 CWD, DAG.getConstant(3, dl, MVT::i32)),
6880 DAG.getConstant(3, dl, MVT::i32)),
6881 DAG.getConstant(1, dl, MVT::i32));
6882
6883 SDValue RetVal =
6884 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
6885
6886 return DAG.getNode((VT.getSizeInBits() < 16 ?
6887 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
6888 }
6889
6890 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
6891 EVT VT = Op.getValueType();
6892 unsigned BitWidth = VT.getSizeInBits();
6893 SDLoc dl(Op);
6894 assert(Op.getNumOperands() == 3 &&
6895 VT == Op.getOperand(1).getValueType() &&
6896 "Unexpected SHL!");
6897
6898 // Expand into a bunch of logical ops. Note that these ops
6899 // depend on the PPC behavior for oversized shift amounts.
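  // In effect (relying on PPC's convention that an out-of-range shift amount
  // yields zero), the expansion below computes:
  //   OutLo = Lo << Amt
  //   OutHi = (Hi << Amt) | (Lo >> (BitWidth - Amt)) | (Lo << (Amt - BitWidth))
  // The pieces that do not apply to a given Amt vanish as zeros, so no
  // compare-and-select is needed.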
6900 SDValue Lo = Op.getOperand(0); 6901 SDValue Hi = Op.getOperand(1); 6902 SDValue Amt = Op.getOperand(2); 6903 EVT AmtVT = Amt.getValueType(); 6904 6905 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6906 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6907 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 6908 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 6909 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 6910 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6911 DAG.getConstant(-BitWidth, dl, AmtVT)); 6912 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 6913 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 6914 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 6915 SDValue OutOps[] = { OutLo, OutHi }; 6916 return DAG.getMergeValues(OutOps, dl); 6917 } 6918 6919 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 6920 EVT VT = Op.getValueType(); 6921 SDLoc dl(Op); 6922 unsigned BitWidth = VT.getSizeInBits(); 6923 assert(Op.getNumOperands() == 3 && 6924 VT == Op.getOperand(1).getValueType() && 6925 "Unexpected SRL!"); 6926 6927 // Expand into a bunch of logical ops. Note that these ops 6928 // depend on the PPC behavior for oversized shift amounts. 6929 SDValue Lo = Op.getOperand(0); 6930 SDValue Hi = Op.getOperand(1); 6931 SDValue Amt = Op.getOperand(2); 6932 EVT AmtVT = Amt.getValueType(); 6933 6934 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6935 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6936 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 6937 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 6938 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 6939 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6940 DAG.getConstant(-BitWidth, dl, AmtVT)); 6941 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 6942 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 6943 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 6944 SDValue OutOps[] = { OutLo, OutHi }; 6945 return DAG.getMergeValues(OutOps, dl); 6946 } 6947 6948 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 6949 SDLoc dl(Op); 6950 EVT VT = Op.getValueType(); 6951 unsigned BitWidth = VT.getSizeInBits(); 6952 assert(Op.getNumOperands() == 3 && 6953 VT == Op.getOperand(1).getValueType() && 6954 "Unexpected SRA!"); 6955 6956 // Expand into a bunch of logical ops, followed by a select_cc. 6957 SDValue Lo = Op.getOperand(0); 6958 SDValue Hi = Op.getOperand(1); 6959 SDValue Amt = Op.getOperand(2); 6960 EVT AmtVT = Amt.getValueType(); 6961 6962 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6963 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6964 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 6965 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 6966 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 6967 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6968 DAG.getConstant(-BitWidth, dl, AmtVT)); 6969 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 6970 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 6971 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT), 6972 Tmp4, Tmp6, ISD::SETLE); 6973 SDValue OutOps[] = { OutLo, OutHi }; 6974 return DAG.getMergeValues(OutOps, dl); 6975 } 6976 6977 //===----------------------------------------------------------------------===// 6978 // Vector related lowering. 
6979 // 6980 6981 /// BuildSplatI - Build a canonical splati of Val with an element size of 6982 /// SplatSize. Cast the result to VT. 6983 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 6984 SelectionDAG &DAG, SDLoc dl) { 6985 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 6986 6987 static const MVT VTys[] = { // canonical VT to use for each size. 6988 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 6989 }; 6990 6991 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 6992 6993 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 6994 if (Val == -1) 6995 SplatSize = 1; 6996 6997 EVT CanonicalVT = VTys[SplatSize-1]; 6998 6999 // Build a canonical splat for this value. 7000 return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT)); 7001 } 7002 7003 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 7004 /// specified intrinsic ID. 7005 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, 7006 SelectionDAG &DAG, SDLoc dl, 7007 EVT DestVT = MVT::Other) { 7008 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 7009 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7010 DAG.getConstant(IID, dl, MVT::i32), Op); 7011 } 7012 7013 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 7014 /// specified intrinsic ID. 7015 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 7016 SelectionDAG &DAG, SDLoc dl, 7017 EVT DestVT = MVT::Other) { 7018 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 7019 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7020 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS); 7021 } 7022 7023 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 7024 /// specified intrinsic ID. 7025 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 7026 SDValue Op2, SelectionDAG &DAG, 7027 SDLoc dl, EVT DestVT = MVT::Other) { 7028 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 7029 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7030 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2); 7031 } 7032 7033 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 7034 /// amount. The result has the specified value type. 7035 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, 7036 EVT VT, SelectionDAG &DAG, SDLoc dl) { 7037 // Force LHS/RHS to be the right type. 7038 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 7039 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 7040 7041 int Ops[16]; 7042 for (unsigned i = 0; i != 16; ++i) 7043 Ops[i] = i + Amt; 7044 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 7045 return DAG.getNode(ISD::BITCAST, dl, VT, T); 7046 } 7047 7048 // If this is a case we can't handle, return null and let the default 7049 // expansion code take care of it. If we CAN select this case, and if it 7050 // selects to a single instruction, return Op. Otherwise, if we can codegen 7051 // this case more efficiently than a constant pool load, lower it to the 7052 // sequence of ops that should be used. 
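// For example (assuming v4i32 and AltiVec): a splat of 1 selects to a single
// vspltisw 1, while a splat of 18 is not directly encodable but can be built
// as vspltisw 9 followed by vadduwm (via the VADD_SPLAT pseudo below), which
// is still cheaper than a constant-pool load.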
7053 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 7054 SelectionDAG &DAG) const { 7055 SDLoc dl(Op); 7056 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 7057 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 7058 7059 if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) { 7060 // We first build an i32 vector, load it into a QPX register, 7061 // then convert it to a floating-point vector and compare it 7062 // to a zero vector to get the boolean result. 7063 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 7064 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 7065 MachinePointerInfo PtrInfo = 7066 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7067 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7068 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7069 7070 assert(BVN->getNumOperands() == 4 && 7071 "BUILD_VECTOR for v4i1 does not have 4 operands"); 7072 7073 bool IsConst = true; 7074 for (unsigned i = 0; i < 4; ++i) { 7075 if (BVN->getOperand(i).isUndef()) continue; 7076 if (!isa<ConstantSDNode>(BVN->getOperand(i))) { 7077 IsConst = false; 7078 break; 7079 } 7080 } 7081 7082 if (IsConst) { 7083 Constant *One = 7084 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0); 7085 Constant *NegOne = 7086 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0); 7087 7088 SmallVector<Constant*, 4> CV(4, NegOne); 7089 for (unsigned i = 0; i < 4; ++i) { 7090 if (BVN->getOperand(i).isUndef()) 7091 CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext())); 7092 else if (isNullConstant(BVN->getOperand(i))) 7093 continue; 7094 else 7095 CV[i] = One; 7096 } 7097 7098 Constant *CP = ConstantVector::get(CV); 7099 SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()), 7100 16 /* alignment */); 7101 7102 SmallVector<SDValue, 2> Ops; 7103 Ops.push_back(DAG.getEntryNode()); 7104 Ops.push_back(CPIdx); 7105 7106 SmallVector<EVT, 2> ValueVTs; 7107 ValueVTs.push_back(MVT::v4i1); 7108 ValueVTs.push_back(MVT::Other); // chain 7109 SDVTList VTs = DAG.getVTList(ValueVTs); 7110 7111 return DAG.getMemIntrinsicNode( 7112 PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32, 7113 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 7114 } 7115 7116 SmallVector<SDValue, 4> Stores; 7117 for (unsigned i = 0; i < 4; ++i) { 7118 if (BVN->getOperand(i).isUndef()) continue; 7119 7120 unsigned Offset = 4*i; 7121 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 7122 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 7123 7124 unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize(); 7125 if (StoreSize > 4) { 7126 Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl, 7127 BVN->getOperand(i), Idx, 7128 PtrInfo.getWithOffset(Offset), 7129 MVT::i32, false, false, 0)); 7130 } else { 7131 SDValue StoreValue = BVN->getOperand(i); 7132 if (StoreSize < 4) 7133 StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue); 7134 7135 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, 7136 StoreValue, Idx, 7137 PtrInfo.getWithOffset(Offset), 7138 false, false, 0)); 7139 } 7140 } 7141 7142 SDValue StoreChain; 7143 if (!Stores.empty()) 7144 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 7145 else 7146 StoreChain = DAG.getEntryNode(); 7147 7148 // Now load from v4i32 into the QPX register; this will extend it to 7149 // v4i64 but not yet convert it to a floating point. 
Nevertheless, this 7150 // is typed as v4f64 because the QPX register integer states are not 7151 // explicitly represented. 7152 7153 SmallVector<SDValue, 2> Ops; 7154 Ops.push_back(StoreChain); 7155 Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32)); 7156 Ops.push_back(FIdx); 7157 7158 SmallVector<EVT, 2> ValueVTs; 7159 ValueVTs.push_back(MVT::v4f64); 7160 ValueVTs.push_back(MVT::Other); // chain 7161 SDVTList VTs = DAG.getVTList(ValueVTs); 7162 7163 SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, 7164 dl, VTs, Ops, MVT::v4i32, PtrInfo); 7165 LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 7166 DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32), 7167 LoadedVect); 7168 7169 SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64); 7170 7171 return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ); 7172 } 7173 7174 // All other QPX vectors are handled by generic code. 7175 if (Subtarget.hasQPX()) 7176 return SDValue(); 7177 7178 // Check if this is a splat of a constant value. 7179 APInt APSplatBits, APSplatUndef; 7180 unsigned SplatBitSize; 7181 bool HasAnyUndefs; 7182 if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize, 7183 HasAnyUndefs, 0, !Subtarget.isLittleEndian()) || 7184 SplatBitSize > 32) 7185 return SDValue(); 7186 7187 unsigned SplatBits = APSplatBits.getZExtValue(); 7188 unsigned SplatUndef = APSplatUndef.getZExtValue(); 7189 unsigned SplatSize = SplatBitSize / 8; 7190 7191 // First, handle single instruction cases. 7192 7193 // All zeros? 7194 if (SplatBits == 0) { 7195 // Canonicalize all zero vectors to be v4i32. 7196 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 7197 SDValue Z = DAG.getConstant(0, dl, MVT::v4i32); 7198 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z); 7199 } 7200 return Op; 7201 } 7202 7203 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 7204 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >> 7205 (32-SplatBitSize)); 7206 if (SextVal >= -16 && SextVal <= 15) 7207 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl); 7208 7209 // Two instruction sequences. 7210 7211 // If this value is in the range [-32,30] and is even, use: 7212 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2) 7213 // If this value is in the range [17,31] and is odd, use: 7214 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16) 7215 // If this value is in the range [-31,-17] and is odd, use: 7216 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16) 7217 // Note the last two are three-instruction sequences. 7218 if (SextVal >= -32 && SextVal <= 31) { 7219 // To avoid having these optimizations undone by constant folding, 7220 // we convert to a pseudo that will be expanded later into one of 7221 // the above forms. 7222 SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32); 7223 EVT VT = (SplatSize == 1 ? MVT::v16i8 : 7224 (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32)); 7225 SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32); 7226 SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize); 7227 if (VT == Op.getValueType()) 7228 return RetVal; 7229 else 7230 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal); 7231 } 7232 7233 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is 7234 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important 7235 // for fneg/fabs. 
7236 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
7237 // Make -1 and vspltisw -1:
7238 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);
7239
7240 // Make the VSLW intrinsic, computing 0x8000_0000.
7241 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
7242 OnesV, DAG, dl);
7243
7244 // xor by OnesV to invert it.
7245 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
7246 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7247 }
7248
7249 // Check to see if this is a wide variety of vsplti*, binop self cases.
7250 static const signed char SplatCsts[] = {
7251 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
7252 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
7253 };
7254
7255 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
7256 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
7257 // cases which are ambiguous (e.g. formation of 0x8000_0000).
7258 int i = SplatCsts[idx];
7259
7260 // Figure out what shift amount will be used by altivec if shifted by i in
7261 // this splat size.
7262 unsigned TypeShiftAmt = i & (SplatBitSize-1);
7263
7264 // vsplti + shl self.
7265 if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
7266 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
7267 static const unsigned IIDs[] = { // Intrinsic to use for each size.
7268 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
7269 Intrinsic::ppc_altivec_vslw
7270 };
7271 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
7272 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7273 }
7274
7275 // vsplti + srl self.
7276 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
7277 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
7278 static const unsigned IIDs[] = { // Intrinsic to use for each size.
7279 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
7280 Intrinsic::ppc_altivec_vsrw
7281 };
7282 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
7283 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7284 }
7285
7286 // vsplti + sra self.
7287 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
7288 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
7289 static const unsigned IIDs[] = { // Intrinsic to use for each size.
7290 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
7291 Intrinsic::ppc_altivec_vsraw
7292 };
7293 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
7294 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7295 }
7296
7297 // vsplti + rol self.
7298 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
7299 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
7300 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
7301 static const unsigned IIDs[] = { // Intrinsic to use for each size.
7302 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
7303 Intrinsic::ppc_altivec_vrlw
7304 };
7305 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
7306 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7307 }
7308
7309 // t = vsplti c, result = vsldoi t, t, 1
7310 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
7311 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
7312 unsigned Amt = Subtarget.isLittleEndian() ?
15 : 1; 7313 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 7314 } 7315 // t = vsplti c, result = vsldoi t, t, 2 7316 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) { 7317 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 7318 unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2; 7319 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 7320 } 7321 // t = vsplti c, result = vsldoi t, t, 3 7322 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) { 7323 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 7324 unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3; 7325 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 7326 } 7327 } 7328 7329 return SDValue(); 7330 } 7331 7332 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 7333 /// the specified operations to build the shuffle. 7334 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 7335 SDValue RHS, SelectionDAG &DAG, 7336 SDLoc dl) { 7337 unsigned OpNum = (PFEntry >> 26) & 0x0F; 7338 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 7339 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 7340 7341 enum { 7342 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 7343 OP_VMRGHW, 7344 OP_VMRGLW, 7345 OP_VSPLTISW0, 7346 OP_VSPLTISW1, 7347 OP_VSPLTISW2, 7348 OP_VSPLTISW3, 7349 OP_VSLDOI4, 7350 OP_VSLDOI8, 7351 OP_VSLDOI12 7352 }; 7353 7354 if (OpNum == OP_COPY) { 7355 if (LHSID == (1*9+2)*9+3) return LHS; 7356 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 7357 return RHS; 7358 } 7359 7360 SDValue OpLHS, OpRHS; 7361 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 7362 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 7363 7364 int ShufIdxs[16]; 7365 switch (OpNum) { 7366 default: llvm_unreachable("Unknown i32 permute!"); 7367 case OP_VMRGHW: 7368 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 7369 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 7370 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 7371 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 7372 break; 7373 case OP_VMRGLW: 7374 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 7375 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 7376 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 7377 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 7378 break; 7379 case OP_VSPLTISW0: 7380 for (unsigned i = 0; i != 16; ++i) 7381 ShufIdxs[i] = (i&3)+0; 7382 break; 7383 case OP_VSPLTISW1: 7384 for (unsigned i = 0; i != 16; ++i) 7385 ShufIdxs[i] = (i&3)+4; 7386 break; 7387 case OP_VSPLTISW2: 7388 for (unsigned i = 0; i != 16; ++i) 7389 ShufIdxs[i] = (i&3)+8; 7390 break; 7391 case OP_VSPLTISW3: 7392 for (unsigned i = 0; i != 16; ++i) 7393 ShufIdxs[i] = (i&3)+12; 7394 break; 7395 case OP_VSLDOI4: 7396 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl); 7397 case OP_VSLDOI8: 7398 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl); 7399 case OP_VSLDOI12: 7400 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl); 7401 } 7402 EVT VT = OpLHS.getValueType(); 7403 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS); 7404 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS); 7405 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, 
ShufIdxs); 7406 return DAG.getNode(ISD::BITCAST, dl, VT, T); 7407 } 7408 7409 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 7410 /// is a shuffle we can handle in a single instruction, return it. Otherwise, 7411 /// return the code it can be lowered into. Worst case, it can always be 7412 /// lowered into a vperm. 7413 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 7414 SelectionDAG &DAG) const { 7415 SDLoc dl(Op); 7416 SDValue V1 = Op.getOperand(0); 7417 SDValue V2 = Op.getOperand(1); 7418 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 7419 EVT VT = Op.getValueType(); 7420 bool isLittleEndian = Subtarget.isLittleEndian(); 7421 7422 if (Subtarget.hasQPX()) { 7423 if (VT.getVectorNumElements() != 4) 7424 return SDValue(); 7425 7426 if (V2.isUndef()) V2 = V1; 7427 7428 int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp); 7429 if (AlignIdx != -1) { 7430 return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2, 7431 DAG.getConstant(AlignIdx, dl, MVT::i32)); 7432 } else if (SVOp->isSplat()) { 7433 int SplatIdx = SVOp->getSplatIndex(); 7434 if (SplatIdx >= 4) { 7435 std::swap(V1, V2); 7436 SplatIdx -= 4; 7437 } 7438 7439 return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1, 7440 DAG.getConstant(SplatIdx, dl, MVT::i32)); 7441 } 7442 7443 // Lower this into a qvgpci/qvfperm pair. 7444 7445 // Compute the qvgpci literal 7446 unsigned idx = 0; 7447 for (unsigned i = 0; i < 4; ++i) { 7448 int m = SVOp->getMaskElt(i); 7449 unsigned mm = m >= 0 ? (unsigned) m : i; 7450 idx |= mm << (3-i)*3; 7451 } 7452 7453 SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64, 7454 DAG.getConstant(idx, dl, MVT::i32)); 7455 return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3); 7456 } 7457 7458 // Cases that are handled by instructions that take permute immediates 7459 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 7460 // selected by the instruction selector. 7461 if (V2.isUndef()) { 7462 if (PPC::isSplatShuffleMask(SVOp, 1) || 7463 PPC::isSplatShuffleMask(SVOp, 2) || 7464 PPC::isSplatShuffleMask(SVOp, 4) || 7465 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) || 7466 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) || 7467 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 || 7468 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) || 7469 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) || 7470 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) || 7471 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) || 7472 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) || 7473 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) || 7474 (Subtarget.hasP8Altivec() && ( 7475 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) || 7476 PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) || 7477 PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) { 7478 return Op; 7479 } 7480 } 7481 7482 // Altivec has a variety of "shuffle immediates" that take two vector inputs 7483 // and produce a fixed permutation. If any of these match, do not lower to 7484 // VPERM. 7485 unsigned int ShuffleKind = isLittleEndian ? 
2 : 0; 7486 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) || 7487 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) || 7488 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 || 7489 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) || 7490 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) || 7491 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) || 7492 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) || 7493 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) || 7494 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) || 7495 (Subtarget.hasP8Altivec() && ( 7496 PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) || 7497 PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) || 7498 PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG)))) 7499 return Op; 7500 7501 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 7502 // perfect shuffle table to emit an optimal matching sequence. 7503 ArrayRef<int> PermMask = SVOp->getMask(); 7504 7505 unsigned PFIndexes[4]; 7506 bool isFourElementShuffle = true; 7507 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 7508 unsigned EltNo = 8; // Start out undef. 7509 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 7510 if (PermMask[i*4+j] < 0) 7511 continue; // Undef, ignore it. 7512 7513 unsigned ByteSource = PermMask[i*4+j]; 7514 if ((ByteSource & 3) != j) { 7515 isFourElementShuffle = false; 7516 break; 7517 } 7518 7519 if (EltNo == 8) { 7520 EltNo = ByteSource/4; 7521 } else if (EltNo != ByteSource/4) { 7522 isFourElementShuffle = false; 7523 break; 7524 } 7525 } 7526 PFIndexes[i] = EltNo; 7527 } 7528 7529 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 7530 // perfect shuffle vector to determine if it is cost effective to do this as 7531 // discrete instructions, or whether we should use a vperm. 7532 // For now, we skip this for little endian until such time as we have a 7533 // little-endian perfect shuffle table. 7534 if (isFourElementShuffle && !isLittleEndian) { 7535 // Compute the index in the perfect shuffle table. 7536 unsigned PFTableIndex = 7537 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 7538 7539 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 7540 unsigned Cost = (PFEntry >> 30); 7541 7542 // Determining when to avoid vperm is tricky. Many things affect the cost 7543 // of vperm, particularly how many times the perm mask needs to be computed. 7544 // For example, if the perm mask can be hoisted out of a loop or is already 7545 // used (perhaps because there are multiple permutes with the same shuffle 7546 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 7547 // the loop requires an extra register. 7548 // 7549 // As a compromise, we only emit discrete instructions if the shuffle can be 7550 // generated in 3 or fewer operations. When we have loop information 7551 // available, if this block is within a loop, we should avoid using vperm 7552 // for 3-operation perms and use a constant pool load instead. 7553 if (Cost < 3) 7554 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 7555 } 7556 7557 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 7558 // vector that will get spilled to the constant pool. 7559 if (V2.isUndef()) V2 = V1; 7560 7561 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 7562 // that it is in input element units, not in bytes. Convert now. 
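  // For example, with v8i16 inputs (BytesPerElement == 2) a mask entry of 3
  // expands to the byte indices {6, 7} on a big-endian target; the
  // little-endian path below instead emits 31-6 and 31-7, i.e. {25, 24}.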
7563 7564 // For little endian, the order of the input vectors is reversed, and 7565 // the permutation mask is complemented with respect to 31. This is 7566 // necessary to produce proper semantics with the big-endian-biased vperm 7567 // instruction. 7568 EVT EltVT = V1.getValueType().getVectorElementType(); 7569 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 7570 7571 SmallVector<SDValue, 16> ResultMask; 7572 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 7573 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i]; 7574 7575 for (unsigned j = 0; j != BytesPerElement; ++j) 7576 if (isLittleEndian) 7577 ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j), 7578 dl, MVT::i32)); 7579 else 7580 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl, 7581 MVT::i32)); 7582 } 7583 7584 SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask); 7585 if (isLittleEndian) 7586 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 7587 V2, V1, VPermMask); 7588 else 7589 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 7590 V1, V2, VPermMask); 7591 } 7592 7593 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a 7594 /// vector comparison. If it is, return true and fill in Opc/isDot with 7595 /// information about the intrinsic. 7596 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc, 7597 bool &isDot, const PPCSubtarget &Subtarget) { 7598 unsigned IntrinsicID = 7599 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 7600 CompareOpc = -1; 7601 isDot = false; 7602 switch (IntrinsicID) { 7603 default: return false; 7604 // Comparison predicates. 7605 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; 7606 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; 7607 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; 7608 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; 7609 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; 7610 case Intrinsic::ppc_altivec_vcmpequd_p: 7611 if (Subtarget.hasP8Altivec()) { 7612 CompareOpc = 199; 7613 isDot = 1; 7614 } else 7615 return false; 7616 7617 break; 7618 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; 7619 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; 7620 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; 7621 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; 7622 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; 7623 case Intrinsic::ppc_altivec_vcmpgtsd_p: 7624 if (Subtarget.hasP8Altivec()) { 7625 CompareOpc = 967; 7626 isDot = 1; 7627 } else 7628 return false; 7629 7630 break; 7631 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; 7632 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; 7633 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; 7634 case Intrinsic::ppc_altivec_vcmpgtud_p: 7635 if (Subtarget.hasP8Altivec()) { 7636 CompareOpc = 711; 7637 isDot = 1; 7638 } else 7639 return false; 7640 7641 break; 7642 // VSX predicate comparisons use the same infrastructure 7643 case Intrinsic::ppc_vsx_xvcmpeqdp_p: 7644 case Intrinsic::ppc_vsx_xvcmpgedp_p: 7645 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 7646 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 7647 case Intrinsic::ppc_vsx_xvcmpgesp_p: 7648 case Intrinsic::ppc_vsx_xvcmpgtsp_p: 7649 if 
(Subtarget.hasVSX()) { 7650 switch (IntrinsicID) { 7651 case Intrinsic::ppc_vsx_xvcmpeqdp_p: CompareOpc = 99; break; 7652 case Intrinsic::ppc_vsx_xvcmpgedp_p: CompareOpc = 115; break; 7653 case Intrinsic::ppc_vsx_xvcmpgtdp_p: CompareOpc = 107; break; 7654 case Intrinsic::ppc_vsx_xvcmpeqsp_p: CompareOpc = 67; break; 7655 case Intrinsic::ppc_vsx_xvcmpgesp_p: CompareOpc = 83; break; 7656 case Intrinsic::ppc_vsx_xvcmpgtsp_p: CompareOpc = 75; break; 7657 } 7658 isDot = 1; 7659 } 7660 else 7661 return false; 7662 7663 break; 7664 7665 // Normal Comparisons. 7666 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 7667 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 7668 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 7669 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 7670 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 7671 case Intrinsic::ppc_altivec_vcmpequd: 7672 if (Subtarget.hasP8Altivec()) { 7673 CompareOpc = 199; 7674 isDot = 0; 7675 } else 7676 return false; 7677 7678 break; 7679 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 7680 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 7681 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 7682 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 7683 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 7684 case Intrinsic::ppc_altivec_vcmpgtsd: 7685 if (Subtarget.hasP8Altivec()) { 7686 CompareOpc = 967; 7687 isDot = 0; 7688 } else 7689 return false; 7690 7691 break; 7692 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 7693 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 7694 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 7695 case Intrinsic::ppc_altivec_vcmpgtud: 7696 if (Subtarget.hasP8Altivec()) { 7697 CompareOpc = 711; 7698 isDot = 0; 7699 } else 7700 return false; 7701 7702 break; 7703 } 7704 return true; 7705 } 7706 7707 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 7708 /// lower, do it, otherwise return null. 7709 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 7710 SelectionDAG &DAG) const { 7711 unsigned IntrinsicID = 7712 cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 7713 7714 if (IntrinsicID == Intrinsic::thread_pointer) { 7715 // Reads the thread pointer register, used for __builtin_thread_pointer. 7716 bool is64bit = Subtarget.isPPC64(); 7717 return DAG.getRegister(is64bit ? PPC::X13 : PPC::R2, 7718 is64bit ? MVT::i64 : MVT::i32); 7719 } 7720 7721 // If this is a lowered altivec predicate compare, CompareOpc is set to the 7722 // opcode number of the comparison. 7723 SDLoc dl(Op); 7724 int CompareOpc; 7725 bool isDot; 7726 if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget)) 7727 return SDValue(); // Don't custom lower most intrinsics. 7728 7729 // If this is a non-dot comparison, make the VCMP node and we are done. 7730 if (!isDot) { 7731 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 7732 Op.getOperand(1), Op.getOperand(2), 7733 DAG.getConstant(CompareOpc, dl, MVT::i32)); 7734 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 7735 } 7736 7737 // Create the PPCISD altivec 'dot' comparison node. 
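  // For reference: a dot-form compare (e.g. vcmpequw.) also writes a summary
  // of the comparison into CR6 -- the LT bit is set when the relation holds
  // for every element and the EQ bit when it holds for none. MFOCRF places
  // CR6 in bits 4..7 of the GPR (counting from the LSB), which is why the
  // extraction below shifts right by 5 for EQ (BitNo == 0) and by 7 for LT
  // (BitNo == 2).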
SDValue Ops[] = {
    Op.getOperand(2),  // LHS
    Op.getOperand(3),  // RHS
    DAG.getConstant(CompareOpc, dl, MVT::i32)
  };
  EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
  SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);

  // Now that we have the comparison, emit a copy from the CR to a GPR.
  // This is flagged to the above dot comparison.
  SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
                              DAG.getRegister(PPC::CR6, MVT::i32),
                              CompNode.getValue(1));

  // Unpack the result based on how the target uses it.
  unsigned BitNo;   // Bit # of CR6.
  bool InvertBit;   // Invert result?
  switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
  default:  // Can't happen, don't crash on invalid number though.
  case 0:   // Return the value of the EQ bit of CR6.
    BitNo = 0; InvertBit = false;
    break;
  case 1:   // Return the inverted value of the EQ bit of CR6.
    BitNo = 0; InvertBit = true;
    break;
  case 2:   // Return the value of the LT bit of CR6.
    BitNo = 2; InvertBit = false;
    break;
  case 3:   // Return the inverted value of the LT bit of CR6.
    BitNo = 2; InvertBit = true;
    break;
  }

  // Shift the bit into the low position.
  Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
                      DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
  // Isolate the bit.
  Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
                      DAG.getConstant(1, dl, MVT::i32));

  // If we are supposed to, toggle the bit.
  if (InvertBit)
    Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
                        DAG.getConstant(1, dl, MVT::i32));
  return Flags;
}

SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // For v2i64 (VSX), we can pattern match the v2i32 case (using fp <-> int
  // instructions), but for smaller types, we need to first extend up to
  // v2i32 before going any farther.
  if (Op.getValueType() == MVT::v2i64) {
    EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    if (ExtVT != MVT::v2i32) {
      Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0));
      Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op,
                       DAG.getValueType(EVT::getVectorVT(*DAG.getContext(),
                                        ExtVT.getVectorElementType(), 4)));
      Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op);
      Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op,
                       DAG.getValueType(MVT::v2i32));
    }

    return Op;
  }

  return SDValue();
}

SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // Create a stack slot that is 16-byte aligned.
  MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(16, 16, false);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  // Store the input value into Value#0 of the stack slot.
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
                               Op.getOperand(0), FIdx, MachinePointerInfo(),
                               false, false, 0);
  // Load it out.
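  // (Round-tripping through a 16-byte-aligned slot is a simple way to place
  // the scalar in lane 0 of a legal vector type; the remaining lanes are
  // undefined, which SCALAR_TO_VECTOR permits.)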
7823 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo(), 7824 false, false, false, 0); 7825 } 7826 7827 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, 7828 SelectionDAG &DAG) const { 7829 SDLoc dl(Op); 7830 SDNode *N = Op.getNode(); 7831 7832 assert(N->getOperand(0).getValueType() == MVT::v4i1 && 7833 "Unknown extract_vector_elt type"); 7834 7835 SDValue Value = N->getOperand(0); 7836 7837 // The first part of this is like the store lowering except that we don't 7838 // need to track the chain. 7839 7840 // The values are now known to be -1 (false) or 1 (true). To convert this 7841 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 7842 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 7843 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 7844 7845 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 7846 // understand how to form the extending load. 7847 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 7848 7849 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 7850 7851 // Now convert to an integer and store. 7852 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 7853 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 7854 Value); 7855 7856 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 7857 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 7858 MachinePointerInfo PtrInfo = 7859 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7860 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7861 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7862 7863 SDValue StoreChain = DAG.getEntryNode(); 7864 SmallVector<SDValue, 2> Ops; 7865 Ops.push_back(StoreChain); 7866 Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32)); 7867 Ops.push_back(Value); 7868 Ops.push_back(FIdx); 7869 7870 SmallVector<EVT, 2> ValueVTs; 7871 ValueVTs.push_back(MVT::Other); // chain 7872 SDVTList VTs = DAG.getVTList(ValueVTs); 7873 7874 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 7875 dl, VTs, Ops, MVT::v4i32, PtrInfo); 7876 7877 // Extract the value requested. 7878 unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 7879 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 7880 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 7881 7882 SDValue IntVal = DAG.getLoad(MVT::i32, dl, StoreChain, Idx, 7883 PtrInfo.getWithOffset(Offset), 7884 false, false, false, 0); 7885 7886 if (!Subtarget.useCRBits()) 7887 return IntVal; 7888 7889 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal); 7890 } 7891 7892 /// Lowering for QPX v4i1 loads 7893 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op, 7894 SelectionDAG &DAG) const { 7895 SDLoc dl(Op); 7896 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode()); 7897 SDValue LoadChain = LN->getChain(); 7898 SDValue BasePtr = LN->getBasePtr(); 7899 7900 if (Op.getValueType() == MVT::v4f64 || 7901 Op.getValueType() == MVT::v4f32) { 7902 EVT MemVT = LN->getMemoryVT(); 7903 unsigned Alignment = LN->getAlignment(); 7904 7905 // If this load is properly aligned, then it is legal. 
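  // Otherwise it is scalarized below: for example, a v4f32 load with only
  // 4-byte alignment becomes four f32 loads at offsets 0, 4, 8 and 12, whose
  // chains are joined by a TokenFactor and whose values are recombined with
  // a BUILD_VECTOR.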
7906 if (Alignment >= MemVT.getStoreSize()) 7907 return Op; 7908 7909 EVT ScalarVT = Op.getValueType().getScalarType(), 7910 ScalarMemVT = MemVT.getScalarType(); 7911 unsigned Stride = ScalarMemVT.getStoreSize(); 7912 7913 SmallVector<SDValue, 8> Vals, LoadChains; 7914 for (unsigned Idx = 0; Idx < 4; ++Idx) { 7915 SDValue Load; 7916 if (ScalarVT != ScalarMemVT) 7917 Load = 7918 DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain, 7919 BasePtr, 7920 LN->getPointerInfo().getWithOffset(Idx*Stride), 7921 ScalarMemVT, LN->isVolatile(), LN->isNonTemporal(), 7922 LN->isInvariant(), MinAlign(Alignment, Idx*Stride), 7923 LN->getAAInfo()); 7924 else 7925 Load = 7926 DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr, 7927 LN->getPointerInfo().getWithOffset(Idx*Stride), 7928 LN->isVolatile(), LN->isNonTemporal(), 7929 LN->isInvariant(), MinAlign(Alignment, Idx*Stride), 7930 LN->getAAInfo()); 7931 7932 if (Idx == 0 && LN->isIndexed()) { 7933 assert(LN->getAddressingMode() == ISD::PRE_INC && 7934 "Unknown addressing mode on vector load"); 7935 Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(), 7936 LN->getAddressingMode()); 7937 } 7938 7939 Vals.push_back(Load); 7940 LoadChains.push_back(Load.getValue(1)); 7941 7942 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 7943 DAG.getConstant(Stride, dl, 7944 BasePtr.getValueType())); 7945 } 7946 7947 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 7948 SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals); 7949 7950 if (LN->isIndexed()) { 7951 SDValue RetOps[] = { Value, Vals[0].getValue(1), TF }; 7952 return DAG.getMergeValues(RetOps, dl); 7953 } 7954 7955 SDValue RetOps[] = { Value, TF }; 7956 return DAG.getMergeValues(RetOps, dl); 7957 } 7958 7959 assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower"); 7960 assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported"); 7961 7962 // To lower v4i1 from a byte array, we load the byte elements of the 7963 // vector and then reuse the BUILD_VECTOR logic. 7964 7965 SmallVector<SDValue, 4> VectElmts, VectElmtChains; 7966 for (unsigned i = 0; i < 4; ++i) { 7967 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 7968 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 7969 7970 VectElmts.push_back(DAG.getExtLoad(ISD::EXTLOAD, 7971 dl, MVT::i32, LoadChain, Idx, 7972 LN->getPointerInfo().getWithOffset(i), 7973 MVT::i8 /* memory type */, 7974 LN->isVolatile(), LN->isNonTemporal(), 7975 LN->isInvariant(), 7976 1 /* alignment */, LN->getAAInfo())); 7977 VectElmtChains.push_back(VectElmts[i].getValue(1)); 7978 } 7979 7980 LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains); 7981 SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts); 7982 7983 SDValue RVals[] = { Value, LoadChain }; 7984 return DAG.getMergeValues(RVals, dl); 7985 } 7986 7987 /// Lowering for QPX v4i1 stores 7988 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op, 7989 SelectionDAG &DAG) const { 7990 SDLoc dl(Op); 7991 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode()); 7992 SDValue StoreChain = SN->getChain(); 7993 SDValue BasePtr = SN->getBasePtr(); 7994 SDValue Value = SN->getValue(); 7995 7996 if (Value.getValueType() == MVT::v4f64 || 7997 Value.getValueType() == MVT::v4f32) { 7998 EVT MemVT = SN->getMemoryVT(); 7999 unsigned Alignment = SN->getAlignment(); 8000 8001 // If this store is properly aligned, then it is legal. 
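  // Otherwise, mirror the load case above: extract each element and emit one
  // scalar store per lane, Stride bytes apart, merging the chains with a
  // TokenFactor.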
8002 if (Alignment >= MemVT.getStoreSize()) 8003 return Op; 8004 8005 EVT ScalarVT = Value.getValueType().getScalarType(), 8006 ScalarMemVT = MemVT.getScalarType(); 8007 unsigned Stride = ScalarMemVT.getStoreSize(); 8008 8009 SmallVector<SDValue, 8> Stores; 8010 for (unsigned Idx = 0; Idx < 4; ++Idx) { 8011 SDValue Ex = DAG.getNode( 8012 ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value, 8013 DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout()))); 8014 SDValue Store; 8015 if (ScalarVT != ScalarMemVT) 8016 Store = 8017 DAG.getTruncStore(StoreChain, dl, Ex, BasePtr, 8018 SN->getPointerInfo().getWithOffset(Idx*Stride), 8019 ScalarMemVT, SN->isVolatile(), SN->isNonTemporal(), 8020 MinAlign(Alignment, Idx*Stride), SN->getAAInfo()); 8021 else 8022 Store = 8023 DAG.getStore(StoreChain, dl, Ex, BasePtr, 8024 SN->getPointerInfo().getWithOffset(Idx*Stride), 8025 SN->isVolatile(), SN->isNonTemporal(), 8026 MinAlign(Alignment, Idx*Stride), SN->getAAInfo()); 8027 8028 if (Idx == 0 && SN->isIndexed()) { 8029 assert(SN->getAddressingMode() == ISD::PRE_INC && 8030 "Unknown addressing mode on vector store"); 8031 Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(), 8032 SN->getAddressingMode()); 8033 } 8034 8035 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 8036 DAG.getConstant(Stride, dl, 8037 BasePtr.getValueType())); 8038 Stores.push_back(Store); 8039 } 8040 8041 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 8042 8043 if (SN->isIndexed()) { 8044 SDValue RetOps[] = { TF, Stores[0].getValue(1) }; 8045 return DAG.getMergeValues(RetOps, dl); 8046 } 8047 8048 return TF; 8049 } 8050 8051 assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported"); 8052 assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower"); 8053 8054 // The values are now known to be -1 (false) or 1 (true). To convert this 8055 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 8056 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 8057 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 8058 8059 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 8060 // understand how to form the extending load. 8061 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 8062 8063 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 8064 8065 // Now convert to an integer and store. 8066 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 8067 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 8068 Value); 8069 8070 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 8071 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 8072 MachinePointerInfo PtrInfo = 8073 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 8074 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 8075 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8076 8077 SmallVector<SDValue, 2> Ops; 8078 Ops.push_back(StoreChain); 8079 Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32)); 8080 Ops.push_back(Value); 8081 Ops.push_back(FIdx); 8082 8083 SmallVector<EVT, 2> ValueVTs; 8084 ValueVTs.push_back(MVT::Other); // chain 8085 SDVTList VTs = DAG.getVTList(ValueVTs); 8086 8087 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 8088 dl, VTs, Ops, MVT::v4i32, PtrInfo); 8089 8090 // Move data into the byte array. 
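  // qvstfiw wrote each lane out as a 32-bit word, so reload the four words
  // (at offsets 0, 4, 8 and 12) and truncate each one to a byte, producing
  // the packed 4-byte in-memory form of the v4i1 value.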
SmallVector<SDValue, 4> Loads, LoadChains;
  for (unsigned i = 0; i < 4; ++i) {
    unsigned Offset = 4*i;
    SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

    Loads.push_back(DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
                                PtrInfo.getWithOffset(Offset),
                                false, false, false, 0));
    LoadChains.push_back(Loads[i].getValue(1));
  }

  StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);

  SmallVector<SDValue, 4> Stores;
  for (unsigned i = 0; i < 4; ++i) {
    SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);

    Stores.push_back(DAG.getTruncStore(
        StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i),
        MVT::i8 /* memory type */, SN->isVolatile(), SN->isNonTemporal(),
        1 /* alignment */, SN->getAAInfo()));
  }

  StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

  return StoreChain;
}

SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  if (Op.getValueType() == MVT::v4i32) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDValue Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG, dl);
    SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl); // +16 as shift amt.

    SDValue RHSSwap =   // = vrlw RHS, 16
      BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);

    // Shrinkify inputs to v8i16.
    LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
    RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
    RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);

    // Low parts multiplied together, generating 32-bit results (we ignore the
    // top parts).
    SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
                                      LHS, RHS, DAG, dl, MVT::v4i32);

    SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
                                      LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
    // Shift the high parts up 16 bits.
    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
                              Neg16, DAG, dl);
    return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
  } else if (Op.getValueType() == MVT::v8i16) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);

    return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
                            LHS, RHS, Zero, DAG, dl);
  } else if (Op.getValueType() == MVT::v16i8) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
    bool isLittleEndian = Subtarget.isLittleEndian();

    // Multiply the even 8-bit parts, producing 16-bit sums.
    SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
                                         LHS, RHS, DAG, dl, MVT::v8i16);
    EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);

    // Multiply the odd 8-bit parts, producing 16-bit sums.
    SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
                                        LHS, RHS, DAG, dl, MVT::v8i16);
    OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);

    // Merge the results together. Because vmuleub and vmuloub are
    // instructions with a big-endian bias, we must reverse the
    // element numbering and reverse the meaning of "odd" and "even"
    // when generating little endian code.
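    // For example, in big-endian mode result bytes 2*i and 2*i+1 are taken
    // from EvenParts[2*i+1] and OddParts[2*i+1+16] -- the low-order bytes of
    // the 16-bit products -- while the little-endian shuffle below uses the
    // even-numbered bytes and swaps the two input vectors.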
8173 int Ops[16]; 8174 for (unsigned i = 0; i != 8; ++i) { 8175 if (isLittleEndian) { 8176 Ops[i*2 ] = 2*i; 8177 Ops[i*2+1] = 2*i+16; 8178 } else { 8179 Ops[i*2 ] = 2*i+1; 8180 Ops[i*2+1] = 2*i+1+16; 8181 } 8182 } 8183 if (isLittleEndian) 8184 return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops); 8185 else 8186 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 8187 } else { 8188 llvm_unreachable("Unknown mul to lower!"); 8189 } 8190 } 8191 8192 /// LowerOperation - Provide custom lowering hooks for some operations. 8193 /// 8194 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 8195 switch (Op.getOpcode()) { 8196 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 8197 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 8198 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 8199 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 8200 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 8201 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 8202 case ISD::SETCC: return LowerSETCC(Op, DAG); 8203 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 8204 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 8205 case ISD::VASTART: 8206 return LowerVASTART(Op, DAG, Subtarget); 8207 8208 case ISD::VAARG: 8209 return LowerVAARG(Op, DAG, Subtarget); 8210 8211 case ISD::VACOPY: 8212 return LowerVACOPY(Op, DAG, Subtarget); 8213 8214 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, Subtarget); 8215 case ISD::DYNAMIC_STACKALLOC: 8216 return LowerDYNAMIC_STACKALLOC(Op, DAG, Subtarget); 8217 case ISD::GET_DYNAMIC_AREA_OFFSET: return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG, Subtarget); 8218 8219 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 8220 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 8221 8222 case ISD::LOAD: return LowerLOAD(Op, DAG); 8223 case ISD::STORE: return LowerSTORE(Op, DAG); 8224 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); 8225 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 8226 case ISD::FP_TO_UINT: 8227 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, 8228 SDLoc(Op)); 8229 case ISD::UINT_TO_FP: 8230 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 8231 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 8232 8233 // Lower 64-bit shifts. 8234 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 8235 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 8236 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 8237 8238 // Vector-related lowering. 8239 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 8240 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 8241 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 8242 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 8243 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG); 8244 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 8245 case ISD::MUL: return LowerMUL(Op, DAG); 8246 8247 // For counter-based loop handling. 8248 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 8249 8250 // Frame & Return address. 
8251 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 8252 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 8253 } 8254 } 8255 8256 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 8257 SmallVectorImpl<SDValue>&Results, 8258 SelectionDAG &DAG) const { 8259 SDLoc dl(N); 8260 switch (N->getOpcode()) { 8261 default: 8262 llvm_unreachable("Do not know how to custom type legalize this operation!"); 8263 case ISD::READCYCLECOUNTER: { 8264 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 8265 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0)); 8266 8267 Results.push_back(RTB); 8268 Results.push_back(RTB.getValue(1)); 8269 Results.push_back(RTB.getValue(2)); 8270 break; 8271 } 8272 case ISD::INTRINSIC_W_CHAIN: { 8273 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 8274 Intrinsic::ppc_is_decremented_ctr_nonzero) 8275 break; 8276 8277 assert(N->getValueType(0) == MVT::i1 && 8278 "Unexpected result type for CTR decrement intrinsic"); 8279 EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), 8280 N->getValueType(0)); 8281 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 8282 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 8283 N->getOperand(1)); 8284 8285 Results.push_back(NewInt); 8286 Results.push_back(NewInt.getValue(1)); 8287 break; 8288 } 8289 case ISD::VAARG: { 8290 if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64()) 8291 return; 8292 8293 EVT VT = N->getValueType(0); 8294 8295 if (VT == MVT::i64) { 8296 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG, Subtarget); 8297 8298 Results.push_back(NewNode); 8299 Results.push_back(NewNode.getValue(1)); 8300 } 8301 return; 8302 } 8303 case ISD::FP_ROUND_INREG: { 8304 assert(N->getValueType(0) == MVT::ppcf128); 8305 assert(N->getOperand(0).getValueType() == MVT::ppcf128); 8306 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 8307 MVT::f64, N->getOperand(0), 8308 DAG.getIntPtrConstant(0, dl)); 8309 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 8310 MVT::f64, N->getOperand(0), 8311 DAG.getIntPtrConstant(1, dl)); 8312 8313 // Add the two halves of the long double in round-to-zero mode. 8314 SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 8315 8316 // We know the low half is about to be thrown away, so just use something 8317 // convenient. 8318 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128, 8319 FPreg, FPreg)); 8320 return; 8321 } 8322 case ISD::FP_TO_SINT: 8323 case ISD::FP_TO_UINT: 8324 // LowerFP_TO_INT() can only handle f32 and f64. 
if (N->getOperand(0).getValueType() == MVT::ppcf128)
      return;
    Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
    return;
  }
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *Func = Intrinsic::getDeclaration(M, Id);
  return Builder.CreateCall(Func, {});
}

// The mappings for emitLeading/TrailingFence are taken from
// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                 AtomicOrdering Ord, bool IsStore,
                                                 bool IsLoad) const {
  if (Ord == AtomicOrdering::SequentiallyConsistent)
    return callIntrinsic(Builder, Intrinsic::ppc_sync);
  if (isReleaseOrStronger(Ord))
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  return nullptr;
}

Instruction* PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                  AtomicOrdering Ord, bool IsStore,
                                                  bool IsLoad) const {
  if (IsLoad && isAcquireOrStronger(Ord))
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  // FIXME: this is too conservative, a dependent branch + isync is enough.
  // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
  // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
  // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
  return nullptr;
}

MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
                                    unsigned AtomicSize,
                                    unsigned BinOpcode) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
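  // When BinOpcode is 0, no ALU instruction is emitted and the incoming
  // value is stored back directly (TmpReg is simply 'incr' below), which is
  // exactly the semantics of an atomic exchange.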
const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  auto LoadMnemonic = PPC::LDARX;
  auto StoreMnemonic = PPC::STDCX;
  switch (AtomicSize) {
  default:
    llvm_unreachable("Unexpected size of atomic entity");
  case 1:
    LoadMnemonic = PPC::LBARX;
    StoreMnemonic = PPC::STBCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "8-bit atomics require partword atomic support");
    break;
  case 2:
    LoadMnemonic = PPC::LHARX;
    StoreMnemonic = PPC::STHCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "16-bit atomics require partword atomic support");
    break;
  case 4:
    LoadMnemonic = PPC::LWARX;
    StoreMnemonic = PPC::STWCX;
    break;
  case 8:
    LoadMnemonic = PPC::LDARX;
    StoreMnemonic = PPC::STDCX;
    break;
  }

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = ++BB->getIterator();

  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptrA = MI->getOperand(1).getReg();
  unsigned ptrB = MI->getOperand(2).getReg();
  unsigned incr = MI->getOperand(3).getReg();
  DebugLoc dl = MI->getDebugLoc();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  unsigned TmpReg = (!BinOpcode) ? incr :
    RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
                                                   : &PPC::GPRCRegClass);

  // thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  // loopMBB:
  //   l[wd]arx dest, ptr
  //   add r0, dest, incr
  //   st[wd]cx. r0, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB
  BB = loopMBB;
  BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
    .addReg(ptrA).addReg(ptrB);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
  BuildMI(BB, dl, TII->get(StoreMnemonic))
    .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
  BuildMI(BB, dl, TII->get(PPC::BCC))
    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  // exitMBB:
  //   ...
  BB = exitMBB;
  return BB;
}

MachineBasicBlock *
PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI,
                                            MachineBasicBlock *BB,
                                            bool is8bit,    // operation
                                            unsigned BinOpcode) const {
  // If we support part-word atomic mnemonics, just use them.
  if (Subtarget.hasPartwordAtomics())
    return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode);

  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  // In 64 bit mode we have to use 64 bits for addresses, even though the
  // lwarx/stwcx are 32 bits.  With the 32-bit atomics we can use address
  // registers without caring whether they're 32 or 64, but here we're
  // doing actual arithmetic on the addresses.
  bool is64bit = Subtarget.isPPC64();
  unsigned ZeroReg = is64bit ?
PPC::ZERO8 : PPC::ZERO; 8467 8468 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8469 MachineFunction *F = BB->getParent(); 8470 MachineFunction::iterator It = ++BB->getIterator(); 8471 8472 unsigned dest = MI->getOperand(0).getReg(); 8473 unsigned ptrA = MI->getOperand(1).getReg(); 8474 unsigned ptrB = MI->getOperand(2).getReg(); 8475 unsigned incr = MI->getOperand(3).getReg(); 8476 DebugLoc dl = MI->getDebugLoc(); 8477 8478 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 8479 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 8480 F->insert(It, loopMBB); 8481 F->insert(It, exitMBB); 8482 exitMBB->splice(exitMBB->begin(), BB, 8483 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8484 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 8485 8486 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8487 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 8488 : &PPC::GPRCRegClass; 8489 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 8490 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 8491 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 8492 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC); 8493 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 8494 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 8495 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 8496 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 8497 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC); 8498 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 8499 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 8500 unsigned Ptr1Reg; 8501 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC); 8502 8503 // thisMBB: 8504 // ... 8505 // fallthrough --> loopMBB 8506 BB->addSuccessor(loopMBB); 8507 8508 // The 4-byte load must be aligned, while a char or short may be 8509 // anywhere in the word. Hence all this nasty bookkeeping code. 8510 // add ptr1, ptrA, ptrB [copy if ptrA==0] 8511 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 8512 // xori shift, shift1, 24 [16] 8513 // rlwinm ptr, ptr1, 0, 0, 29 8514 // slw incr2, incr, shift 8515 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 8516 // slw mask, mask2, shift 8517 // loopMBB: 8518 // lwarx tmpDest, ptr 8519 // add tmp, tmpDest, incr2 8520 // andc tmp2, tmpDest, mask 8521 // and tmp3, tmp, mask 8522 // or tmp4, tmp3, tmp2 8523 // stwcx. tmp4, ptr 8524 // bne- loopMBB 8525 // fallthrough --> exitMBB 8526 // srw dest, tmpDest, shift 8527 if (ptrA != ZeroReg) { 8528 Ptr1Reg = RegInfo.createVirtualRegister(RC); 8529 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 8530 .addReg(ptrA).addReg(ptrB); 8531 } else { 8532 Ptr1Reg = ptrB; 8533 } 8534 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 8535 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 8536 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 8537 .addReg(Shift1Reg).addImm(is8bit ? 
24 : 16); 8538 if (is64bit) 8539 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 8540 .addReg(Ptr1Reg).addImm(0).addImm(61); 8541 else 8542 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 8543 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 8544 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg) 8545 .addReg(incr).addReg(ShiftReg); 8546 if (is8bit) 8547 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 8548 else { 8549 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 8550 BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535); 8551 } 8552 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 8553 .addReg(Mask2Reg).addReg(ShiftReg); 8554 8555 BB = loopMBB; 8556 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 8557 .addReg(ZeroReg).addReg(PtrReg); 8558 if (BinOpcode) 8559 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 8560 .addReg(Incr2Reg).addReg(TmpDestReg); 8561 BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg) 8562 .addReg(TmpDestReg).addReg(MaskReg); 8563 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg) 8564 .addReg(TmpReg).addReg(MaskReg); 8565 BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg) 8566 .addReg(Tmp3Reg).addReg(Tmp2Reg); 8567 BuildMI(BB, dl, TII->get(PPC::STWCX)) 8568 .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg); 8569 BuildMI(BB, dl, TII->get(PPC::BCC)) 8570 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 8571 BB->addSuccessor(loopMBB); 8572 BB->addSuccessor(exitMBB); 8573 8574 // exitMBB: 8575 // ... 8576 BB = exitMBB; 8577 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg) 8578 .addReg(ShiftReg); 8579 return BB; 8580 } 8581 8582 llvm::MachineBasicBlock* 8583 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI, 8584 MachineBasicBlock *MBB) const { 8585 DebugLoc DL = MI->getDebugLoc(); 8586 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8587 8588 MachineFunction *MF = MBB->getParent(); 8589 MachineRegisterInfo &MRI = MF->getRegInfo(); 8590 8591 const BasicBlock *BB = MBB->getBasicBlock(); 8592 MachineFunction::iterator I = ++MBB->getIterator(); 8593 8594 // Memory Reference 8595 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 8596 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 8597 8598 unsigned DstReg = MI->getOperand(0).getReg(); 8599 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 8600 assert(RC->hasType(MVT::i32) && "Invalid destination!"); 8601 unsigned mainDstReg = MRI.createVirtualRegister(RC); 8602 unsigned restoreDstReg = MRI.createVirtualRegister(RC); 8603 8604 MVT PVT = getPointerTy(MF->getDataLayout()); 8605 assert((PVT == MVT::i64 || PVT == MVT::i32) && 8606 "Invalid Pointer Size!"); 8607 // For v = setjmp(buf), we generate 8608 // 8609 // thisMBB: 8610 // SjLjSetup mainMBB 8611 // bl mainMBB 8612 // v_restore = 1 8613 // b sinkMBB 8614 // 8615 // mainMBB: 8616 // buf[LabelOffset] = LR 8617 // v_main = 0 8618 // 8619 // sinkMBB: 8620 // v = phi(main, restore) 8621 // 8622 8623 MachineBasicBlock *thisMBB = MBB; 8624 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 8625 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 8626 MF->insert(I, mainMBB); 8627 MF->insert(I, sinkMBB); 8628 8629 MachineInstrBuilder MIB; 8630 8631 // Transfer the remainder of BB and its successor edges to sinkMBB. 
8632 sinkMBB->splice(sinkMBB->begin(), MBB, 8633 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 8634 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 8635 8636 // Note that the structure of the jmp_buf used here is not compatible 8637 // with that used by libc, and is not designed to be. Specifically, it 8638 // stores only those 'reserved' registers that LLVM does not otherwise 8639 // understand how to spill. Also, by convention, by the time this 8640 // intrinsic is called, Clang has already stored the frame address in the 8641 // first slot of the buffer and stack address in the third. Following the 8642 // X86 target code, we'll store the jump address in the second slot. We also 8643 // need to save the TOC pointer (R2) to handle jumps between shared 8644 // libraries, and that will be stored in the fourth slot. The thread 8645 // identifier (R13) is not affected. 8646 8647 // thisMBB: 8648 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 8649 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 8650 const int64_t BPOffset = 4 * PVT.getStoreSize(); 8651 8652 // Prepare IP either in reg. 8653 const TargetRegisterClass *PtrRC = getRegClassFor(PVT); 8654 unsigned LabelReg = MRI.createVirtualRegister(PtrRC); 8655 unsigned BufReg = MI->getOperand(1).getReg(); 8656 8657 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) { 8658 setUsesTOCBasePtr(*MBB->getParent()); 8659 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD)) 8660 .addReg(PPC::X2) 8661 .addImm(TOCOffset) 8662 .addReg(BufReg); 8663 MIB.setMemRefs(MMOBegin, MMOEnd); 8664 } 8665 8666 // Naked functions never have a base pointer, and so we use r1. For all 8667 // other functions, this decision must be delayed until during PEI. 8668 unsigned BaseReg; 8669 if (MF->getFunction()->hasFnAttribute(Attribute::Naked)) 8670 BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1; 8671 else 8672 BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP; 8673 8674 MIB = BuildMI(*thisMBB, MI, DL, 8675 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW)) 8676 .addReg(BaseReg) 8677 .addImm(BPOffset) 8678 .addReg(BufReg); 8679 MIB.setMemRefs(MMOBegin, MMOEnd); 8680 8681 // Setup 8682 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB); 8683 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 8684 MIB.addRegMask(TRI->getNoPreservedMask()); 8685 8686 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1); 8687 8688 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup)) 8689 .addMBB(mainMBB); 8690 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB); 8691 8692 thisMBB->addSuccessor(mainMBB, BranchProbability::getZero()); 8693 thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne()); 8694 8695 // mainMBB: 8696 // mainDstReg = 0 8697 MIB = 8698 BuildMI(mainMBB, DL, 8699 TII->get(Subtarget.isPPC64() ? 
PPC::MFLR8 : PPC::MFLR), LabelReg); 8700 8701 // Store IP 8702 if (Subtarget.isPPC64()) { 8703 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD)) 8704 .addReg(LabelReg) 8705 .addImm(LabelOffset) 8706 .addReg(BufReg); 8707 } else { 8708 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW)) 8709 .addReg(LabelReg) 8710 .addImm(LabelOffset) 8711 .addReg(BufReg); 8712 } 8713 8714 MIB.setMemRefs(MMOBegin, MMOEnd); 8715 8716 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0); 8717 mainMBB->addSuccessor(sinkMBB); 8718 8719 // sinkMBB: 8720 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 8721 TII->get(PPC::PHI), DstReg) 8722 .addReg(mainDstReg).addMBB(mainMBB) 8723 .addReg(restoreDstReg).addMBB(thisMBB); 8724 8725 MI->eraseFromParent(); 8726 return sinkMBB; 8727 } 8728 8729 MachineBasicBlock * 8730 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI, 8731 MachineBasicBlock *MBB) const { 8732 DebugLoc DL = MI->getDebugLoc(); 8733 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8734 8735 MachineFunction *MF = MBB->getParent(); 8736 MachineRegisterInfo &MRI = MF->getRegInfo(); 8737 8738 // Memory Reference 8739 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 8740 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 8741 8742 MVT PVT = getPointerTy(MF->getDataLayout()); 8743 assert((PVT == MVT::i64 || PVT == MVT::i32) && 8744 "Invalid Pointer Size!"); 8745 8746 const TargetRegisterClass *RC = 8747 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 8748 unsigned Tmp = MRI.createVirtualRegister(RC); 8749 // Since FP is only updated here but NOT referenced, it's treated as GPR. 8750 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; 8751 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; 8752 unsigned BP = 8753 (PVT == MVT::i64) 8754 ? PPC::X30 8755 : (Subtarget.isSVR4ABI() && 8756 MF->getTarget().getRelocationModel() == Reloc::PIC_ 8757 ? PPC::R29 8758 : PPC::R30); 8759 8760 MachineInstrBuilder MIB; 8761 8762 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 8763 const int64_t SPOffset = 2 * PVT.getStoreSize(); 8764 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 8765 const int64_t BPOffset = 4 * PVT.getStoreSize(); 8766 8767 unsigned BufReg = MI->getOperand(0).getReg(); 8768 8769 // Reload FP (the jumped-to function may not have had a 8770 // frame pointer, and if so, then its r31 will be restored 8771 // as necessary). 
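  // The buffer layout assumed here matches the setjmp side: FP in slot 0,
  // IP at LabelOffset (slot 1), SP in slot 2, the TOC save in slot 3 and BP
  // in slot 4, each slot PVT.getStoreSize() bytes wide.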
8772 if (PVT == MVT::i64) { 8773 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP) 8774 .addImm(0) 8775 .addReg(BufReg); 8776 } else { 8777 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP) 8778 .addImm(0) 8779 .addReg(BufReg); 8780 } 8781 MIB.setMemRefs(MMOBegin, MMOEnd); 8782 8783 // Reload IP 8784 if (PVT == MVT::i64) { 8785 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp) 8786 .addImm(LabelOffset) 8787 .addReg(BufReg); 8788 } else { 8789 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp) 8790 .addImm(LabelOffset) 8791 .addReg(BufReg); 8792 } 8793 MIB.setMemRefs(MMOBegin, MMOEnd); 8794 8795 // Reload SP 8796 if (PVT == MVT::i64) { 8797 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP) 8798 .addImm(SPOffset) 8799 .addReg(BufReg); 8800 } else { 8801 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP) 8802 .addImm(SPOffset) 8803 .addReg(BufReg); 8804 } 8805 MIB.setMemRefs(MMOBegin, MMOEnd); 8806 8807 // Reload BP 8808 if (PVT == MVT::i64) { 8809 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP) 8810 .addImm(BPOffset) 8811 .addReg(BufReg); 8812 } else { 8813 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP) 8814 .addImm(BPOffset) 8815 .addReg(BufReg); 8816 } 8817 MIB.setMemRefs(MMOBegin, MMOEnd); 8818 8819 // Reload TOC 8820 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) { 8821 setUsesTOCBasePtr(*MBB->getParent()); 8822 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) 8823 .addImm(TOCOffset) 8824 .addReg(BufReg); 8825 8826 MIB.setMemRefs(MMOBegin, MMOEnd); 8827 } 8828 8829 // Jump 8830 BuildMI(*MBB, MI, DL, 8831 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp); 8832 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR)); 8833 8834 MI->eraseFromParent(); 8835 return MBB; 8836 } 8837 8838 MachineBasicBlock * 8839 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 8840 MachineBasicBlock *BB) const { 8841 if (MI->getOpcode() == TargetOpcode::STACKMAP || 8842 MI->getOpcode() == TargetOpcode::PATCHPOINT) { 8843 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() && 8844 MI->getOpcode() == TargetOpcode::PATCHPOINT) { 8845 // Call lowering should have added an r2 operand to indicate a dependence 8846 // on the TOC base pointer value. It can't however, because there is no 8847 // way to mark the dependence as implicit there, and so the stackmap code 8848 // will confuse it with a regular operand. Instead, add the dependence 8849 // here. 8850 setUsesTOCBasePtr(*BB->getParent()); 8851 MI->addOperand(MachineOperand::CreateReg(PPC::X2, false, true)); 8852 } 8853 8854 return emitPatchPoint(MI, BB); 8855 } 8856 8857 if (MI->getOpcode() == PPC::EH_SjLj_SetJmp32 || 8858 MI->getOpcode() == PPC::EH_SjLj_SetJmp64) { 8859 return emitEHSjLjSetJmp(MI, BB); 8860 } else if (MI->getOpcode() == PPC::EH_SjLj_LongJmp32 || 8861 MI->getOpcode() == PPC::EH_SjLj_LongJmp64) { 8862 return emitEHSjLjLongJmp(MI, BB); 8863 } 8864 8865 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8866 8867 // To "insert" these instructions we actually have to insert their 8868 // control-flow patterns. 
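  // On subtargets with the isel instruction, integer selects are emitted
  // inline via insertSelect and need no new blocks; all other select
  // pseudos below expand into a branch-and-phi triangle over copy0MBB and
  // sinkMBB.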
8869 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8870 MachineFunction::iterator It = ++BB->getIterator(); 8871 8872 MachineFunction *F = BB->getParent(); 8873 8874 if (Subtarget.hasISEL() && (MI->getOpcode() == PPC::SELECT_CC_I4 || 8875 MI->getOpcode() == PPC::SELECT_CC_I8 || 8876 MI->getOpcode() == PPC::SELECT_I4 || 8877 MI->getOpcode() == PPC::SELECT_I8)) { 8878 SmallVector<MachineOperand, 2> Cond; 8879 if (MI->getOpcode() == PPC::SELECT_CC_I4 || 8880 MI->getOpcode() == PPC::SELECT_CC_I8) 8881 Cond.push_back(MI->getOperand(4)); 8882 else 8883 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET)); 8884 Cond.push_back(MI->getOperand(1)); 8885 8886 DebugLoc dl = MI->getDebugLoc(); 8887 TII->insertSelect(*BB, MI, dl, MI->getOperand(0).getReg(), 8888 Cond, MI->getOperand(2).getReg(), 8889 MI->getOperand(3).getReg()); 8890 } else if (MI->getOpcode() == PPC::SELECT_CC_I4 || 8891 MI->getOpcode() == PPC::SELECT_CC_I8 || 8892 MI->getOpcode() == PPC::SELECT_CC_F4 || 8893 MI->getOpcode() == PPC::SELECT_CC_F8 || 8894 MI->getOpcode() == PPC::SELECT_CC_QFRC || 8895 MI->getOpcode() == PPC::SELECT_CC_QSRC || 8896 MI->getOpcode() == PPC::SELECT_CC_QBRC || 8897 MI->getOpcode() == PPC::SELECT_CC_VRRC || 8898 MI->getOpcode() == PPC::SELECT_CC_VSFRC || 8899 MI->getOpcode() == PPC::SELECT_CC_VSSRC || 8900 MI->getOpcode() == PPC::SELECT_CC_VSRC || 8901 MI->getOpcode() == PPC::SELECT_I4 || 8902 MI->getOpcode() == PPC::SELECT_I8 || 8903 MI->getOpcode() == PPC::SELECT_F4 || 8904 MI->getOpcode() == PPC::SELECT_F8 || 8905 MI->getOpcode() == PPC::SELECT_QFRC || 8906 MI->getOpcode() == PPC::SELECT_QSRC || 8907 MI->getOpcode() == PPC::SELECT_QBRC || 8908 MI->getOpcode() == PPC::SELECT_VRRC || 8909 MI->getOpcode() == PPC::SELECT_VSFRC || 8910 MI->getOpcode() == PPC::SELECT_VSSRC || 8911 MI->getOpcode() == PPC::SELECT_VSRC) { 8912 // The incoming instruction knows the destination vreg to set, the 8913 // condition code register to branch on, the true/false values to 8914 // select between, and a branch opcode to use. 8915 8916 // thisMBB: 8917 // ... 8918 // TrueVal = ... 8919 // cmpTY ccX, r1, r2 8920 // bCC copy1MBB 8921 // fallthrough --> copy0MBB 8922 MachineBasicBlock *thisMBB = BB; 8923 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 8924 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 8925 DebugLoc dl = MI->getDebugLoc(); 8926 F->insert(It, copy0MBB); 8927 F->insert(It, sinkMBB); 8928 8929 // Transfer the remainder of BB and its successor edges to sinkMBB. 8930 sinkMBB->splice(sinkMBB->begin(), BB, 8931 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8932 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 8933 8934 // Next, add the true and fallthrough blocks as its successors. 
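    // (The conditional branch emitted below jumps to sinkMBB when the
    // condition is true; copy0MBB is the fallthrough path that computes
    // the false value.)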
8935 BB->addSuccessor(copy0MBB); 8936 BB->addSuccessor(sinkMBB); 8937 8938 if (MI->getOpcode() == PPC::SELECT_I4 || 8939 MI->getOpcode() == PPC::SELECT_I8 || 8940 MI->getOpcode() == PPC::SELECT_F4 || 8941 MI->getOpcode() == PPC::SELECT_F8 || 8942 MI->getOpcode() == PPC::SELECT_QFRC || 8943 MI->getOpcode() == PPC::SELECT_QSRC || 8944 MI->getOpcode() == PPC::SELECT_QBRC || 8945 MI->getOpcode() == PPC::SELECT_VRRC || 8946 MI->getOpcode() == PPC::SELECT_VSFRC || 8947 MI->getOpcode() == PPC::SELECT_VSSRC || 8948 MI->getOpcode() == PPC::SELECT_VSRC) { 8949 BuildMI(BB, dl, TII->get(PPC::BC)) 8950 .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 8951 } else { 8952 unsigned SelectPred = MI->getOperand(4).getImm(); 8953 BuildMI(BB, dl, TII->get(PPC::BCC)) 8954 .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 8955 } 8956 8957 // copy0MBB: 8958 // %FalseValue = ... 8959 // # fallthrough to sinkMBB 8960 BB = copy0MBB; 8961 8962 // Update machine-CFG edges 8963 BB->addSuccessor(sinkMBB); 8964 8965 // sinkMBB: 8966 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 8967 // ... 8968 BB = sinkMBB; 8969 BuildMI(*BB, BB->begin(), dl, 8970 TII->get(PPC::PHI), MI->getOperand(0).getReg()) 8971 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB) 8972 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 8973 } else if (MI->getOpcode() == PPC::ReadTB) { 8974 // To read the 64-bit time-base register on a 32-bit target, we read the 8975 // two halves. Should the counter have wrapped while it was being read, we 8976 // need to try again. 8977 // ... 8978 // readLoop: 8979 // mfspr Rx,TBU # load from TBU 8980 // mfspr Ry,TB # load from TB 8981 // mfspr Rz,TBU # load from TBU 8982 // cmpw crX,Rx,Rz # check if 'old'='new' 8983 // bne readLoop # branch if they're not equal 8984 // ... 8985 8986 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB); 8987 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 8988 DebugLoc dl = MI->getDebugLoc(); 8989 F->insert(It, readMBB); 8990 F->insert(It, sinkMBB); 8991 8992 // Transfer the remainder of BB and its successor edges to sinkMBB. 
8993 sinkMBB->splice(sinkMBB->begin(), BB, 8994 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8995 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 8996 8997 BB->addSuccessor(readMBB); 8998 BB = readMBB; 8999 9000 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9001 unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 9002 unsigned LoReg = MI->getOperand(0).getReg(); 9003 unsigned HiReg = MI->getOperand(1).getReg(); 9004 9005 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269); 9006 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268); 9007 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269); 9008 9009 unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 9010 9011 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg) 9012 .addReg(HiReg).addReg(ReadAgainReg); 9013 BuildMI(BB, dl, TII->get(PPC::BCC)) 9014 .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB); 9015 9016 BB->addSuccessor(readMBB); 9017 BB->addSuccessor(sinkMBB); 9018 } 9019 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 9020 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 9021 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 9022 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 9023 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 9024 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4); 9025 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 9026 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8); 9027 9028 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 9029 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 9030 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 9031 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 9032 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 9033 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND); 9034 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 9035 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8); 9036 9037 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 9038 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 9039 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 9040 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 9041 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 9042 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR); 9043 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 9044 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8); 9045 9046 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 9047 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 9048 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 9049 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 9050 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 9051 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR); 9052 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 9053 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8); 9054 9055 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 9056 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND); 9057 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 9058 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND); 9059 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 9060 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND); 9061 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 9062 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8); 9063 9064 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 9065 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 9066 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 9067 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 9068 else 
if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32) 9069 BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF); 9070 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 9071 BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8); 9072 9073 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8) 9074 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 9075 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16) 9076 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 9077 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32) 9078 BB = EmitAtomicBinary(MI, BB, 4, 0); 9079 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64) 9080 BB = EmitAtomicBinary(MI, BB, 8, 0); 9081 9082 else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 9083 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 || 9084 (Subtarget.hasPartwordAtomics() && 9085 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) || 9086 (Subtarget.hasPartwordAtomics() && 9087 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) { 9088 bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64; 9089 9090 auto LoadMnemonic = PPC::LDARX; 9091 auto StoreMnemonic = PPC::STDCX; 9092 switch(MI->getOpcode()) { 9093 default: 9094 llvm_unreachable("Compare and swap of unknown size"); 9095 case PPC::ATOMIC_CMP_SWAP_I8: 9096 LoadMnemonic = PPC::LBARX; 9097 StoreMnemonic = PPC::STBCX; 9098 assert(Subtarget.hasPartwordAtomics() && "No support partword atomics."); 9099 break; 9100 case PPC::ATOMIC_CMP_SWAP_I16: 9101 LoadMnemonic = PPC::LHARX; 9102 StoreMnemonic = PPC::STHCX; 9103 assert(Subtarget.hasPartwordAtomics() && "No support partword atomics."); 9104 break; 9105 case PPC::ATOMIC_CMP_SWAP_I32: 9106 LoadMnemonic = PPC::LWARX; 9107 StoreMnemonic = PPC::STWCX; 9108 break; 9109 case PPC::ATOMIC_CMP_SWAP_I64: 9110 LoadMnemonic = PPC::LDARX; 9111 StoreMnemonic = PPC::STDCX; 9112 break; 9113 } 9114 unsigned dest = MI->getOperand(0).getReg(); 9115 unsigned ptrA = MI->getOperand(1).getReg(); 9116 unsigned ptrB = MI->getOperand(2).getReg(); 9117 unsigned oldval = MI->getOperand(3).getReg(); 9118 unsigned newval = MI->getOperand(4).getReg(); 9119 DebugLoc dl = MI->getDebugLoc(); 9120 9121 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 9122 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 9123 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 9124 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 9125 F->insert(It, loop1MBB); 9126 F->insert(It, loop2MBB); 9127 F->insert(It, midMBB); 9128 F->insert(It, exitMBB); 9129 exitMBB->splice(exitMBB->begin(), BB, 9130 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 9131 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 9132 9133 // thisMBB: 9134 // ... 9135 // fallthrough --> loopMBB 9136 BB->addSuccessor(loop1MBB); 9137 9138 // loop1MBB: 9139 // l[bhwd]arx dest, ptr 9140 // cmp[wd] dest, oldval 9141 // bne- midMBB 9142 // loop2MBB: 9143 // st[bhwd]cx. newval, ptr 9144 // bne- loopMBB 9145 // b exitBB 9146 // midMBB: 9147 // st[bhwd]cx. dest, ptr 9148 // exitBB: 9149 BB = loop1MBB; 9150 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 9151 .addReg(ptrA).addReg(ptrB); 9152 BuildMI(BB, dl, TII->get(is64bit ? 
PPC::CMPD : PPC::CMPW), PPC::CR0) 9153 .addReg(oldval).addReg(dest); 9154 BuildMI(BB, dl, TII->get(PPC::BCC)) 9155 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 9156 BB->addSuccessor(loop2MBB); 9157 BB->addSuccessor(midMBB); 9158 9159 BB = loop2MBB; 9160 BuildMI(BB, dl, TII->get(StoreMnemonic)) 9161 .addReg(newval).addReg(ptrA).addReg(ptrB); 9162 BuildMI(BB, dl, TII->get(PPC::BCC)) 9163 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 9164 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 9165 BB->addSuccessor(loop1MBB); 9166 BB->addSuccessor(exitMBB); 9167 9168 BB = midMBB; 9169 BuildMI(BB, dl, TII->get(StoreMnemonic)) 9170 .addReg(dest).addReg(ptrA).addReg(ptrB); 9171 BB->addSuccessor(exitMBB); 9172 9173 // exitMBB: 9174 // ... 9175 BB = exitMBB; 9176 } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 9177 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 9178 // We must use 64-bit registers for addresses when targeting 64-bit, 9179 // since we're actually doing arithmetic on them. Other registers 9180 // can be 32-bit. 9181 bool is64bit = Subtarget.isPPC64(); 9182 bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 9183 9184 unsigned dest = MI->getOperand(0).getReg(); 9185 unsigned ptrA = MI->getOperand(1).getReg(); 9186 unsigned ptrB = MI->getOperand(2).getReg(); 9187 unsigned oldval = MI->getOperand(3).getReg(); 9188 unsigned newval = MI->getOperand(4).getReg(); 9189 DebugLoc dl = MI->getDebugLoc(); 9190 9191 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 9192 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 9193 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 9194 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 9195 F->insert(It, loop1MBB); 9196 F->insert(It, loop2MBB); 9197 F->insert(It, midMBB); 9198 F->insert(It, exitMBB); 9199 exitMBB->splice(exitMBB->begin(), BB, 9200 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 9201 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 9202 9203 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9204 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 9205 : &PPC::GPRCRegClass; 9206 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 9207 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 9208 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 9209 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC); 9210 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC); 9211 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC); 9212 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC); 9213 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 9214 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 9215 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 9216 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 9217 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 9218 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 9219 unsigned Ptr1Reg; 9220 unsigned TmpReg = RegInfo.createVirtualRegister(RC); 9221 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 9222 // thisMBB: 9223 // ... 9224 // fallthrough --> loopMBB 9225 BB->addSuccessor(loop1MBB); 9226 9227 // The 4-byte load must be aligned, while a char or short may be 9228 // anywhere in the word. Hence all this nasty bookkeeping code. 
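    // Worked example for the 8-bit case (big-endian): a byte at offset 2
    // within its word gives shift1 = 16 and shift = 16 ^ 24 = 8, so the
    // byte occupies bits 8..15 of the aligned word and mask = 0xFF << 8.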
9229 // add ptr1, ptrA, ptrB [copy if ptrA==0] 9230 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 9231 // xori shift, shift1, 24 [16] 9232 // rlwinm ptr, ptr1, 0, 0, 29 9233 // slw newval2, newval, shift 9234 // slw oldval2, oldval,shift 9235 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 9236 // slw mask, mask2, shift 9237 // and newval3, newval2, mask 9238 // and oldval3, oldval2, mask 9239 // loop1MBB: 9240 // lwarx tmpDest, ptr 9241 // and tmp, tmpDest, mask 9242 // cmpw tmp, oldval3 9243 // bne- midMBB 9244 // loop2MBB: 9245 // andc tmp2, tmpDest, mask 9246 // or tmp4, tmp2, newval3 9247 // stwcx. tmp4, ptr 9248 // bne- loop1MBB 9249 // b exitBB 9250 // midMBB: 9251 // stwcx. tmpDest, ptr 9252 // exitBB: 9253 // srw dest, tmpDest, shift 9254 if (ptrA != ZeroReg) { 9255 Ptr1Reg = RegInfo.createVirtualRegister(RC); 9256 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 9257 .addReg(ptrA).addReg(ptrB); 9258 } else { 9259 Ptr1Reg = ptrB; 9260 } 9261 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 9262 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 9263 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 9264 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 9265 if (is64bit) 9266 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 9267 .addReg(Ptr1Reg).addImm(0).addImm(61); 9268 else 9269 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 9270 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 9271 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 9272 .addReg(newval).addReg(ShiftReg); 9273 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 9274 .addReg(oldval).addReg(ShiftReg); 9275 if (is8bit) 9276 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 9277 else { 9278 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 9279 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 9280 .addReg(Mask3Reg).addImm(65535); 9281 } 9282 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 9283 .addReg(Mask2Reg).addReg(ShiftReg); 9284 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 9285 .addReg(NewVal2Reg).addReg(MaskReg); 9286 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 9287 .addReg(OldVal2Reg).addReg(MaskReg); 9288 9289 BB = loop1MBB; 9290 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 9291 .addReg(ZeroReg).addReg(PtrReg); 9292 BuildMI(BB, dl, TII->get(PPC::AND),TmpReg) 9293 .addReg(TmpDestReg).addReg(MaskReg); 9294 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 9295 .addReg(TmpReg).addReg(OldVal3Reg); 9296 BuildMI(BB, dl, TII->get(PPC::BCC)) 9297 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 9298 BB->addSuccessor(loop2MBB); 9299 BB->addSuccessor(midMBB); 9300 9301 BB = loop2MBB; 9302 BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg) 9303 .addReg(TmpDestReg).addReg(MaskReg); 9304 BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg) 9305 .addReg(Tmp2Reg).addReg(NewVal3Reg); 9306 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg) 9307 .addReg(ZeroReg).addReg(PtrReg); 9308 BuildMI(BB, dl, TII->get(PPC::BCC)) 9309 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 9310 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 9311 BB->addSuccessor(loop1MBB); 9312 BB->addSuccessor(exitMBB); 9313 9314 BB = midMBB; 9315 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg) 9316 .addReg(ZeroReg).addReg(PtrReg); 9317 BB->addSuccessor(exitMBB); 9318 9319 // exitMBB: 9320 // ... 
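    // (The srw at the top of exitMBB moves the masked, loaded value back
    // down to the least-significant bits, which is where users of the
    // pseudo's result expect it.)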
9321 BB = exitMBB; 9322 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg) 9323 .addReg(ShiftReg); 9324 } else if (MI->getOpcode() == PPC::FADDrtz) { 9325 // This pseudo performs an FADD with rounding mode temporarily forced 9326 // to round-to-zero. We emit this via custom inserter since the FPSCR 9327 // is not modeled at the SelectionDAG level. 9328 unsigned Dest = MI->getOperand(0).getReg(); 9329 unsigned Src1 = MI->getOperand(1).getReg(); 9330 unsigned Src2 = MI->getOperand(2).getReg(); 9331 DebugLoc dl = MI->getDebugLoc(); 9332 9333 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9334 unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 9335 9336 // Save FPSCR value. 9337 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg); 9338 9339 // Set rounding mode to round-to-zero. 9340 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31); 9341 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30); 9342 9343 // Perform addition. 9344 BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2); 9345 9346 // Restore FPSCR value. 9347 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg); 9348 } else if (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT || 9349 MI->getOpcode() == PPC::ANDIo_1_GT_BIT || 9350 MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 9351 MI->getOpcode() == PPC::ANDIo_1_GT_BIT8) { 9352 unsigned Opcode = (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 9353 MI->getOpcode() == PPC::ANDIo_1_GT_BIT8) ? 9354 PPC::ANDIo8 : PPC::ANDIo; 9355 bool isEQ = (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT || 9356 MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8); 9357 9358 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9359 unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ? 9360 &PPC::GPRCRegClass : 9361 &PPC::G8RCRegClass); 9362 9363 DebugLoc dl = MI->getDebugLoc(); 9364 BuildMI(*BB, MI, dl, TII->get(Opcode), Dest) 9365 .addReg(MI->getOperand(1).getReg()).addImm(1); 9366 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), 9367 MI->getOperand(0).getReg()) 9368 .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT); 9369 } else if (MI->getOpcode() == PPC::TCHECK_RET) { 9370 DebugLoc Dl = MI->getDebugLoc(); 9371 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9372 unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 9373 BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg); 9374 return BB; 9375 } else { 9376 llvm_unreachable("Unexpected instr type to insert"); 9377 } 9378 9379 MI->eraseFromParent(); // The pseudo instruction is gone now. 
9380   return BB;
9381 }
9382
9383 //===----------------------------------------------------------------------===//
9384 // Target Optimization Hooks
9385 //===----------------------------------------------------------------------===//
9386
9387 static std::string getRecipOp(const char *Base, EVT VT) {
9388   std::string RecipOp(Base);
9389   if (VT.getScalarType() == MVT::f64)
9390     RecipOp += "d";
9391   else
9392     RecipOp += "f";
9393
9394   if (VT.isVector())
9395     RecipOp = "vec-" + RecipOp;
9396
9397   return RecipOp;
9398 }
9399
9400 SDValue PPCTargetLowering::getRsqrtEstimate(SDValue Operand,
9401                                             DAGCombinerInfo &DCI,
9402                                             unsigned &RefinementSteps,
9403                                             bool &UseOneConstNR) const {
9404   EVT VT = Operand.getValueType();
9405   if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
9406       (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
9407       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
9408       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
9409       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
9410       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
9411     TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals;
9412     std::string RecipOp = getRecipOp("sqrt", VT);
9413     if (!Recips.isEnabled(RecipOp))
9414       return SDValue();
9415
9416     RefinementSteps = Recips.getRefinementSteps(RecipOp);
9417     UseOneConstNR = true;
9418     return DCI.DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
9419   }
9420   return SDValue();
9421 }
9422
9423 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand,
9424                                             DAGCombinerInfo &DCI,
9425                                             unsigned &RefinementSteps) const {
9426   EVT VT = Operand.getValueType();
9427   if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
9428       (VT == MVT::f64 && Subtarget.hasFRE()) ||
9429       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
9430       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
9431       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
9432       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
9433     TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals;
9434     std::string RecipOp = getRecipOp("div", VT);
9435     if (!Recips.isEnabled(RecipOp))
9436       return SDValue();
9437
9438     RefinementSteps = Recips.getRefinementSteps(RecipOp);
9439     return DCI.DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
9440   }
9441   return SDValue();
9442 }
9443
9444 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
9445   // Note: This functionality is used only when unsafe-fp-math is enabled, and
9446   // on cores with reciprocal estimates (which are used when unsafe-fp-math is
9447   // enabled for division), this functionality is redundant with the default
9448   // combiner logic (once the division -> reciprocal/multiply transformation
9449   // has taken place). As a result, this matters more for older cores than for
9450   // newer ones.
9451
9452   // Combine multiple FDIVs with the same divisor into multiple FMULs by the
9453   // reciprocal if there are two or more FDIVs (for embedded cores with only
9454   // one FP pipeline) or three or more FDIVs (for generic OOO cores).
9455   switch (Subtarget.getDarwinDirective()) {
9456   default:
9457     return 3;
9458   case PPC::DIR_440:
9459   case PPC::DIR_A2:
9460   case PPC::DIR_E500mc:
9461   case PPC::DIR_E5500:
9462     return 2;
9463   }
9464 }
9465
9466 // isConsecutiveLSLoc needs to work even if all adds have not yet been
9467 // collapsed, and so we need to look through chains of them.
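// For example, given (add (add %x, 16), 8), this accumulates Base = %x and
// Offset = 24 even before the two adds have been reassociated into one.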
9468 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base, 9469 int64_t& Offset, SelectionDAG &DAG) { 9470 if (DAG.isBaseWithConstantOffset(Loc)) { 9471 Base = Loc.getOperand(0); 9472 Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue(); 9473 9474 // The base might itself be a base plus an offset, and if so, accumulate 9475 // that as well. 9476 getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG); 9477 } 9478 } 9479 9480 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, 9481 unsigned Bytes, int Dist, 9482 SelectionDAG &DAG) { 9483 if (VT.getSizeInBits() / 8 != Bytes) 9484 return false; 9485 9486 SDValue BaseLoc = Base->getBasePtr(); 9487 if (Loc.getOpcode() == ISD::FrameIndex) { 9488 if (BaseLoc.getOpcode() != ISD::FrameIndex) 9489 return false; 9490 const MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 9491 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 9492 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 9493 int FS = MFI->getObjectSize(FI); 9494 int BFS = MFI->getObjectSize(BFI); 9495 if (FS != BFS || FS != (int)Bytes) return false; 9496 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes); 9497 } 9498 9499 SDValue Base1 = Loc, Base2 = BaseLoc; 9500 int64_t Offset1 = 0, Offset2 = 0; 9501 getBaseWithConstantOffset(Loc, Base1, Offset1, DAG); 9502 getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG); 9503 if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes)) 9504 return true; 9505 9506 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9507 const GlobalValue *GV1 = nullptr; 9508 const GlobalValue *GV2 = nullptr; 9509 Offset1 = 0; 9510 Offset2 = 0; 9511 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1); 9512 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2); 9513 if (isGA1 && isGA2 && GV1 == GV2) 9514 return Offset1 == (Offset2 + Dist*Bytes); 9515 return false; 9516 } 9517 9518 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does 9519 // not enforce equality of the chain operands. 
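// Dist is measured in units of Bytes: Dist == 1 means that N accesses the
// memory immediately after Base's access, Dist == -1 the memory immediately
// before it.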
9520 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
9521                             unsigned Bytes, int Dist,
9522                             SelectionDAG &DAG) {
9523   if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
9524     EVT VT = LS->getMemoryVT();
9525     SDValue Loc = LS->getBasePtr();
9526     return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
9527   }
9528
9529   if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
9530     EVT VT;
9531     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
9532     default: return false;
9533     case Intrinsic::ppc_qpx_qvlfd:
9534     case Intrinsic::ppc_qpx_qvlfda:
9535       VT = MVT::v4f64;
9536       break;
9537     case Intrinsic::ppc_qpx_qvlfs:
9538     case Intrinsic::ppc_qpx_qvlfsa:
9539       VT = MVT::v4f32;
9540       break;
9541     case Intrinsic::ppc_qpx_qvlfcd:
9542     case Intrinsic::ppc_qpx_qvlfcda:
9543       VT = MVT::v2f64;
9544       break;
9545     case Intrinsic::ppc_qpx_qvlfcs:
9546     case Intrinsic::ppc_qpx_qvlfcsa:
9547       VT = MVT::v2f32;
9548       break;
9549     case Intrinsic::ppc_qpx_qvlfiwa:
9550     case Intrinsic::ppc_qpx_qvlfiwz:
9551     case Intrinsic::ppc_altivec_lvx:
9552     case Intrinsic::ppc_altivec_lvxl:
9553     case Intrinsic::ppc_vsx_lxvw4x:
9554       VT = MVT::v4i32;
9555       break;
9556     case Intrinsic::ppc_vsx_lxvd2x:
9557       VT = MVT::v2f64;
9558       break;
9559     case Intrinsic::ppc_altivec_lvebx:
9560       VT = MVT::i8;
9561       break;
9562     case Intrinsic::ppc_altivec_lvehx:
9563       VT = MVT::i16;
9564       break;
9565     case Intrinsic::ppc_altivec_lvewx:
9566       VT = MVT::i32;
9567       break;
9568     }
9569
9570     return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
9571   }
9572
9573   if (N->getOpcode() == ISD::INTRINSIC_VOID) {
9574     EVT VT;
9575     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
9576     default: return false;
9577     case Intrinsic::ppc_qpx_qvstfd:
9578     case Intrinsic::ppc_qpx_qvstfda:
9579       VT = MVT::v4f64;
9580       break;
9581     case Intrinsic::ppc_qpx_qvstfs:
9582     case Intrinsic::ppc_qpx_qvstfsa:
9583       VT = MVT::v4f32;
9584       break;
9585     case Intrinsic::ppc_qpx_qvstfcd:
9586     case Intrinsic::ppc_qpx_qvstfcda:
9587       VT = MVT::v2f64;
9588       break;
9589     case Intrinsic::ppc_qpx_qvstfcs:
9590     case Intrinsic::ppc_qpx_qvstfcsa:
9591       VT = MVT::v2f32;
9592       break;
9593     case Intrinsic::ppc_qpx_qvstfiw:
9594     case Intrinsic::ppc_qpx_qvstfiwa:
9595     case Intrinsic::ppc_altivec_stvx:
9596     case Intrinsic::ppc_altivec_stvxl:
9597     case Intrinsic::ppc_vsx_stxvw4x:
9598       VT = MVT::v4i32;
9599       break;
9600     case Intrinsic::ppc_vsx_stxvd2x:
9601       VT = MVT::v2f64;
9602       break;
9603     case Intrinsic::ppc_altivec_stvebx:
9604       VT = MVT::i8;
9605       break;
9606     case Intrinsic::ppc_altivec_stvehx:
9607       VT = MVT::i16;
9608       break;
9609     case Intrinsic::ppc_altivec_stvewx:
9610       VT = MVT::i32;
9611       break;
9612     }
9613
9614     return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
9615   }
9616
9617   return false;
9618 }
9619
9620 // Return true if there is a nearby consecutive load to the one provided
9621 // (regardless of alignment). We search up and down the chain, looking through
9622 // token factors and other loads (but nothing else). As a result, a true result
9623 // indicates that it is safe to create a new consecutive load adjacent to the
9624 // load provided.
9625 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
9626   SDValue Chain = LD->getChain();
9627   EVT VT = LD->getMemoryVT();
9628
9629   SmallSet<SDNode *, 16> LoadRoots;
9630   SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
9631   SmallSet<SDNode *, 16> Visited;
9632
9633   // First, search up the chain, branching to follow all token-factor operands.
9634 // If we find a consecutive load, then we're done, otherwise, record all
9635 // nodes just above the top-level loads and token factors.
9636   while (!Queue.empty()) {
9637     SDNode *ChainNext = Queue.pop_back_val();
9638     if (!Visited.insert(ChainNext).second)
9639       continue;
9640
9641     if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
9642       if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
9643         return true;
9644
9645       if (!Visited.count(ChainLD->getChain().getNode()))
9646         Queue.push_back(ChainLD->getChain().getNode());
9647     } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
9648       for (const SDUse &O : ChainNext->ops())
9649         if (!Visited.count(O.getNode()))
9650           Queue.push_back(O.getNode());
9651     } else
9652       LoadRoots.insert(ChainNext);
9653   }
9654
9655   // Second, search down the chain, starting from the top-level nodes recorded
9656   // in the first phase. These top-level nodes are the nodes just above all
9657   // loads and token factors. Starting with their uses, recursively look through
9658   // all loads (just the chain uses) and token factors to find a consecutive
9659   // load.
9660   Visited.clear();
9661   Queue.clear();
9662
9663   for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
9664        IE = LoadRoots.end(); I != IE; ++I) {
9665     Queue.push_back(*I);
9666
9667     while (!Queue.empty()) {
9668       SDNode *LoadRoot = Queue.pop_back_val();
9669       if (!Visited.insert(LoadRoot).second)
9670         continue;
9671
9672       if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
9673         if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
9674           return true;
9675
9676       for (SDNode::use_iterator UI = LoadRoot->use_begin(),
9677            UE = LoadRoot->use_end(); UI != UE; ++UI)
9678         if (((isa<MemSDNode>(*UI) &&
9679               cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
9680              UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
9681           Queue.push_back(*UI);
9682     }
9683   }
9684
9685   return false;
9686 }
9687
9688 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
9689                                                   DAGCombinerInfo &DCI) const {
9690   SelectionDAG &DAG = DCI.DAG;
9691   SDLoc dl(N);
9692
9693   assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
9694   // If we're tracking CR bits, we need to be careful that we don't have:
9695   //   trunc(binary-ops(zext(x), zext(y)))
9696   // or
9697   //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
9698   // such that we're unnecessarily moving things into GPRs when it would be
9699   // better to keep them in CR bits.
9700
9701   // Note that trunc here can be an actual i1 trunc, or can be the effective
9702   // truncation that comes from a setcc or select_cc.
9703   if (N->getOpcode() == ISD::TRUNCATE &&
9704       N->getValueType(0) != MVT::i1)
9705     return SDValue();
9706
9707   if (N->getOperand(0).getValueType() != MVT::i32 &&
9708       N->getOperand(0).getValueType() != MVT::i64)
9709     return SDValue();
9710
9711   if (N->getOpcode() == ISD::SETCC ||
9712       N->getOpcode() == ISD::SELECT_CC) {
9713     // If we're looking at a comparison, then we need to make sure that the
9714     // high bits (all except for the first) don't affect the result.
9715     ISD::CondCode CC =
9716       cast<CondCodeSDNode>(N->getOperand(
9717         N->getOpcode() == ISD::SETCC ?
2 : 4))->get(); 9718 unsigned OpBits = N->getOperand(0).getValueSizeInBits(); 9719 9720 if (ISD::isSignedIntSetCC(CC)) { 9721 if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits || 9722 DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits) 9723 return SDValue(); 9724 } else if (ISD::isUnsignedIntSetCC(CC)) { 9725 if (!DAG.MaskedValueIsZero(N->getOperand(0), 9726 APInt::getHighBitsSet(OpBits, OpBits-1)) || 9727 !DAG.MaskedValueIsZero(N->getOperand(1), 9728 APInt::getHighBitsSet(OpBits, OpBits-1))) 9729 return SDValue(); 9730 } else { 9731 // This is neither a signed nor an unsigned comparison, just make sure 9732 // that the high bits are equal. 9733 APInt Op1Zero, Op1One; 9734 APInt Op2Zero, Op2One; 9735 DAG.computeKnownBits(N->getOperand(0), Op1Zero, Op1One); 9736 DAG.computeKnownBits(N->getOperand(1), Op2Zero, Op2One); 9737 9738 // We don't really care about what is known about the first bit (if 9739 // anything), so clear it in all masks prior to comparing them. 9740 Op1Zero.clearBit(0); Op1One.clearBit(0); 9741 Op2Zero.clearBit(0); Op2One.clearBit(0); 9742 9743 if (Op1Zero != Op2Zero || Op1One != Op2One) 9744 return SDValue(); 9745 } 9746 } 9747 9748 // We now know that the higher-order bits are irrelevant, we just need to 9749 // make sure that all of the intermediate operations are bit operations, and 9750 // all inputs are extensions. 9751 if (N->getOperand(0).getOpcode() != ISD::AND && 9752 N->getOperand(0).getOpcode() != ISD::OR && 9753 N->getOperand(0).getOpcode() != ISD::XOR && 9754 N->getOperand(0).getOpcode() != ISD::SELECT && 9755 N->getOperand(0).getOpcode() != ISD::SELECT_CC && 9756 N->getOperand(0).getOpcode() != ISD::TRUNCATE && 9757 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND && 9758 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND && 9759 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND) 9760 return SDValue(); 9761 9762 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) && 9763 N->getOperand(1).getOpcode() != ISD::AND && 9764 N->getOperand(1).getOpcode() != ISD::OR && 9765 N->getOperand(1).getOpcode() != ISD::XOR && 9766 N->getOperand(1).getOpcode() != ISD::SELECT && 9767 N->getOperand(1).getOpcode() != ISD::SELECT_CC && 9768 N->getOperand(1).getOpcode() != ISD::TRUNCATE && 9769 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND && 9770 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND && 9771 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND) 9772 return SDValue(); 9773 9774 SmallVector<SDValue, 4> Inputs; 9775 SmallVector<SDValue, 8> BinOps, PromOps; 9776 SmallPtrSet<SDNode *, 16> Visited; 9777 9778 for (unsigned i = 0; i < 2; ++i) { 9779 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 9780 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 9781 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 9782 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) || 9783 isa<ConstantSDNode>(N->getOperand(i))) 9784 Inputs.push_back(N->getOperand(i)); 9785 else 9786 BinOps.push_back(N->getOperand(i)); 9787 9788 if (N->getOpcode() == ISD::TRUNCATE) 9789 break; 9790 } 9791 9792 // Visit all inputs, collect all binary operations (and, or, xor and 9793 // select) that are all fed by extensions. 9794 while (!BinOps.empty()) { 9795 SDValue BinOp = BinOps.back(); 9796 BinOps.pop_back(); 9797 9798 if (!Visited.insert(BinOp.getNode()).second) 9799 continue; 9800 9801 PromOps.push_back(BinOp); 9802 9803 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 9804 // The condition of the select is not promoted. 
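      // (For SELECT that is operand 0; for SELECT_CC, operands 0 and 1 are
      // the values being compared, and only operands 2 and 3, the selected
      // values, belong to the to-be-promoted cluster.)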
9805 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 9806 continue; 9807 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 9808 continue; 9809 9810 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 9811 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 9812 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 9813 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) || 9814 isa<ConstantSDNode>(BinOp.getOperand(i))) { 9815 Inputs.push_back(BinOp.getOperand(i)); 9816 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 9817 BinOp.getOperand(i).getOpcode() == ISD::OR || 9818 BinOp.getOperand(i).getOpcode() == ISD::XOR || 9819 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 9820 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC || 9821 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 9822 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 9823 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 9824 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) { 9825 BinOps.push_back(BinOp.getOperand(i)); 9826 } else { 9827 // We have an input that is not an extension or another binary 9828 // operation; we'll abort this transformation. 9829 return SDValue(); 9830 } 9831 } 9832 } 9833 9834 // Make sure that this is a self-contained cluster of operations (which 9835 // is not quite the same thing as saying that everything has only one 9836 // use). 9837 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 9838 if (isa<ConstantSDNode>(Inputs[i])) 9839 continue; 9840 9841 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 9842 UE = Inputs[i].getNode()->use_end(); 9843 UI != UE; ++UI) { 9844 SDNode *User = *UI; 9845 if (User != N && !Visited.count(User)) 9846 return SDValue(); 9847 9848 // Make sure that we're not going to promote the non-output-value 9849 // operand(s) or SELECT or SELECT_CC. 9850 // FIXME: Although we could sometimes handle this, and it does occur in 9851 // practice that one of the condition inputs to the select is also one of 9852 // the outputs, we currently can't deal with this. 9853 if (User->getOpcode() == ISD::SELECT) { 9854 if (User->getOperand(0) == Inputs[i]) 9855 return SDValue(); 9856 } else if (User->getOpcode() == ISD::SELECT_CC) { 9857 if (User->getOperand(0) == Inputs[i] || 9858 User->getOperand(1) == Inputs[i]) 9859 return SDValue(); 9860 } 9861 } 9862 } 9863 9864 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 9865 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 9866 UE = PromOps[i].getNode()->use_end(); 9867 UI != UE; ++UI) { 9868 SDNode *User = *UI; 9869 if (User != N && !Visited.count(User)) 9870 return SDValue(); 9871 9872 // Make sure that we're not going to promote the non-output-value 9873 // operand(s) or SELECT or SELECT_CC. 9874 // FIXME: Although we could sometimes handle this, and it does occur in 9875 // practice that one of the condition inputs to the select is also one of 9876 // the outputs, we currently can't deal with this. 9877 if (User->getOpcode() == ISD::SELECT) { 9878 if (User->getOperand(0) == PromOps[i]) 9879 return SDValue(); 9880 } else if (User->getOpcode() == ISD::SELECT_CC) { 9881 if (User->getOperand(0) == PromOps[i] || 9882 User->getOperand(1) == PromOps[i]) 9883 return SDValue(); 9884 } 9885 } 9886 } 9887 9888 // Replace all inputs with the extension operand. 
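  // For instance, every in-cluster use of zext(%x:i1) is rewritten to use
  // %x directly, so the intermediate bitwise operations end up being
  // performed on i1 (CR bit) values.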
9889 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 9890 // Constants may have users outside the cluster of to-be-promoted nodes, 9891 // and so we need to replace those as we do the promotions. 9892 if (isa<ConstantSDNode>(Inputs[i])) 9893 continue; 9894 else 9895 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0)); 9896 } 9897 9898 // Replace all operations (these are all the same, but have a different 9899 // (i1) return type). DAG.getNode will validate that the types of 9900 // a binary operator match, so go through the list in reverse so that 9901 // we've likely promoted both operands first. Any intermediate truncations or 9902 // extensions disappear. 9903 while (!PromOps.empty()) { 9904 SDValue PromOp = PromOps.back(); 9905 PromOps.pop_back(); 9906 9907 if (PromOp.getOpcode() == ISD::TRUNCATE || 9908 PromOp.getOpcode() == ISD::SIGN_EXTEND || 9909 PromOp.getOpcode() == ISD::ZERO_EXTEND || 9910 PromOp.getOpcode() == ISD::ANY_EXTEND) { 9911 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) && 9912 PromOp.getOperand(0).getValueType() != MVT::i1) { 9913 // The operand is not yet ready (see comment below). 9914 PromOps.insert(PromOps.begin(), PromOp); 9915 continue; 9916 } 9917 9918 SDValue RepValue = PromOp.getOperand(0); 9919 if (isa<ConstantSDNode>(RepValue)) 9920 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue); 9921 9922 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue); 9923 continue; 9924 } 9925 9926 unsigned C; 9927 switch (PromOp.getOpcode()) { 9928 default: C = 0; break; 9929 case ISD::SELECT: C = 1; break; 9930 case ISD::SELECT_CC: C = 2; break; 9931 } 9932 9933 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 9934 PromOp.getOperand(C).getValueType() != MVT::i1) || 9935 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 9936 PromOp.getOperand(C+1).getValueType() != MVT::i1)) { 9937 // The to-be-promoted operands of this node have not yet been 9938 // promoted (this should be rare because we're going through the 9939 // list backward, but if one of the operands has several users in 9940 // this cluster of to-be-promoted nodes, it is possible). 9941 PromOps.insert(PromOps.begin(), PromOp); 9942 continue; 9943 } 9944 9945 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 9946 PromOp.getNode()->op_end()); 9947 9948 // If there are any constant inputs, make sure they're replaced now. 9949 for (unsigned i = 0; i < 2; ++i) 9950 if (isa<ConstantSDNode>(Ops[C+i])) 9951 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]); 9952 9953 DAG.ReplaceAllUsesOfValueWith(PromOp, 9954 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops)); 9955 } 9956 9957 // Now we're left with the initial truncation itself. 9958 if (N->getOpcode() == ISD::TRUNCATE) 9959 return N->getOperand(0); 9960 9961 // Otherwise, this is a comparison. The operands to be compared have just 9962 // changed type (to i1), but everything else is the same. 9963 return SDValue(N, 0); 9964 } 9965 9966 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N, 9967 DAGCombinerInfo &DCI) const { 9968 SelectionDAG &DAG = DCI.DAG; 9969 SDLoc dl(N); 9970 9971 // If we're tracking CR bits, we need to be careful that we don't have: 9972 // zext(binary-ops(trunc(x), trunc(y))) 9973 // or 9974 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...) 9975 // such that we're unnecessarily moving things into CR bits that can more 9976 // efficiently stay in GPRs. 
Note that if we're not certain that the high 9977 // bits are set as required by the final extension, we still may need to do 9978 // some masking to get the proper behavior. 9979 9980 // This same functionality is important on PPC64 when dealing with 9981 // 32-to-64-bit extensions; these occur often when 32-bit values are used as 9982 // the return values of functions. Because it is so similar, it is handled 9983 // here as well. 9984 9985 if (N->getValueType(0) != MVT::i32 && 9986 N->getValueType(0) != MVT::i64) 9987 return SDValue(); 9988 9989 if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) || 9990 (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64()))) 9991 return SDValue(); 9992 9993 if (N->getOperand(0).getOpcode() != ISD::AND && 9994 N->getOperand(0).getOpcode() != ISD::OR && 9995 N->getOperand(0).getOpcode() != ISD::XOR && 9996 N->getOperand(0).getOpcode() != ISD::SELECT && 9997 N->getOperand(0).getOpcode() != ISD::SELECT_CC) 9998 return SDValue(); 9999 10000 SmallVector<SDValue, 4> Inputs; 10001 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps; 10002 SmallPtrSet<SDNode *, 16> Visited; 10003 10004 // Visit all inputs, collect all binary operations (and, or, xor and 10005 // select) that are all fed by truncations. 10006 while (!BinOps.empty()) { 10007 SDValue BinOp = BinOps.back(); 10008 BinOps.pop_back(); 10009 10010 if (!Visited.insert(BinOp.getNode()).second) 10011 continue; 10012 10013 PromOps.push_back(BinOp); 10014 10015 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 10016 // The condition of the select is not promoted. 10017 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 10018 continue; 10019 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 10020 continue; 10021 10022 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 10023 isa<ConstantSDNode>(BinOp.getOperand(i))) { 10024 Inputs.push_back(BinOp.getOperand(i)); 10025 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 10026 BinOp.getOperand(i).getOpcode() == ISD::OR || 10027 BinOp.getOperand(i).getOpcode() == ISD::XOR || 10028 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 10029 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) { 10030 BinOps.push_back(BinOp.getOperand(i)); 10031 } else { 10032 // We have an input that is not a truncation or another binary 10033 // operation; we'll abort this transformation. 10034 return SDValue(); 10035 } 10036 } 10037 } 10038 10039 // The operands of a select that must be truncated when the select is 10040 // promoted because the operand is actually part of the to-be-promoted set. 10041 DenseMap<SDNode *, EVT> SelectTruncOp[2]; 10042 10043 // Make sure that this is a self-contained cluster of operations (which 10044 // is not quite the same thing as saying that everything has only one 10045 // use). 10046 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 10047 if (isa<ConstantSDNode>(Inputs[i])) 10048 continue; 10049 10050 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 10051 UE = Inputs[i].getNode()->use_end(); 10052 UI != UE; ++UI) { 10053 SDNode *User = *UI; 10054 if (User != N && !Visited.count(User)) 10055 return SDValue(); 10056 10057 // If we're going to promote the non-output-value operand(s) or SELECT or 10058 // SELECT_CC, record them for truncation. 
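      // (Unlike DAGCombineTruncBoolExt above, which simply gives up in this
      // situation, promotion can proceed here: the condition operand keeps
      // its original type, so we record that type and re-truncate the
      // operand once the select has been promoted.)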
10059 if (User->getOpcode() == ISD::SELECT) { 10060 if (User->getOperand(0) == Inputs[i]) 10061 SelectTruncOp[0].insert(std::make_pair(User, 10062 User->getOperand(0).getValueType())); 10063 } else if (User->getOpcode() == ISD::SELECT_CC) { 10064 if (User->getOperand(0) == Inputs[i]) 10065 SelectTruncOp[0].insert(std::make_pair(User, 10066 User->getOperand(0).getValueType())); 10067 if (User->getOperand(1) == Inputs[i]) 10068 SelectTruncOp[1].insert(std::make_pair(User, 10069 User->getOperand(1).getValueType())); 10070 } 10071 } 10072 } 10073 10074 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 10075 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 10076 UE = PromOps[i].getNode()->use_end(); 10077 UI != UE; ++UI) { 10078 SDNode *User = *UI; 10079 if (User != N && !Visited.count(User)) 10080 return SDValue(); 10081 10082 // If we're going to promote the non-output-value operand(s) or SELECT or 10083 // SELECT_CC, record them for truncation. 10084 if (User->getOpcode() == ISD::SELECT) { 10085 if (User->getOperand(0) == PromOps[i]) 10086 SelectTruncOp[0].insert(std::make_pair(User, 10087 User->getOperand(0).getValueType())); 10088 } else if (User->getOpcode() == ISD::SELECT_CC) { 10089 if (User->getOperand(0) == PromOps[i]) 10090 SelectTruncOp[0].insert(std::make_pair(User, 10091 User->getOperand(0).getValueType())); 10092 if (User->getOperand(1) == PromOps[i]) 10093 SelectTruncOp[1].insert(std::make_pair(User, 10094 User->getOperand(1).getValueType())); 10095 } 10096 } 10097 } 10098 10099 unsigned PromBits = N->getOperand(0).getValueSizeInBits(); 10100 bool ReallyNeedsExt = false; 10101 if (N->getOpcode() != ISD::ANY_EXTEND) { 10102 // If all of the inputs are not already sign/zero extended, then 10103 // we'll still need to do that at the end. 10104 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 10105 if (isa<ConstantSDNode>(Inputs[i])) 10106 continue; 10107 10108 unsigned OpBits = 10109 Inputs[i].getOperand(0).getValueSizeInBits(); 10110 assert(PromBits < OpBits && "Truncation not to a smaller bit count?"); 10111 10112 if ((N->getOpcode() == ISD::ZERO_EXTEND && 10113 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0), 10114 APInt::getHighBitsSet(OpBits, 10115 OpBits-PromBits))) || 10116 (N->getOpcode() == ISD::SIGN_EXTEND && 10117 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) < 10118 (OpBits-(PromBits-1)))) { 10119 ReallyNeedsExt = true; 10120 break; 10121 } 10122 } 10123 } 10124 10125 // Replace all inputs, either with the truncation operand, or a 10126 // truncation or extension to the final output type. 10127 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 10128 // Constant inputs need to be replaced with the to-be-promoted nodes that 10129 // use them because they might have users outside of the cluster of 10130 // promoted nodes. 
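    // (That replacement happens below, as each constant-using operation is
    // promoted; at this point constants are simply skipped.)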
10131 if (isa<ConstantSDNode>(Inputs[i])) 10132 continue; 10133 10134 SDValue InSrc = Inputs[i].getOperand(0); 10135 if (Inputs[i].getValueType() == N->getValueType(0)) 10136 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc); 10137 else if (N->getOpcode() == ISD::SIGN_EXTEND) 10138 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 10139 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0))); 10140 else if (N->getOpcode() == ISD::ZERO_EXTEND) 10141 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 10142 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0))); 10143 else 10144 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 10145 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0))); 10146 } 10147 10148 // Replace all operations (these are all the same, but have a different 10149 // (promoted) return type). DAG.getNode will validate that the types of 10150 // a binary operator match, so go through the list in reverse so that 10151 // we've likely promoted both operands first. 10152 while (!PromOps.empty()) { 10153 SDValue PromOp = PromOps.back(); 10154 PromOps.pop_back(); 10155 10156 unsigned C; 10157 switch (PromOp.getOpcode()) { 10158 default: C = 0; break; 10159 case ISD::SELECT: C = 1; break; 10160 case ISD::SELECT_CC: C = 2; break; 10161 } 10162 10163 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 10164 PromOp.getOperand(C).getValueType() != N->getValueType(0)) || 10165 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 10166 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) { 10167 // The to-be-promoted operands of this node have not yet been 10168 // promoted (this should be rare because we're going through the 10169 // list backward, but if one of the operands has several users in 10170 // this cluster of to-be-promoted nodes, it is possible). 10171 PromOps.insert(PromOps.begin(), PromOp); 10172 continue; 10173 } 10174 10175 // For SELECT and SELECT_CC nodes, we do a similar check for any 10176 // to-be-promoted comparison inputs. 10177 if (PromOp.getOpcode() == ISD::SELECT || 10178 PromOp.getOpcode() == ISD::SELECT_CC) { 10179 if ((SelectTruncOp[0].count(PromOp.getNode()) && 10180 PromOp.getOperand(0).getValueType() != N->getValueType(0)) || 10181 (SelectTruncOp[1].count(PromOp.getNode()) && 10182 PromOp.getOperand(1).getValueType() != N->getValueType(0))) { 10183 PromOps.insert(PromOps.begin(), PromOp); 10184 continue; 10185 } 10186 } 10187 10188 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 10189 PromOp.getNode()->op_end()); 10190 10191 // If this node has constant inputs, then they'll need to be promoted here. 10192 for (unsigned i = 0; i < 2; ++i) { 10193 if (!isa<ConstantSDNode>(Ops[C+i])) 10194 continue; 10195 if (Ops[C+i].getValueType() == N->getValueType(0)) 10196 continue; 10197 10198 if (N->getOpcode() == ISD::SIGN_EXTEND) 10199 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 10200 else if (N->getOpcode() == ISD::ZERO_EXTEND) 10201 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 10202 else 10203 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 10204 } 10205 10206 // If we've promoted the comparison inputs of a SELECT or SELECT_CC, 10207 // truncate them again to the original value type. 
10208     if (PromOp.getOpcode() == ISD::SELECT ||
10209         PromOp.getOpcode() == ISD::SELECT_CC) {
10210       auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
10211       if (SI0 != SelectTruncOp[0].end())
10212         Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
10213       auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
10214       if (SI1 != SelectTruncOp[1].end())
10215         Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
10216     }
10217
10218     DAG.ReplaceAllUsesOfValueWith(PromOp,
10219       DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
10220   }
10221
10222   // Now we're left with the initial extension itself.
10223   if (!ReallyNeedsExt)
10224     return N->getOperand(0);
10225
10226   // To zero extend, just mask off everything except for the first bit (in the
10227   // i1 case).
10228   if (N->getOpcode() == ISD::ZERO_EXTEND)
10229     return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
10230                        DAG.getConstant(APInt::getLowBitsSet(
10231                                          N->getValueSizeInBits(0), PromBits),
10232                                        dl, N->getValueType(0)));
10233
10234   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
10235          "Invalid extension type");
10236   EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
10237   SDValue ShiftCst =
10238       DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
10239   return DAG.getNode(
10240       ISD::SRA, dl, N->getValueType(0),
10241       DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
10242       ShiftCst);
10243 }
10244
10245 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
10246                                               DAGCombinerInfo &DCI) const {
10247   assert((N->getOpcode() == ISD::SINT_TO_FP ||
10248           N->getOpcode() == ISD::UINT_TO_FP) &&
10249          "Need an int -> FP conversion node here");
10250
10251   if (!Subtarget.has64BitSupport())
10252     return SDValue();
10253
10254   SelectionDAG &DAG = DCI.DAG;
10255   SDLoc dl(N);
10256   SDValue Op(N, 0);
10257
10258   // Don't handle ppc_fp128 here or i1 conversions.
10259   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
10260     return SDValue();
10261   if (Op.getOperand(0).getValueType() == MVT::i1)
10262     return SDValue();
10263
10264   // For i32 intermediate values, unfortunately, the conversion functions
10265   // leave the upper 32 bits of the value undefined. Within the set of
10266   // scalar instructions, we have no method for zero- or sign-extending the
10267   // value. Thus, we cannot handle i32 intermediate values here.
10268   if (Op.getOperand(0).getValueType() == MVT::i32)
10269     return SDValue();
10270
10271   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
10272          "UINT_TO_FP is supported only with FPCVT");
10273
10274   // If we have FCFIDS, then use it when converting to single-precision.
10275   // Otherwise, convert to double-precision and then round.
10276   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
10277                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
10278                                                             : PPCISD::FCFIDS)
10279                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
10280                                                             : PPCISD::FCFID);
10281   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
10282                   ? MVT::f32
10283                   : MVT::f64;
10284
10285   // If we're converting from a float to an int, and back to a float again,
10286   // then we don't need the store/load pair at all.
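  // For example, (f64 (sint_to_fp (fp_to_sint f64:$x))) can then be emitted
  // as an fctidz/fcfid pair that stays entirely in FP registers.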
10287 if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT && 10288 Subtarget.hasFPCVT()) || 10289 (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) { 10290 SDValue Src = Op.getOperand(0).getOperand(0); 10291 if (Src.getValueType() == MVT::f32) { 10292 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 10293 DCI.AddToWorklist(Src.getNode()); 10294 } else if (Src.getValueType() != MVT::f64) { 10295 // Make sure that we don't pick up a ppc_fp128 source value. 10296 return SDValue(); 10297 } 10298 10299 unsigned FCTOp = 10300 Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 10301 PPCISD::FCTIDUZ; 10302 10303 SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src); 10304 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp); 10305 10306 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) { 10307 FP = DAG.getNode(ISD::FP_ROUND, dl, 10308 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 10309 DCI.AddToWorklist(FP.getNode()); 10310 } 10311 10312 return FP; 10313 } 10314 10315 return SDValue(); 10316 } 10317 10318 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for 10319 // builtins) into loads with swaps. 10320 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N, 10321 DAGCombinerInfo &DCI) const { 10322 SelectionDAG &DAG = DCI.DAG; 10323 SDLoc dl(N); 10324 SDValue Chain; 10325 SDValue Base; 10326 MachineMemOperand *MMO; 10327 10328 switch (N->getOpcode()) { 10329 default: 10330 llvm_unreachable("Unexpected opcode for little endian VSX load"); 10331 case ISD::LOAD: { 10332 LoadSDNode *LD = cast<LoadSDNode>(N); 10333 Chain = LD->getChain(); 10334 Base = LD->getBasePtr(); 10335 MMO = LD->getMemOperand(); 10336 // If the MMO suggests this isn't a load of a full vector, leave 10337 // things alone. For a built-in, we have to make the change for 10338 // correctness, so if there is a size problem that will be a bug. 10339 if (MMO->getSize() < 16) 10340 return SDValue(); 10341 break; 10342 } 10343 case ISD::INTRINSIC_W_CHAIN: { 10344 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 10345 Chain = Intrin->getChain(); 10346 // Similarly to the store case below, Intrin->getBasePtr() doesn't get 10347 // us what we want. Get operand 2 instead. 10348 Base = Intrin->getOperand(2); 10349 MMO = Intrin->getMemOperand(); 10350 break; 10351 } 10352 } 10353 10354 MVT VecTy = N->getValueType(0).getSimpleVT(); 10355 SDValue LoadOps[] = { Chain, Base }; 10356 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl, 10357 DAG.getVTList(MVT::v2f64, MVT::Other), 10358 LoadOps, MVT::v2f64, MMO); 10359 10360 DCI.AddToWorklist(Load.getNode()); 10361 Chain = Load.getValue(1); 10362 SDValue Swap = DAG.getNode( 10363 PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load); 10364 DCI.AddToWorklist(Swap.getNode()); 10365 10366 // Add a bitcast if the resulting load type doesn't match v2f64. 10367 if (VecTy != MVT::v2f64) { 10368 SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap); 10369 DCI.AddToWorklist(N.getNode()); 10370 // Package {bitcast value, swap's chain} to match Load's shape. 10371 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other), 10372 N, Swap.getValue(1)); 10373 } 10374 10375 return Swap; 10376 } 10377 10378 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for 10379 // builtins) into stores with swaps. 
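// The emitted sequence is the store-side analogue of the load expansion
// above: the source value is run through an xxswapd and then stored with
// stxvd2x.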
SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Chain;
  SDValue Base;
  unsigned SrcOpnd;
  MachineMemOperand *MMO;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode for little endian VSX store");
  case ISD::STORE: {
    StoreSDNode *ST = cast<StoreSDNode>(N);
    Chain = ST->getChain();
    Base = ST->getBasePtr();
    MMO = ST->getMemOperand();
    SrcOpnd = 1;
    // If the MMO suggests this isn't a store of a full vector, leave
    // things alone. For a built-in, we have to make the change for
    // correctness, so if there is a size problem that will be a bug.
    if (MMO->getSize() < 16)
      return SDValue();
    break;
  }
  case ISD::INTRINSIC_VOID: {
    MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
    Chain = Intrin->getChain();
    // Intrin->getBasePtr() oddly does not get what we want.
    Base = Intrin->getOperand(3);
    MMO = Intrin->getMemOperand();
    SrcOpnd = 2;
    break;
  }
  }

  SDValue Src = N->getOperand(SrcOpnd);
  MVT VecTy = Src.getValueType().getSimpleVT();

  // All stores are done as v2f64, with a bitcast beforehand if needed.
  if (VecTy != MVT::v2f64) {
    Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
    DCI.AddToWorklist(Src.getNode());
  }

  SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
                             DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
  DCI.AddToWorklist(Swap.getNode());
  Chain = Swap.getValue(1);
  SDValue StoreOps[] = { Chain, Swap, Base };
  SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
                                          DAG.getVTList(MVT::Other),
                                          StoreOps, VecTy, MMO);
  DCI.AddToWorklist(Store.getNode());
  return Store;
}

SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case PPCISD::SHL:
    if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
      return N->getOperand(0);
    break;
  case PPCISD::SRL:
    if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
      return N->getOperand(0);
    break;
  case PPCISD::SRA:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->isNullValue() ||   //  0 >>s V -> 0.
          C->isAllOnesValue())  // -1 >>s V -> -1.
        return N->getOperand(0);
    }
    break;
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    return DAGCombineExtBoolTrunc(N, DCI);
  case ISD::TRUNCATE:
  case ISD::SETCC:
  case ISD::SELECT_CC:
    return DAGCombineTruncBoolExt(N, DCI);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return combineFPToIntToFP(N, DCI);
  case ISD::STORE: {
    // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
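    // Illustrative sketch of this combine (register names are arbitrary):
    //   fctiwz f0, f1    ; i32 result held in the low word of an FPR
    //   stfiwx f0, 0, r3
    // This keeps the converted value out of the GPRs entirely.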
    if (Subtarget.hasSTFIWX() && !cast<StoreSDNode>(N)->isTruncatingStore() &&
        N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
        N->getOperand(1).getValueType() == MVT::i32 &&
        N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
      SDValue Val = N->getOperand(1).getOperand(0);
      if (Val.getValueType() == MVT::f32) {
        Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
        DCI.AddToWorklist(Val.getNode());
      }
      Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val);
      DCI.AddToWorklist(Val.getNode());

      SDValue Ops[] = {
        N->getOperand(0), Val, N->getOperand(2),
        DAG.getValueType(N->getOperand(1).getValueType())
      };

      Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
              DAG.getVTList(MVT::Other), Ops,
              cast<StoreSDNode>(N)->getMemoryVT(),
              cast<StoreSDNode>(N)->getMemOperand());
      DCI.AddToWorklist(Val.getNode());
      return Val;
    }

    // Turn STORE (BSWAP) -> sthbrx/stwbrx.
    if (cast<StoreSDNode>(N)->isUnindexed() &&
        N->getOperand(1).getOpcode() == ISD::BSWAP &&
        N->getOperand(1).getNode()->hasOneUse() &&
        (N->getOperand(1).getValueType() == MVT::i32 ||
         N->getOperand(1).getValueType() == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
          N->getOperand(1).getValueType() == MVT::i64))) {
      SDValue BSwapOp = N->getOperand(1).getOperand(0);
      // Do an any-extend to 32-bits if this is a half-word input.
      if (BSwapOp.getValueType() == MVT::i16)
        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);

      SDValue Ops[] = {
        N->getOperand(0), BSwapOp, N->getOperand(2),
        DAG.getValueType(N->getOperand(1).getValueType())
      };
      return
        DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
                                Ops, cast<StoreSDNode>(N)->getMemoryVT(),
                                cast<StoreSDNode>(N)->getMemOperand());
    }

    // For little endian, VSX stores require generating xxswapd/stxvd2x.
    EVT VT = N->getOperand(1).getValueType();
    if (VT.isSimple()) {
      MVT StoreVT = VT.getSimpleVT();
      if (Subtarget.hasVSX() && Subtarget.isLittleEndian() &&
          (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
           StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
        return expandVSXStoreForLE(N, DCI);
    }
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    EVT VT = LD->getValueType(0);

    // For little endian, VSX loads require generating lxvd2x/xxswapd.
    if (VT.isSimple()) {
      MVT LoadVT = VT.getSimpleVT();
      if (Subtarget.hasVSX() && Subtarget.isLittleEndian() &&
          (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
           LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
        return expandVSXLoadForLE(N, DCI);
    }

    // We sometimes end up with a 64-bit integer load, from which we extract
    // two single-precision floating-point numbers. This happens with
    // std::complex<float>, and other similar structures, because of the way we
    // canonicalize structure copies. However, if we lack direct moves,
    // then the final bitcasts from the extracted integer values to the
    // floating-point numbers turn into store/load pairs. Even with direct moves,
    // just loading the two floating-point numbers is likely better.
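    // Sketch of the rewrite attempted below: the i64 load feeding the
    // truncate/srl/bitcast pattern (drawn out inside the lambda) is replaced
    // by two plain f32 loads, one at the original address and one 4 bytes
    // past it, so neither value needs an integer-to-float transfer.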
10550 auto ReplaceTwoFloatLoad = [&]() { 10551 if (VT != MVT::i64) 10552 return false; 10553 10554 if (LD->getExtensionType() != ISD::NON_EXTLOAD || 10555 LD->isVolatile()) 10556 return false; 10557 10558 // We're looking for a sequence like this: 10559 // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64 10560 // t16: i64 = srl t13, Constant:i32<32> 10561 // t17: i32 = truncate t16 10562 // t18: f32 = bitcast t17 10563 // t19: i32 = truncate t13 10564 // t20: f32 = bitcast t19 10565 10566 if (!LD->hasNUsesOfValue(2, 0)) 10567 return false; 10568 10569 auto UI = LD->use_begin(); 10570 while (UI.getUse().getResNo() != 0) ++UI; 10571 SDNode *Trunc = *UI++; 10572 while (UI.getUse().getResNo() != 0) ++UI; 10573 SDNode *RightShift = *UI; 10574 if (Trunc->getOpcode() != ISD::TRUNCATE) 10575 std::swap(Trunc, RightShift); 10576 10577 if (Trunc->getOpcode() != ISD::TRUNCATE || 10578 Trunc->getValueType(0) != MVT::i32 || 10579 !Trunc->hasOneUse()) 10580 return false; 10581 if (RightShift->getOpcode() != ISD::SRL || 10582 !isa<ConstantSDNode>(RightShift->getOperand(1)) || 10583 RightShift->getConstantOperandVal(1) != 32 || 10584 !RightShift->hasOneUse()) 10585 return false; 10586 10587 SDNode *Trunc2 = *RightShift->use_begin(); 10588 if (Trunc2->getOpcode() != ISD::TRUNCATE || 10589 Trunc2->getValueType(0) != MVT::i32 || 10590 !Trunc2->hasOneUse()) 10591 return false; 10592 10593 SDNode *Bitcast = *Trunc->use_begin(); 10594 SDNode *Bitcast2 = *Trunc2->use_begin(); 10595 10596 if (Bitcast->getOpcode() != ISD::BITCAST || 10597 Bitcast->getValueType(0) != MVT::f32) 10598 return false; 10599 if (Bitcast2->getOpcode() != ISD::BITCAST || 10600 Bitcast2->getValueType(0) != MVT::f32) 10601 return false; 10602 10603 if (Subtarget.isLittleEndian()) 10604 std::swap(Bitcast, Bitcast2); 10605 10606 // Bitcast has the second float (in memory-layout order) and Bitcast2 10607 // has the first one. 10608 10609 SDValue BasePtr = LD->getBasePtr(); 10610 if (LD->isIndexed()) { 10611 assert(LD->getAddressingMode() == ISD::PRE_INC && 10612 "Non-pre-inc AM on PPC?"); 10613 BasePtr = 10614 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 10615 LD->getOffset()); 10616 } 10617 10618 SDValue FloatLoad = 10619 DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr, 10620 LD->getPointerInfo(), false, LD->isNonTemporal(), 10621 LD->isInvariant(), LD->getAlignment(), LD->getAAInfo()); 10622 SDValue AddPtr = 10623 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), 10624 BasePtr, DAG.getIntPtrConstant(4, dl)); 10625 SDValue FloatLoad2 = 10626 DAG.getLoad(MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr, 10627 LD->getPointerInfo().getWithOffset(4), false, 10628 LD->isNonTemporal(), LD->isInvariant(), 10629 MinAlign(LD->getAlignment(), 4), LD->getAAInfo()); 10630 10631 if (LD->isIndexed()) { 10632 // Note that DAGCombine should re-form any pre-increment load(s) from 10633 // what is produced here if that makes sense. 10634 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr); 10635 } 10636 10637 DCI.CombineTo(Bitcast2, FloatLoad); 10638 DCI.CombineTo(Bitcast, FloatLoad2); 10639 10640 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 
2 : 1), 10641 SDValue(FloatLoad2.getNode(), 1)); 10642 return true; 10643 }; 10644 10645 if (ReplaceTwoFloatLoad()) 10646 return SDValue(N, 0); 10647 10648 EVT MemVT = LD->getMemoryVT(); 10649 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext()); 10650 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty); 10651 Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext()); 10652 unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy); 10653 if (LD->isUnindexed() && VT.isVector() && 10654 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) && 10655 // P8 and later hardware should just use LOAD. 10656 !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 || 10657 VT == MVT::v4i32 || VT == MVT::v4f32)) || 10658 (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) && 10659 LD->getAlignment() >= ScalarABIAlignment)) && 10660 LD->getAlignment() < ABIAlignment) { 10661 // This is a type-legal unaligned Altivec or QPX load. 10662 SDValue Chain = LD->getChain(); 10663 SDValue Ptr = LD->getBasePtr(); 10664 bool isLittleEndian = Subtarget.isLittleEndian(); 10665 10666 // This implements the loading of unaligned vectors as described in 10667 // the venerable Apple Velocity Engine overview. Specifically: 10668 // https://developer.apple.com/hardwaredrivers/ve/alignment.html 10669 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html 10670 // 10671 // The general idea is to expand a sequence of one or more unaligned 10672 // loads into an alignment-based permutation-control instruction (lvsl 10673 // or lvsr), a series of regular vector loads (which always truncate 10674 // their input address to an aligned address), and a series of 10675 // permutations. The results of these permutations are the requested 10676 // loaded values. The trick is that the last "extra" load is not taken 10677 // from the address you might suspect (sizeof(vector) bytes after the 10678 // last requested load), but rather sizeof(vector) - 1 bytes after the 10679 // last requested vector. The point of this is to avoid a page fault if 10680 // the base address happened to be aligned. This works because if the 10681 // base address is aligned, then adding less than a full vector length 10682 // will cause the last vector in the sequence to be (re)loaded. 10683 // Otherwise, the next vector will be fetched as you might suspect was 10684 // necessary. 10685 10686 // We might be able to reuse the permutation generation from 10687 // a different base address offset from this one by an aligned amount. 10688 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this 10689 // optimization later. 10690 Intrinsic::ID Intr, IntrLD, IntrPerm; 10691 MVT PermCntlTy, PermTy, LDTy; 10692 if (Subtarget.hasAltivec()) { 10693 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr : 10694 Intrinsic::ppc_altivec_lvsl; 10695 IntrLD = Intrinsic::ppc_altivec_lvx; 10696 IntrPerm = Intrinsic::ppc_altivec_vperm; 10697 PermCntlTy = MVT::v16i8; 10698 PermTy = MVT::v4i32; 10699 LDTy = MVT::v4i32; 10700 } else { 10701 Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld : 10702 Intrinsic::ppc_qpx_qvlpcls; 10703 IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd : 10704 Intrinsic::ppc_qpx_qvlfs; 10705 IntrPerm = Intrinsic::ppc_qpx_qvfperm; 10706 PermCntlTy = MVT::v4f64; 10707 PermTy = MVT::v4f64; 10708 LDTy = MemVT.getSimpleVT(); 10709 } 10710 10711 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy); 10712 10713 // Create the new MMO for the new base load. 
It is like the original MMO, 10714 // but represents an area in memory almost twice the vector size centered 10715 // on the original address. If the address is unaligned, we might start 10716 // reading up to (sizeof(vector)-1) bytes below the address of the 10717 // original unaligned load. 10718 MachineFunction &MF = DAG.getMachineFunction(); 10719 MachineMemOperand *BaseMMO = 10720 MF.getMachineMemOperand(LD->getMemOperand(), 10721 -(long)MemVT.getStoreSize()+1, 10722 2*MemVT.getStoreSize()-1); 10723 10724 // Create the new base load. 10725 SDValue LDXIntID = 10726 DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout())); 10727 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr }; 10728 SDValue BaseLoad = 10729 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 10730 DAG.getVTList(PermTy, MVT::Other), 10731 BaseLoadOps, LDTy, BaseMMO); 10732 10733 // Note that the value of IncOffset (which is provided to the next 10734 // load's pointer info offset value, and thus used to calculate the 10735 // alignment), and the value of IncValue (which is actually used to 10736 // increment the pointer value) are different! This is because we 10737 // require the next load to appear to be aligned, even though it 10738 // is actually offset from the base pointer by a lesser amount. 10739 int IncOffset = VT.getSizeInBits() / 8; 10740 int IncValue = IncOffset; 10741 10742 // Walk (both up and down) the chain looking for another load at the real 10743 // (aligned) offset (the alignment of the other load does not matter in 10744 // this case). If found, then do not use the offset reduction trick, as 10745 // that will prevent the loads from being later combined (as they would 10746 // otherwise be duplicates). 10747 if (!findConsecutiveLoad(LD, DAG)) 10748 --IncValue; 10749 10750 SDValue Increment = 10751 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout())); 10752 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 10753 10754 MachineMemOperand *ExtraMMO = 10755 MF.getMachineMemOperand(LD->getMemOperand(), 10756 1, 2*MemVT.getStoreSize()-1); 10757 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr }; 10758 SDValue ExtraLoad = 10759 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 10760 DAG.getVTList(PermTy, MVT::Other), 10761 ExtraLoadOps, LDTy, ExtraMMO); 10762 10763 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 10764 BaseLoad.getValue(1), ExtraLoad.getValue(1)); 10765 10766 // Because vperm has a big-endian bias, we must reverse the order 10767 // of the input vectors and complement the permute control vector 10768 // when generating little endian code. We have already handled the 10769 // latter by using lvsr instead of lvsl, so just reverse BaseLoad 10770 // and ExtraLoad here. 10771 SDValue Perm; 10772 if (isLittleEndian) 10773 Perm = BuildIntrinsicOp(IntrPerm, 10774 ExtraLoad, BaseLoad, PermCntl, DAG, dl); 10775 else 10776 Perm = BuildIntrinsicOp(IntrPerm, 10777 BaseLoad, ExtraLoad, PermCntl, DAG, dl); 10778 10779 if (VT != PermTy) 10780 Perm = Subtarget.hasAltivec() ? 10781 DAG.getNode(ISD::BITCAST, dl, VT, Perm) : 10782 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX 10783 DAG.getTargetConstant(1, dl, MVT::i64)); 10784 // second argument is 1 because this rounding 10785 // is always exact. 10786 10787 // The output of the permutation is our loaded result, the TokenFactor is 10788 // our new chain. 
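      // As a rough sketch, the Altivec path emits (register names here are
      // illustrative; little endian uses lvsr and swaps the vperm inputs):
      //   lvsl  v0, 0, rPtr
      //   lvx   v1, 0, rPtr
      //   lvx   v2, rInc, rPtr  ; rInc is sizeof(vector) or one less,
      //                         ; per the IncValue logic above
      //   vperm vResult, v1, v2, v0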
10789 DCI.CombineTo(N, Perm, TF); 10790 return SDValue(N, 0); 10791 } 10792 } 10793 break; 10794 case ISD::INTRINSIC_WO_CHAIN: { 10795 bool isLittleEndian = Subtarget.isLittleEndian(); 10796 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 10797 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr 10798 : Intrinsic::ppc_altivec_lvsl); 10799 if ((IID == Intr || 10800 IID == Intrinsic::ppc_qpx_qvlpcld || 10801 IID == Intrinsic::ppc_qpx_qvlpcls) && 10802 N->getOperand(1)->getOpcode() == ISD::ADD) { 10803 SDValue Add = N->getOperand(1); 10804 10805 int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ? 10806 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */; 10807 10808 if (DAG.MaskedValueIsZero( 10809 Add->getOperand(1), 10810 APInt::getAllOnesValue(Bits /* alignment */) 10811 .zext( 10812 Add.getValueType().getScalarType().getSizeInBits()))) { 10813 SDNode *BasePtr = Add->getOperand(0).getNode(); 10814 for (SDNode::use_iterator UI = BasePtr->use_begin(), 10815 UE = BasePtr->use_end(); 10816 UI != UE; ++UI) { 10817 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 10818 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) { 10819 // We've found another LVSL/LVSR, and this address is an aligned 10820 // multiple of that one. The results will be the same, so use the 10821 // one we've just found instead. 10822 10823 return SDValue(*UI, 0); 10824 } 10825 } 10826 } 10827 10828 if (isa<ConstantSDNode>(Add->getOperand(1))) { 10829 SDNode *BasePtr = Add->getOperand(0).getNode(); 10830 for (SDNode::use_iterator UI = BasePtr->use_begin(), 10831 UE = BasePtr->use_end(); UI != UE; ++UI) { 10832 if (UI->getOpcode() == ISD::ADD && 10833 isa<ConstantSDNode>(UI->getOperand(1)) && 10834 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() - 10835 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) % 10836 (1ULL << Bits) == 0) { 10837 SDNode *OtherAdd = *UI; 10838 for (SDNode::use_iterator VI = OtherAdd->use_begin(), 10839 VE = OtherAdd->use_end(); VI != VE; ++VI) { 10840 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 10841 cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) { 10842 return SDValue(*VI, 0); 10843 } 10844 } 10845 } 10846 } 10847 } 10848 } 10849 } 10850 10851 break; 10852 case ISD::INTRINSIC_W_CHAIN: { 10853 // For little endian, VSX loads require generating lxvd2x/xxswapd. 10854 if (Subtarget.hasVSX() && Subtarget.isLittleEndian()) { 10855 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10856 default: 10857 break; 10858 case Intrinsic::ppc_vsx_lxvw4x: 10859 case Intrinsic::ppc_vsx_lxvd2x: 10860 return expandVSXLoadForLE(N, DCI); 10861 } 10862 } 10863 break; 10864 } 10865 case ISD::INTRINSIC_VOID: { 10866 // For little endian, VSX stores require generating xxswapd/stxvd2x. 10867 if (Subtarget.hasVSX() && Subtarget.isLittleEndian()) { 10868 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10869 default: 10870 break; 10871 case Intrinsic::ppc_vsx_stxvw4x: 10872 case Intrinsic::ppc_vsx_stxvd2x: 10873 return expandVSXStoreForLE(N, DCI); 10874 } 10875 } 10876 break; 10877 } 10878 case ISD::BSWAP: 10879 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 
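    // Sketch of this combine: (i32 (bswap (i32 (load $ptr)))) becomes a
    // single byte-reversed load,
    //   lwbrx r3, 0, $ptr
    // with lhbrx (plus a truncate back to i16) for the half-word case, and
    // ldbrx for i64 where available.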
10880 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && 10881 N->getOperand(0).hasOneUse() && 10882 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 || 10883 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && 10884 N->getValueType(0) == MVT::i64))) { 10885 SDValue Load = N->getOperand(0); 10886 LoadSDNode *LD = cast<LoadSDNode>(Load); 10887 // Create the byte-swapping load. 10888 SDValue Ops[] = { 10889 LD->getChain(), // Chain 10890 LD->getBasePtr(), // Ptr 10891 DAG.getValueType(N->getValueType(0)) // VT 10892 }; 10893 SDValue BSLoad = 10894 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl, 10895 DAG.getVTList(N->getValueType(0) == MVT::i64 ? 10896 MVT::i64 : MVT::i32, MVT::Other), 10897 Ops, LD->getMemoryVT(), LD->getMemOperand()); 10898 10899 // If this is an i16 load, insert the truncate. 10900 SDValue ResVal = BSLoad; 10901 if (N->getValueType(0) == MVT::i16) 10902 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad); 10903 10904 // First, combine the bswap away. This makes the value produced by the 10905 // load dead. 10906 DCI.CombineTo(N, ResVal); 10907 10908 // Next, combine the load away, we give it a bogus result value but a real 10909 // chain result. The result value is dead because the bswap is dead. 10910 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); 10911 10912 // Return N so it doesn't get rechecked! 10913 return SDValue(N, 0); 10914 } 10915 10916 break; 10917 case PPCISD::VCMP: { 10918 // If a VCMPo node already exists with exactly the same operands as this 10919 // node, use its result instead of this node (VCMPo computes both a CR6 and 10920 // a normal output). 10921 // 10922 if (!N->getOperand(0).hasOneUse() && 10923 !N->getOperand(1).hasOneUse() && 10924 !N->getOperand(2).hasOneUse()) { 10925 10926 // Scan all of the users of the LHS, looking for VCMPo's that match. 10927 SDNode *VCMPoNode = nullptr; 10928 10929 SDNode *LHSN = N->getOperand(0).getNode(); 10930 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 10931 UI != E; ++UI) 10932 if (UI->getOpcode() == PPCISD::VCMPo && 10933 UI->getOperand(1) == N->getOperand(1) && 10934 UI->getOperand(2) == N->getOperand(2) && 10935 UI->getOperand(0) == N->getOperand(0)) { 10936 VCMPoNode = *UI; 10937 break; 10938 } 10939 10940 // If there is no VCMPo node, or if the flag value has a single use, don't 10941 // transform this. 10942 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 10943 break; 10944 10945 // Look at the (necessarily single) use of the flag value. If it has a 10946 // chain, this transformation is more complex. Note that multiple things 10947 // could use the value result, which we should ignore. 10948 SDNode *FlagUser = nullptr; 10949 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 10950 FlagUser == nullptr; ++UI) { 10951 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 10952 SDNode *User = *UI; 10953 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 10954 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) { 10955 FlagUser = User; 10956 break; 10957 } 10958 } 10959 } 10960 10961 // If the user is a MFOCRF instruction, we know this is safe. 10962 // Otherwise we give up for right now. 
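      // (A dot-form compare such as vcmpequw. writes both a vector result
      // and CR field 6; when the sole CR6 consumer is an MFOCRF, the VCMPo's
      // value result can stand in for this VCMP directly.)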
10963 if (FlagUser->getOpcode() == PPCISD::MFOCRF) 10964 return SDValue(VCMPoNode, 0); 10965 } 10966 break; 10967 } 10968 case ISD::BRCOND: { 10969 SDValue Cond = N->getOperand(1); 10970 SDValue Target = N->getOperand(2); 10971 10972 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN && 10973 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() == 10974 Intrinsic::ppc_is_decremented_ctr_nonzero) { 10975 10976 // We now need to make the intrinsic dead (it cannot be instruction 10977 // selected). 10978 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0)); 10979 assert(Cond.getNode()->hasOneUse() && 10980 "Counter decrement has more than one use"); 10981 10982 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other, 10983 N->getOperand(0), Target); 10984 } 10985 } 10986 break; 10987 case ISD::BR_CC: { 10988 // If this is a branch on an altivec predicate comparison, lower this so 10989 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This 10990 // lowering is done pre-legalize, because the legalizer lowers the predicate 10991 // compare down to code that is difficult to reassemble. 10992 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 10993 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 10994 10995 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero 10996 // value. If so, pass-through the AND to get to the intrinsic. 10997 if (LHS.getOpcode() == ISD::AND && 10998 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN && 10999 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() == 11000 Intrinsic::ppc_is_decremented_ctr_nonzero && 11001 isa<ConstantSDNode>(LHS.getOperand(1)) && 11002 !isNullConstant(LHS.getOperand(1))) 11003 LHS = LHS.getOperand(0); 11004 11005 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN && 11006 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 11007 Intrinsic::ppc_is_decremented_ctr_nonzero && 11008 isa<ConstantSDNode>(RHS)) { 11009 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && 11010 "Counter decrement comparison is not EQ or NE"); 11011 11012 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 11013 bool isBDNZ = (CC == ISD::SETEQ && Val) || 11014 (CC == ISD::SETNE && !Val); 11015 11016 // We now need to make the intrinsic dead (it cannot be instruction 11017 // selected). 11018 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0)); 11019 assert(LHS.getNode()->hasOneUse() && 11020 "Counter decrement has more than one use"); 11021 11022 return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other, 11023 N->getOperand(0), N->getOperand(4)); 11024 } 11025 11026 int CompareOpc; 11027 bool isDot; 11028 11029 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 11030 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 11031 getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) { 11032 assert(isDot && "Can't compare against a vector result!"); 11033 11034 // If this is a comparison against something other than 0/1, then we know 11035 // that the condition is never/always true. 11036 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 11037 if (Val != 0 && Val != 1) { 11038 if (CC == ISD::SETEQ) // Cond never true, remove branch. 11039 return N->getOperand(0); 11040 // Always !=, turn it into an unconditional branch. 
11041 return DAG.getNode(ISD::BR, dl, MVT::Other, 11042 N->getOperand(0), N->getOperand(4)); 11043 } 11044 11045 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 11046 11047 // Create the PPCISD altivec 'dot' comparison node. 11048 SDValue Ops[] = { 11049 LHS.getOperand(2), // LHS of compare 11050 LHS.getOperand(3), // RHS of compare 11051 DAG.getConstant(CompareOpc, dl, MVT::i32) 11052 }; 11053 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue }; 11054 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 11055 11056 // Unpack the result based on how the target uses it. 11057 PPC::Predicate CompOpc; 11058 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 11059 default: // Can't happen, don't crash on invalid number though. 11060 case 0: // Branch on the value of the EQ bit of CR6. 11061 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 11062 break; 11063 case 1: // Branch on the inverted value of the EQ bit of CR6. 11064 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 11065 break; 11066 case 2: // Branch on the value of the LT bit of CR6. 11067 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 11068 break; 11069 case 3: // Branch on the inverted value of the LT bit of CR6. 11070 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT; 11071 break; 11072 } 11073 11074 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 11075 DAG.getConstant(CompOpc, dl, MVT::i32), 11076 DAG.getRegister(PPC::CR6, MVT::i32), 11077 N->getOperand(4), CompNode.getValue(1)); 11078 } 11079 break; 11080 } 11081 } 11082 11083 return SDValue(); 11084 } 11085 11086 SDValue 11087 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 11088 SelectionDAG &DAG, 11089 std::vector<SDNode *> *Created) const { 11090 // fold (sdiv X, pow2) 11091 EVT VT = N->getValueType(0); 11092 if (VT == MVT::i64 && !Subtarget.isPPC64()) 11093 return SDValue(); 11094 if ((VT != MVT::i32 && VT != MVT::i64) || 11095 !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2())) 11096 return SDValue(); 11097 11098 SDLoc DL(N); 11099 SDValue N0 = N->getOperand(0); 11100 11101 bool IsNegPow2 = (-Divisor).isPowerOf2(); 11102 unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros(); 11103 SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT); 11104 11105 SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt); 11106 if (Created) 11107 Created->push_back(Op.getNode()); 11108 11109 if (IsNegPow2) { 11110 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op); 11111 if (Created) 11112 Created->push_back(Op.getNode()); 11113 } 11114 11115 return Op; 11116 } 11117 11118 //===----------------------------------------------------------------------===// 11119 // Inline Assembly Support 11120 //===----------------------------------------------------------------------===// 11121 11122 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 11123 APInt &KnownZero, 11124 APInt &KnownOne, 11125 const SelectionDAG &DAG, 11126 unsigned Depth) const { 11127 KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0); 11128 switch (Op.getOpcode()) { 11129 default: break; 11130 case PPCISD::LBRX: { 11131 // lhbrx is known to have the top bits cleared out. 
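    // (lhbrx loads a byte-reversed halfword and zero-fills the remainder of
    // the register, so with an i16 memory VT the upper 16 bits of the i32
    // result are known to be zero.)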
11132 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16) 11133 KnownZero = 0xFFFF0000; 11134 break; 11135 } 11136 case ISD::INTRINSIC_WO_CHAIN: { 11137 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) { 11138 default: break; 11139 case Intrinsic::ppc_altivec_vcmpbfp_p: 11140 case Intrinsic::ppc_altivec_vcmpeqfp_p: 11141 case Intrinsic::ppc_altivec_vcmpequb_p: 11142 case Intrinsic::ppc_altivec_vcmpequh_p: 11143 case Intrinsic::ppc_altivec_vcmpequw_p: 11144 case Intrinsic::ppc_altivec_vcmpequd_p: 11145 case Intrinsic::ppc_altivec_vcmpgefp_p: 11146 case Intrinsic::ppc_altivec_vcmpgtfp_p: 11147 case Intrinsic::ppc_altivec_vcmpgtsb_p: 11148 case Intrinsic::ppc_altivec_vcmpgtsh_p: 11149 case Intrinsic::ppc_altivec_vcmpgtsw_p: 11150 case Intrinsic::ppc_altivec_vcmpgtsd_p: 11151 case Intrinsic::ppc_altivec_vcmpgtub_p: 11152 case Intrinsic::ppc_altivec_vcmpgtuh_p: 11153 case Intrinsic::ppc_altivec_vcmpgtuw_p: 11154 case Intrinsic::ppc_altivec_vcmpgtud_p: 11155 KnownZero = ~1U; // All bits but the low one are known to be zero. 11156 break; 11157 } 11158 } 11159 } 11160 } 11161 11162 unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { 11163 switch (Subtarget.getDarwinDirective()) { 11164 default: break; 11165 case PPC::DIR_970: 11166 case PPC::DIR_PWR4: 11167 case PPC::DIR_PWR5: 11168 case PPC::DIR_PWR5X: 11169 case PPC::DIR_PWR6: 11170 case PPC::DIR_PWR6X: 11171 case PPC::DIR_PWR7: 11172 case PPC::DIR_PWR8: { 11173 if (!ML) 11174 break; 11175 11176 const PPCInstrInfo *TII = Subtarget.getInstrInfo(); 11177 11178 // For small loops (between 5 and 8 instructions), align to a 32-byte 11179 // boundary so that the entire loop fits in one instruction-cache line. 11180 uint64_t LoopSize = 0; 11181 for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I) 11182 for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) { 11183 LoopSize += TII->GetInstSizeInBytes(J); 11184 if (LoopSize > 32) 11185 break; 11186 } 11187 11188 if (LoopSize > 16 && LoopSize <= 32) 11189 return 5; 11190 11191 break; 11192 } 11193 } 11194 11195 return TargetLowering::getPrefLoopAlignment(ML); 11196 } 11197 11198 /// getConstraintType - Given a constraint, return the type of 11199 /// constraint it is for this target. 11200 PPCTargetLowering::ConstraintType 11201 PPCTargetLowering::getConstraintType(StringRef Constraint) const { 11202 if (Constraint.size() == 1) { 11203 switch (Constraint[0]) { 11204 default: break; 11205 case 'b': 11206 case 'r': 11207 case 'f': 11208 case 'd': 11209 case 'v': 11210 case 'y': 11211 return C_RegisterClass; 11212 case 'Z': 11213 // FIXME: While Z does indicate a memory constraint, it specifically 11214 // indicates an r+r address (used in conjunction with the 'y' modifier 11215 // in the replacement string). Currently, we're forcing the base 11216 // register to be r0 in the asm printer (which is interpreted as zero) 11217 // and forming the complete address in the second register. This is 11218 // suboptimal. 11219 return C_Memory; 11220 } 11221 } else if (Constraint == "wc") { // individual CR bits. 11222 return C_RegisterClass; 11223 } else if (Constraint == "wa" || Constraint == "wd" || 11224 Constraint == "wf" || Constraint == "ws") { 11225 return C_RegisterClass; // VSX registers. 11226 } 11227 return TargetLowering::getConstraintType(Constraint); 11228 } 11229 11230 /// Examine constraint type and operand type and determine a weight value. 
11231 /// This object must already have been set up with the operand type 11232 /// and the current alternative constraint selected. 11233 TargetLowering::ConstraintWeight 11234 PPCTargetLowering::getSingleConstraintMatchWeight( 11235 AsmOperandInfo &info, const char *constraint) const { 11236 ConstraintWeight weight = CW_Invalid; 11237 Value *CallOperandVal = info.CallOperandVal; 11238 // If we don't have a value, we can't do a match, 11239 // but allow it at the lowest weight. 11240 if (!CallOperandVal) 11241 return CW_Default; 11242 Type *type = CallOperandVal->getType(); 11243 11244 // Look at the constraint type. 11245 if (StringRef(constraint) == "wc" && type->isIntegerTy(1)) 11246 return CW_Register; // an individual CR bit. 11247 else if ((StringRef(constraint) == "wa" || 11248 StringRef(constraint) == "wd" || 11249 StringRef(constraint) == "wf") && 11250 type->isVectorTy()) 11251 return CW_Register; 11252 else if (StringRef(constraint) == "ws" && type->isDoubleTy()) 11253 return CW_Register; 11254 11255 switch (*constraint) { 11256 default: 11257 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 11258 break; 11259 case 'b': 11260 if (type->isIntegerTy()) 11261 weight = CW_Register; 11262 break; 11263 case 'f': 11264 if (type->isFloatTy()) 11265 weight = CW_Register; 11266 break; 11267 case 'd': 11268 if (type->isDoubleTy()) 11269 weight = CW_Register; 11270 break; 11271 case 'v': 11272 if (type->isVectorTy()) 11273 weight = CW_Register; 11274 break; 11275 case 'y': 11276 weight = CW_Register; 11277 break; 11278 case 'Z': 11279 weight = CW_Memory; 11280 break; 11281 } 11282 return weight; 11283 } 11284 11285 std::pair<unsigned, const TargetRegisterClass *> 11286 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 11287 StringRef Constraint, 11288 MVT VT) const { 11289 if (Constraint.size() == 1) { 11290 // GCC RS6000 Constraint Letters 11291 switch (Constraint[0]) { 11292 case 'b': // R1-R31 11293 if (VT == MVT::i64 && Subtarget.isPPC64()) 11294 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass); 11295 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass); 11296 case 'r': // R0-R31 11297 if (VT == MVT::i64 && Subtarget.isPPC64()) 11298 return std::make_pair(0U, &PPC::G8RCRegClass); 11299 return std::make_pair(0U, &PPC::GPRCRegClass); 11300 // 'd' and 'f' constraints are both defined to be "the floating point 11301 // registers", where one is for 32-bit and the other for 64-bit. We don't 11302 // really care overly much here so just give them all the same reg classes. 11303 case 'd': 11304 case 'f': 11305 if (VT == MVT::f32 || VT == MVT::i32) 11306 return std::make_pair(0U, &PPC::F4RCRegClass); 11307 if (VT == MVT::f64 || VT == MVT::i64) 11308 return std::make_pair(0U, &PPC::F8RCRegClass); 11309 if (VT == MVT::v4f64 && Subtarget.hasQPX()) 11310 return std::make_pair(0U, &PPC::QFRCRegClass); 11311 if (VT == MVT::v4f32 && Subtarget.hasQPX()) 11312 return std::make_pair(0U, &PPC::QSRCRegClass); 11313 break; 11314 case 'v': 11315 if (VT == MVT::v4f64 && Subtarget.hasQPX()) 11316 return std::make_pair(0U, &PPC::QFRCRegClass); 11317 if (VT == MVT::v4f32 && Subtarget.hasQPX()) 11318 return std::make_pair(0U, &PPC::QSRCRegClass); 11319 if (Subtarget.hasAltivec()) 11320 return std::make_pair(0U, &PPC::VRRCRegClass); 11321 case 'y': // crrc 11322 return std::make_pair(0U, &PPC::CRRCRegClass); 11323 } 11324 } else if (Constraint == "wc" && Subtarget.useCRBits()) { 11325 // An individual CR bit. 
11326 return std::make_pair(0U, &PPC::CRBITRCRegClass); 11327 } else if ((Constraint == "wa" || Constraint == "wd" || 11328 Constraint == "wf") && Subtarget.hasVSX()) { 11329 return std::make_pair(0U, &PPC::VSRCRegClass); 11330 } else if (Constraint == "ws" && Subtarget.hasVSX()) { 11331 if (VT == MVT::f32 && Subtarget.hasP8Vector()) 11332 return std::make_pair(0U, &PPC::VSSRCRegClass); 11333 else 11334 return std::make_pair(0U, &PPC::VSFRCRegClass); 11335 } 11336 11337 std::pair<unsigned, const TargetRegisterClass *> R = 11338 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 11339 11340 // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers 11341 // (which we call X[0-9]+). If a 64-bit value has been requested, and a 11342 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent 11343 // register. 11344 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use 11345 // the AsmName field from *RegisterInfo.td, then this would not be necessary. 11346 if (R.first && VT == MVT::i64 && Subtarget.isPPC64() && 11347 PPC::GPRCRegClass.contains(R.first)) 11348 return std::make_pair(TRI->getMatchingSuperReg(R.first, 11349 PPC::sub_32, &PPC::G8RCRegClass), 11350 &PPC::G8RCRegClass); 11351 11352 // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same. 11353 if (!R.second && StringRef("{cc}").equals_lower(Constraint)) { 11354 R.first = PPC::CR0; 11355 R.second = &PPC::CRRCRegClass; 11356 } 11357 11358 return R; 11359 } 11360 11361 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 11362 /// vector. If it is invalid, don't add anything to Ops. 11363 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 11364 std::string &Constraint, 11365 std::vector<SDValue>&Ops, 11366 SelectionDAG &DAG) const { 11367 SDValue Result; 11368 11369 // Only support length 1 constraints. 11370 if (Constraint.length() > 1) return; 11371 11372 char Letter = Constraint[0]; 11373 switch (Letter) { 11374 default: break; 11375 case 'I': 11376 case 'J': 11377 case 'K': 11378 case 'L': 11379 case 'M': 11380 case 'N': 11381 case 'O': 11382 case 'P': { 11383 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op); 11384 if (!CST) return; // Must be an immediate to match. 11385 SDLoc dl(Op); 11386 int64_t Value = CST->getSExtValue(); 11387 EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative 11388 // numbers are printed as such. 11389 switch (Letter) { 11390 default: llvm_unreachable("Unknown constraint letter!"); 11391 case 'I': // "I" is a signed 16-bit constant. 11392 if (isInt<16>(Value)) 11393 Result = DAG.getTargetConstant(Value, dl, TCVT); 11394 break; 11395 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 11396 if (isShiftedUInt<16, 16>(Value)) 11397 Result = DAG.getTargetConstant(Value, dl, TCVT); 11398 break; 11399 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. 11400 if (isShiftedInt<16, 16>(Value)) 11401 Result = DAG.getTargetConstant(Value, dl, TCVT); 11402 break; 11403 case 'K': // "K" is a constant with only the low-order 16 bits nonzero. 11404 if (isUInt<16>(Value)) 11405 Result = DAG.getTargetConstant(Value, dl, TCVT); 11406 break; 11407 case 'M': // "M" is a constant that is greater than 31. 11408 if (Value > 31) 11409 Result = DAG.getTargetConstant(Value, dl, TCVT); 11410 break; 11411 case 'N': // "N" is a positive constant that is an exact power of two. 
      if (Value > 0 && isPowerOf2_64(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'O':  // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      if (isInt<16>(-Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS) const {
  // PPC does not allow r+i addressing modes for vectors!
  if (Ty->isVectorTy() && AM.BaseOffs != 0)
    return false;

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r addressing:
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = Subtarget.isPPC64();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
        DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
                        isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

  // Just load the return address off the stack.
11506 SDValue RetAddrFI = getReturnAddrFrameIndex(DAG); 11507 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI, 11508 MachinePointerInfo(), false, false, false, 0); 11509 } 11510 11511 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, 11512 SelectionDAG &DAG) const { 11513 SDLoc dl(Op); 11514 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 11515 11516 MachineFunction &MF = DAG.getMachineFunction(); 11517 MachineFrameInfo *MFI = MF.getFrameInfo(); 11518 MFI->setFrameAddressIsTaken(true); 11519 11520 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 11521 bool isPPC64 = PtrVT == MVT::i64; 11522 11523 // Naked functions never have a frame pointer, and so we use r1. For all 11524 // other functions, this decision must be delayed until during PEI. 11525 unsigned FrameReg; 11526 if (MF.getFunction()->hasFnAttribute(Attribute::Naked)) 11527 FrameReg = isPPC64 ? PPC::X1 : PPC::R1; 11528 else 11529 FrameReg = isPPC64 ? PPC::FP8 : PPC::FP; 11530 11531 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, 11532 PtrVT); 11533 while (Depth--) 11534 FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(), 11535 FrameAddr, MachinePointerInfo(), false, false, 11536 false, 0); 11537 return FrameAddr; 11538 } 11539 11540 // FIXME? Maybe this could be a TableGen attribute on some registers and 11541 // this table could be generated automatically from RegInfo. 11542 unsigned PPCTargetLowering::getRegisterByName(const char* RegName, EVT VT, 11543 SelectionDAG &DAG) const { 11544 bool isPPC64 = Subtarget.isPPC64(); 11545 bool isDarwinABI = Subtarget.isDarwinABI(); 11546 11547 if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) || 11548 (!isPPC64 && VT != MVT::i32)) 11549 report_fatal_error("Invalid register global variable type"); 11550 11551 bool is64Bit = isPPC64 && VT == MVT::i64; 11552 unsigned Reg = StringSwitch<unsigned>(RegName) 11553 .Case("r1", is64Bit ? PPC::X1 : PPC::R1) 11554 .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2) 11555 .Case("r13", (!isPPC64 && isDarwinABI) ? 0 : 11556 (is64Bit ? PPC::X13 : PPC::R13)) 11557 .Default(0); 11558 11559 if (Reg) 11560 return Reg; 11561 report_fatal_error("Invalid register name global variable"); 11562 } 11563 11564 bool 11565 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 11566 // The PowerPC target isn't yet aware of offsets. 
11567 return false; 11568 } 11569 11570 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 11571 const CallInst &I, 11572 unsigned Intrinsic) const { 11573 11574 switch (Intrinsic) { 11575 case Intrinsic::ppc_qpx_qvlfd: 11576 case Intrinsic::ppc_qpx_qvlfs: 11577 case Intrinsic::ppc_qpx_qvlfcd: 11578 case Intrinsic::ppc_qpx_qvlfcs: 11579 case Intrinsic::ppc_qpx_qvlfiwa: 11580 case Intrinsic::ppc_qpx_qvlfiwz: 11581 case Intrinsic::ppc_altivec_lvx: 11582 case Intrinsic::ppc_altivec_lvxl: 11583 case Intrinsic::ppc_altivec_lvebx: 11584 case Intrinsic::ppc_altivec_lvehx: 11585 case Intrinsic::ppc_altivec_lvewx: 11586 case Intrinsic::ppc_vsx_lxvd2x: 11587 case Intrinsic::ppc_vsx_lxvw4x: { 11588 EVT VT; 11589 switch (Intrinsic) { 11590 case Intrinsic::ppc_altivec_lvebx: 11591 VT = MVT::i8; 11592 break; 11593 case Intrinsic::ppc_altivec_lvehx: 11594 VT = MVT::i16; 11595 break; 11596 case Intrinsic::ppc_altivec_lvewx: 11597 VT = MVT::i32; 11598 break; 11599 case Intrinsic::ppc_vsx_lxvd2x: 11600 VT = MVT::v2f64; 11601 break; 11602 case Intrinsic::ppc_qpx_qvlfd: 11603 VT = MVT::v4f64; 11604 break; 11605 case Intrinsic::ppc_qpx_qvlfs: 11606 VT = MVT::v4f32; 11607 break; 11608 case Intrinsic::ppc_qpx_qvlfcd: 11609 VT = MVT::v2f64; 11610 break; 11611 case Intrinsic::ppc_qpx_qvlfcs: 11612 VT = MVT::v2f32; 11613 break; 11614 default: 11615 VT = MVT::v4i32; 11616 break; 11617 } 11618 11619 Info.opc = ISD::INTRINSIC_W_CHAIN; 11620 Info.memVT = VT; 11621 Info.ptrVal = I.getArgOperand(0); 11622 Info.offset = -VT.getStoreSize()+1; 11623 Info.size = 2*VT.getStoreSize()-1; 11624 Info.align = 1; 11625 Info.vol = false; 11626 Info.readMem = true; 11627 Info.writeMem = false; 11628 return true; 11629 } 11630 case Intrinsic::ppc_qpx_qvlfda: 11631 case Intrinsic::ppc_qpx_qvlfsa: 11632 case Intrinsic::ppc_qpx_qvlfcda: 11633 case Intrinsic::ppc_qpx_qvlfcsa: 11634 case Intrinsic::ppc_qpx_qvlfiwaa: 11635 case Intrinsic::ppc_qpx_qvlfiwza: { 11636 EVT VT; 11637 switch (Intrinsic) { 11638 case Intrinsic::ppc_qpx_qvlfda: 11639 VT = MVT::v4f64; 11640 break; 11641 case Intrinsic::ppc_qpx_qvlfsa: 11642 VT = MVT::v4f32; 11643 break; 11644 case Intrinsic::ppc_qpx_qvlfcda: 11645 VT = MVT::v2f64; 11646 break; 11647 case Intrinsic::ppc_qpx_qvlfcsa: 11648 VT = MVT::v2f32; 11649 break; 11650 default: 11651 VT = MVT::v4i32; 11652 break; 11653 } 11654 11655 Info.opc = ISD::INTRINSIC_W_CHAIN; 11656 Info.memVT = VT; 11657 Info.ptrVal = I.getArgOperand(0); 11658 Info.offset = 0; 11659 Info.size = VT.getStoreSize(); 11660 Info.align = 1; 11661 Info.vol = false; 11662 Info.readMem = true; 11663 Info.writeMem = false; 11664 return true; 11665 } 11666 case Intrinsic::ppc_qpx_qvstfd: 11667 case Intrinsic::ppc_qpx_qvstfs: 11668 case Intrinsic::ppc_qpx_qvstfcd: 11669 case Intrinsic::ppc_qpx_qvstfcs: 11670 case Intrinsic::ppc_qpx_qvstfiw: 11671 case Intrinsic::ppc_altivec_stvx: 11672 case Intrinsic::ppc_altivec_stvxl: 11673 case Intrinsic::ppc_altivec_stvebx: 11674 case Intrinsic::ppc_altivec_stvehx: 11675 case Intrinsic::ppc_altivec_stvewx: 11676 case Intrinsic::ppc_vsx_stxvd2x: 11677 case Intrinsic::ppc_vsx_stxvw4x: { 11678 EVT VT; 11679 switch (Intrinsic) { 11680 case Intrinsic::ppc_altivec_stvebx: 11681 VT = MVT::i8; 11682 break; 11683 case Intrinsic::ppc_altivec_stvehx: 11684 VT = MVT::i16; 11685 break; 11686 case Intrinsic::ppc_altivec_stvewx: 11687 VT = MVT::i32; 11688 break; 11689 case Intrinsic::ppc_vsx_stxvd2x: 11690 VT = MVT::v2f64; 11691 break; 11692 case Intrinsic::ppc_qpx_qvstfd: 11693 VT = MVT::v4f64; 11694 break; 11695 
case Intrinsic::ppc_qpx_qvstfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = 1;
    Info.vol = false;
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfda:
  case Intrinsic::ppc_qpx_qvstfsa:
  case Intrinsic::ppc_qpx_qvstfcda:
  case Intrinsic::ppc_qpx_qvstfcsa:
  case Intrinsic::ppc_qpx_qvstfiwa: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = 1;
    Info.vol = false;
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  default:
    break;
  }

  return false;
}

/// getOptimalMemOpType - Returns the target-specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove lowering.
/// If DstAlign is zero, it is safe to set the destination to any alignment
/// (any constraint can be satisfied). Similarly, if SrcAlign is zero, there
/// is no need to check it against an alignment requirement, probably because
/// the source does not need to be loaded. If 'IsMemset' is true, that means
/// it's expanding a memset. If 'ZeroMemset' is true, that means it's a
/// memset of zero. 'MemcpyStrSrc' indicates whether the memcpy source is
/// constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
                                           unsigned DstAlign, unsigned SrcAlign,
                                           bool IsMemset, bool ZeroMemset,
                                           bool MemcpyStrSrc,
                                           MachineFunction &MF) const {
  if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
    const Function *F = MF.getFunction();
    // When expanding a memset, require at least two QPX instructions to cover
    // the cost of loading the value to be stored from the constant pool.
    if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
        (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
        !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
      return MVT::v4f64;
    }

    // We should use Altivec/VSX loads and stores when available. For unaligned
    // addresses, unaligned VSX loads are only fast starting with the P8.
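    // For example (a sketch of the intent, not a guaranteed expansion): a
    // 64-byte memcpy with 16-byte-aligned operands would be broken into four
    // v4i32 vector loads/stores here, rather than eight i64 operations; the
    // generic memcpy lowering makes the final call from the returned VT.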
11790 if (Subtarget.hasAltivec() && Size >= 16 && 11791 (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) || 11792 ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector()))) 11793 return MVT::v4i32; 11794 } 11795 11796 if (Subtarget.isPPC64()) { 11797 return MVT::i64; 11798 } 11799 11800 return MVT::i32; 11801 } 11802 11803 /// \brief Returns true if it is beneficial to convert a load of a constant 11804 /// to just the constant itself. 11805 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 11806 Type *Ty) const { 11807 assert(Ty->isIntegerTy()); 11808 11809 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 11810 return !(BitSize == 0 || BitSize > 64); 11811 } 11812 11813 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { 11814 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 11815 return false; 11816 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 11817 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 11818 return NumBits1 == 64 && NumBits2 == 32; 11819 } 11820 11821 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 11822 if (!VT1.isInteger() || !VT2.isInteger()) 11823 return false; 11824 unsigned NumBits1 = VT1.getSizeInBits(); 11825 unsigned NumBits2 = VT2.getSizeInBits(); 11826 return NumBits1 == 64 && NumBits2 == 32; 11827 } 11828 11829 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { 11830 // Generally speaking, zexts are not free, but they are free when they can be 11831 // folded with other operations. 11832 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) { 11833 EVT MemVT = LD->getMemoryVT(); 11834 if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 || 11835 (Subtarget.isPPC64() && MemVT == MVT::i32)) && 11836 (LD->getExtensionType() == ISD::NON_EXTLOAD || 11837 LD->getExtensionType() == ISD::ZEXTLOAD)) 11838 return true; 11839 } 11840 11841 // FIXME: Add other cases... 11842 // - 32-bit shifts with a zext to i64 11843 // - zext after ctlz, bswap, etc. 11844 // - zext after and by a constant mask 11845 11846 return TargetLowering::isZExtFree(Val, VT2); 11847 } 11848 11849 bool PPCTargetLowering::isFPExtFree(EVT VT) const { 11850 assert(VT.isFloatingPoint()); 11851 return true; 11852 } 11853 11854 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 11855 return isInt<16>(Imm) || isUInt<16>(Imm); 11856 } 11857 11858 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const { 11859 return isInt<16>(Imm) || isUInt<16>(Imm); 11860 } 11861 11862 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, 11863 unsigned, 11864 unsigned, 11865 bool *Fast) const { 11866 if (DisablePPCUnaligned) 11867 return false; 11868 11869 // PowerPC supports unaligned memory access for simple non-vector types. 11870 // Although accessing unaligned addresses is not as efficient as accessing 11871 // aligned addresses, it is generally more efficient than manual expansion, 11872 // and generally only traps for software emulation when crossing page 11873 // boundaries. 
11874 11875 if (!VT.isSimple()) 11876 return false; 11877 11878 if (VT.getSimpleVT().isVector()) { 11879 if (Subtarget.hasVSX()) { 11880 if (VT != MVT::v2f64 && VT != MVT::v2i64 && 11881 VT != MVT::v4f32 && VT != MVT::v4i32) 11882 return false; 11883 } else { 11884 return false; 11885 } 11886 } 11887 11888 if (VT == MVT::ppcf128) 11889 return false; 11890 11891 if (Fast) 11892 *Fast = true; 11893 11894 return true; 11895 } 11896 11897 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { 11898 VT = VT.getScalarType(); 11899 11900 if (!VT.isSimple()) 11901 return false; 11902 11903 switch (VT.getSimpleVT().SimpleTy) { 11904 case MVT::f32: 11905 case MVT::f64: 11906 return true; 11907 default: 11908 break; 11909 } 11910 11911 return false; 11912 } 11913 11914 const MCPhysReg * 11915 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const { 11916 // LR is a callee-save register, but we must treat it as clobbered by any call 11917 // site. Hence we include LR in the scratch registers, which are in turn added 11918 // as implicit-defs for stackmaps and patchpoints. The same reasoning applies 11919 // to CTR, which is used by any indirect call. 11920 static const MCPhysReg ScratchRegs[] = { 11921 PPC::X12, PPC::LR8, PPC::CTR8, 0 11922 }; 11923 11924 return ScratchRegs; 11925 } 11926 11927 unsigned PPCTargetLowering::getExceptionPointerRegister( 11928 const Constant *PersonalityFn) const { 11929 return Subtarget.isPPC64() ? PPC::X3 : PPC::R3; 11930 } 11931 11932 unsigned PPCTargetLowering::getExceptionSelectorRegister( 11933 const Constant *PersonalityFn) const { 11934 return Subtarget.isPPC64() ? PPC::X4 : PPC::R4; 11935 } 11936 11937 bool 11938 PPCTargetLowering::shouldExpandBuildVectorWithShuffles( 11939 EVT VT , unsigned DefinedValues) const { 11940 if (VT == MVT::v2i64) 11941 return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves 11942 11943 if (Subtarget.hasQPX()) { 11944 if (VT == MVT::v4f32 || VT == MVT::v4f64 || VT == MVT::v4i1) 11945 return true; 11946 } 11947 11948 return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues); 11949 } 11950 11951 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const { 11952 if (DisableILPPref || Subtarget.enableMachineScheduler()) 11953 return TargetLowering::getSchedulingPreference(N); 11954 11955 return Sched::ILP; 11956 } 11957 11958 // Create a fast isel object. 
11959 FastISel * 11960 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo, 11961 const TargetLibraryInfo *LibInfo) const { 11962 return PPC::createFastISel(FuncInfo, LibInfo); 11963 } 11964 11965 void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { 11966 if (Subtarget.isDarwinABI()) return; 11967 if (!Subtarget.isPPC64()) return; 11968 11969 // Update IsSplitCSR in PPCFunctionInfo 11970 PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>(); 11971 PFI->setIsSplitCSR(true); 11972 } 11973 11974 void PPCTargetLowering::insertCopiesSplitCSR( 11975 MachineBasicBlock *Entry, 11976 const SmallVectorImpl<MachineBasicBlock *> &Exits) const { 11977 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 11978 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); 11979 if (!IStart) 11980 return; 11981 11982 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 11983 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); 11984 MachineBasicBlock::iterator MBBI = Entry->begin(); 11985 for (const MCPhysReg *I = IStart; *I; ++I) { 11986 const TargetRegisterClass *RC = nullptr; 11987 if (PPC::G8RCRegClass.contains(*I)) 11988 RC = &PPC::G8RCRegClass; 11989 else if (PPC::F8RCRegClass.contains(*I)) 11990 RC = &PPC::F8RCRegClass; 11991 else if (PPC::CRRCRegClass.contains(*I)) 11992 RC = &PPC::CRRCRegClass; 11993 else if (PPC::VRRCRegClass.contains(*I)) 11994 RC = &PPC::VRRCRegClass; 11995 else 11996 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 11997 11998 unsigned NewVR = MRI->createVirtualRegister(RC); 11999 // Create copy from CSR to a virtual register. 12000 // FIXME: this currently does not emit CFI pseudo-instructions, it works 12001 // fine for CXX_FAST_TLS since the C++-style TLS access functions should be 12002 // nounwind. If we want to generalize this later, we may need to emit 12003 // CFI pseudo-instructions. 12004 assert(Entry->getParent()->getFunction()->hasFnAttribute( 12005 Attribute::NoUnwind) && 12006 "Function should be nounwind in insertCopiesSplitCSR!"); 12007 Entry->addLiveIn(*I); 12008 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) 12009 .addReg(*I); 12010 12011 // Insert the copy-back instructions right before the terminator 12012 for (auto *Exit : Exits) 12013 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), 12014 TII->get(TargetOpcode::COPY), *I) 12015 .addReg(NewVR); 12016 } 12017 } 12018 12019 // Override to enable LOAD_STACK_GUARD lowering on Linux. 12020 bool PPCTargetLowering::useLoadStackGuardNode() const { 12021 if (!Subtarget.isTargetLinux()) 12022 return TargetLowering::useLoadStackGuardNode(); 12023 return true; 12024 } 12025 12026 // Override to disable global variable loading on Linux. 12027 void PPCTargetLowering::insertSSPDeclarations(Module &M) const { 12028 if (!Subtarget.isTargetLinux()) 12029 return TargetLowering::insertSSPDeclarations(M); 12030 } 12031