//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPCCallingConv.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCTargetMachine.h"
#include "PPCTargetObjectFile.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

// FIXME: Remove this once soft-float is supported.
static cl::opt<bool> DisablePPCFloatInVariadic("disable-ppc-float-in-variadic",
  cl::desc("disable saving float registers for va_start on PPC"), cl::Hidden);

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
  cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
  cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
  cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
  addRegisterClass(MVT::f64, &PPC::F8RCRegClass);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
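  // Illustrative sketch (not part of the lowering itself): a pre-increment
  // access corresponds to PowerPC's update-form memory instructions, which
  // write the incremented address back into the base register, e.g.
  //
  //   *(double *)(p += 8)   // load the next element and advance the pointer
  //   // can be selected to:  lfdu f1, 8(r3)   (load, and set r3 = r3 + 8)
  //
  // Marking PRE_INC indexed loads/stores Legal below lets ISel form such nodes.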
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
                         isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType (ISD::UINT_TO_FP, MVT::i1,
                         isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load / store of condition registers
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // This is used in the ppcf128->int sequence. Note it has different semantics
  // from FP_ROUND: that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
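  // Sketch of the resulting lowering (illustrative only): with the combined
  // nodes expanded, an i32 srem is legalized to the usual
  // divide/multiply/subtract sequence, roughly
  //
  //   int srem32(int a, int b) { int q = a / b; return a - q * b; }
  //
  // which maps onto a divw/mullw/subf sequence rather than a single
  // divide-with-remainder node.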
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FMA , MVT::f64, Legal);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);
  setOperationAction(ISD::FMA , MVT::f32, Legal);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, CTPOP or CTTZ
  setOperationAction(ISD::BSWAP, MVT::i32 , Expand);
  setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64 , Expand);
  setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);

  if (Subtarget.hasPOPCNTD()) {
    setOperationAction(ISD::CTPOP, MVT::i32 , Legal);
    setOperationAction(ISD::CTPOP, MVT::i64 , Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32 , Expand);
    setOperationAction(ISD::CTPOP, MVT::i64 , Expand);
  }

  // PowerPC does not have ROTR
  setOperationAction(ISD::ROTR, MVT::i32 , Expand);
  setOperationAction(ISD::ROTR, MVT::i64 , Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have Select
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND which requires SetCC
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::f64, Expand);

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuations, user-level threading, and so on. As a result, no
  // other SjLj exception interfaces are implemented; please do not build your
  // own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART , MVT::Other, Custom);

  if (Subtarget.isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  if (Subtarget.isSVR4ABI() && !isPPC64)
    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VACOPY , MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY , MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  // Comparisons that require checking two conditions.
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
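  // (FPCVT here refers to the POWER7-era conversion forms, e.g. fcfids/fcfidus
  // and fctiwuz/fctiduz; treat this list as an illustration rather than a
  // definition of the feature.)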
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD , VT, Legal);
      setOperationAction(ISD::SUB , VT, Legal);

      // Vector instructions introduced in P8
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      }
      else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND , VT, Promote);
      AddPromotedToType (ISD::AND , VT, MVT::v4i32);
      setOperationAction(ISD::OR , VT, Promote);
      AddPromotedToType (ISD::OR , VT, MVT::v4i32);
      setOperationAction(ISD::XOR , VT, Promote);
      AddPromotedToType (ISD::XOR , VT, MVT::v4i32);
      setOperationAction(ISD::LOAD , VT, Promote);
      AddPromotedToType (ISD::LOAD , VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
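      // (When a vector operation is marked Expand and no cheaper expansion
      // applies, the legalizer generally falls back to unrolling it into
      // scalar operations, so everything below stays correct, just slow.)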
      setOperationAction(ISD::MUL , VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FPOWI, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

      for (MVT InnerVT : MVT::vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND , MVT::v4i32, Legal);
    setOperationAction(ISD::OR , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ?
                         Legal : Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector())
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
      if (Subtarget.hasDirectMove()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        // FIXME: this is causing bootstrap failures, disable temporarily
        //setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
      setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      }
      else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Vector operation legalization checks the result type of
      // SIGN_EXTEND_INREG, overall legalization checks the inner type.
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD , MVT::v4f64, Custom);
    setOperationAction(ISD::STORE , MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT , MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT , MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND , MVT::v4f32, Legal);
    setOperationAction(ISD::FP_ROUND_INREG , MVT::v4f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG , MVT::v4f64, Legal);
    setOperationAction(ISD::FABS , MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN , MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS , MVT::v4f64, Expand);
    setOperationAction(ISD::FPOWI , MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2 , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10 , MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP , MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2 , MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD , MVT::v4f32, Custom);
    setOperationAction(ISD::STORE , MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT , MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT , MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG , MVT::v4f32, Legal);
    setOperationAction(ISD::FABS , MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN , MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS , MVT::v4f32, Expand);
    setOperationAction(ISD::FPOWI , MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2 , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10 , MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP , MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2 , MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND , MVT::v4i1, Legal);
    setOperationAction(ISD::OR , MVT::v4i1, Legal);
    setOperationAction(ISD::XOR , MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD , MVT::v4i1, Custom);
    setOperationAction(ISD::STORE , MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
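    // (For reference: the libm rint() family signals inexact rounding via
    // FE_INEXACT, while nearbyint() does not; preserving that side effect is
    // what rules out the vector rounding instructions for FRINT here.)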
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  if (isPPC64) {
    setStackPointerRegisterToSaveRestore(PPC::X1);
    setExceptionPointerRegister(PPC::X3);
    setExceptionSelectorRegister(PPC::X4);
  } else {
    setStackPointerRegisterToSaveRestore(PPC::R1);
    setExceptionPointerRegister(PPC::R3);
    setExceptionSelectorRegister(PPC::R4);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
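  // (Background: the PowerPC condition register consists of eight 4-bit CR
  // fields, i.e. 32 individually allocatable condition bits, which is what
  // useCRBits() models with the i1 CRBITRC register class above.)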
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(2);
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(4);

  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
    setPrefFunctionAlignment(4);
    setPrefLoopAlignment(4);
    break;
  }

  setInsertFencesForAtomic(true);

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary.
  // The rest are aligned to 8 bytes on PPC64 and 4 bytes on PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ?
                                                     32 : 16);
  return Align;
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER: break;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
  case PPCISD::FRE: return "PPCISD::FRE";
  case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::CMPB: return "PPCISD::CMPB";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
  case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL: return "PPCISD::SRL";
  case PPCISD::SRA: return "PPCISD::SRA";
  case PPCISD::SHL: return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL: return "PPCISD::CALL";
  case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR: return "PPCISD::MTCTR";
  case PPCISD::BCTRL: return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
  case PPCISD::MFVSR: return "PPCISD::MFVSR";
  case PPCISD::MTVSRA: return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ";
  case PPCISD::ANDIo_1_EQ_BIT: return "PPCISD::ANDIo_1_EQ_BIT";
  case PPCISD::ANDIo_1_GT_BIT: return "PPCISD::ANDIo_1_GT_BIT";
  case PPCISD::VCMP: return "PPCISD::VCMP";
  case PPCISD::VCMPo: return "PPCISD::VCMPo";
  case PPCISD::LBRX: return "PPCISD::LBRX";
  case PPCISD::STBRX: return "PPCISD::STBRX";
  case PPCISD::LFIWAX: return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX: return "PPCISD::LFIWZX";
  case PPCISD::LXVD2X: return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X: return "PPCISD::STXVD2X";
  case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ: return "PPCISD::BDNZ";
  case PPCISD::BDZ: return "PPCISD::BDZ";
  case PPCISD::MFFS: return "PPCISD::MFFS";
  case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET: return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L: return
"PPCISD::ADDI_TLSGD_L"; 1047 case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR"; 1048 case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR"; 1049 case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA"; 1050 case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L"; 1051 case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR"; 1052 case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR"; 1053 case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA"; 1054 case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L"; 1055 case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT"; 1056 case PPCISD::SC: return "PPCISD::SC"; 1057 case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB"; 1058 case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE"; 1059 case PPCISD::RFEBB: return "PPCISD::RFEBB"; 1060 case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD"; 1061 case PPCISD::QVFPERM: return "PPCISD::QVFPERM"; 1062 case PPCISD::QVGPCI: return "PPCISD::QVGPCI"; 1063 case PPCISD::QVALIGNI: return "PPCISD::QVALIGNI"; 1064 case PPCISD::QVESPLATI: return "PPCISD::QVESPLATI"; 1065 case PPCISD::QBFLT: return "PPCISD::QBFLT"; 1066 case PPCISD::QVLFSb: return "PPCISD::QVLFSb"; 1067 } 1068 return nullptr; 1069 } 1070 1071 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C, 1072 EVT VT) const { 1073 if (!VT.isVector()) 1074 return Subtarget.useCRBits() ? MVT::i1 : MVT::i32; 1075 1076 if (Subtarget.hasQPX()) 1077 return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements()); 1078 1079 return VT.changeVectorElementTypeToInteger(); 1080 } 1081 1082 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const { 1083 assert(VT.isFloatingPoint() && "Non-floating-point FMA?"); 1084 return true; 1085 } 1086 1087 //===----------------------------------------------------------------------===// 1088 // Node matching predicates, for use by the tblgen matching code. 1089 //===----------------------------------------------------------------------===// 1090 1091 /// isFloatingPointZero - Return true if this is 0.0 or -0.0. 1092 static bool isFloatingPointZero(SDValue Op) { 1093 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 1094 return CFP->getValueAPF().isZero(); 1095 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 1096 // Maybe this has already been legalized into the constant pool? 1097 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1))) 1098 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 1099 return CFP->getValueAPF().isZero(); 1100 } 1101 return false; 1102 } 1103 1104 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return 1105 /// true if Op is undef or if it matches the specified value. 1106 static bool isConstantOrUndef(int Op, int Val) { 1107 return Op < 0 || Op == Val; 1108 } 1109 1110 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a 1111 /// VPKUHUM instruction. 1112 /// The ShuffleKind distinguishes between big-endian operations with 1113 /// two different inputs (0), either-endian operations with two identical 1114 /// inputs (1), and little-endian operations with two different inputs (2). 1115 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
        return false;
  }
  return true;
}

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
/// current subtarget.
///
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
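/// For illustration (derived from the checks below): the big-endian two-input
/// form corresponds to the byte mask
/// <4,5,6,7, 12,13,14,15, 20,21,22,23, 28,29,30,31> (the low word of each
/// doubleword), and the little-endian swapped form to
/// <0,1,2,3, 8,9,10,11, 16,17,18,19, 24,25,26,27>.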
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  const PPCSubtarget& Subtarget =
    static_cast<const PPCSubtarget&>(DAG.getSubtarget());
  if (!Subtarget.hasP8Vector())
    return false;

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i ), i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
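/// For illustration (derived from isVMerge above): with a 4-byte unit on a
/// big-endian target, the two-input form corresponds to the byte mask
/// <8,9,10,11, 24,25,26,27, 12,13,14,15, 28,29,30,31>, i.e. the vmrglw
/// interleave of the low halves of the two inputs.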
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}

/**
 * \brief Common function used to match vmrgew and vmrgow shuffles
 *
 * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target machine.
 *   - Little Endian:
 *     - Use offset of 0 to check for odd elements
 *     - Use offset of 4 to check for even elements
 *   - Big Endian:
 *     - Use offset of 0 to check for even elements
 *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 * Targeting your applications - what little endian and big endian IBM XL C/C++
 * compiler differences mean to you
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements are
 * numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, thus each vector will contain 16 elements
 * of size 8. More info on the shuffle vector can be found in the
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
 * Language Reference.
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 *   - If the instruction uses the same vector for both inputs, the range of
 *     the indices will be 0 to 15. In this case, the RHSStart value passed
 *     should be 0.
 *   - If the instruction has two different vectors then the range of the
 *     indices will be 0 to 31.
In this case, the RHSStart value passed should 1329 * be 16 (indices 0-15 specify elements in the first vector while indices 16 1330 * to 31 specify elements in the second vector). 1331 * 1332 * \param[in] N The shuffle vector SD Node to analyze 1333 * \param[in] IndexOffset Specifies whether to look for even or odd elements 1334 * \param[in] RHSStartValue Specifies the starting index for the righthand input 1335 * vector to the shuffle_vector instruction 1336 * \return true iff this shuffle vector represents an even or odd word merge 1337 */ 1338 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset, 1339 unsigned RHSStartValue) { 1340 if (N->getValueType(0) != MVT::v16i8) 1341 return false; 1342 1343 for (unsigned i = 0; i < 2; ++i) 1344 for (unsigned j = 0; j < 4; ++j) 1345 if (!isConstantOrUndef(N->getMaskElt(i*4+j), 1346 i*RHSStartValue+j+IndexOffset) || 1347 !isConstantOrUndef(N->getMaskElt(i*4+j+8), 1348 i*RHSStartValue+j+IndexOffset+8)) 1349 return false; 1350 return true; 1351 } 1352 1353 /** 1354 * \brief Determine if the specified shuffle mask is suitable for the vmrgew or 1355 * vmrgow instructions. 1356 * 1357 * \param[in] N The shuffle vector SD Node to analyze 1358 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false) 1359 * \param[in] ShuffleKind Identify the type of merge: 1360 * - 0 = big-endian merge with two different inputs; 1361 * - 1 = either-endian merge with two identical inputs; 1362 * - 2 = little-endian merge with two different inputs (inputs are swapped for 1363 * little-endian merges). 1364 * \param[in] DAG The current SelectionDAG 1365 * \return true iff this shuffle mask 1366 */ 1367 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven, 1368 unsigned ShuffleKind, SelectionDAG &DAG) { 1369 if (DAG.getDataLayout().isLittleEndian()) { 1370 unsigned indexOffset = CheckEven ? 4 : 0; 1371 if (ShuffleKind == 1) // Unary 1372 return isVMerge(N, indexOffset, 0); 1373 else if (ShuffleKind == 2) // swapped 1374 return isVMerge(N, indexOffset, 16); 1375 else 1376 return false; 1377 } 1378 else { 1379 unsigned indexOffset = CheckEven ? 0 : 4; 1380 if (ShuffleKind == 1) // Unary 1381 return isVMerge(N, indexOffset, 0); 1382 else if (ShuffleKind == 0) // Normal 1383 return isVMerge(N, indexOffset, 16); 1384 else 1385 return false; 1386 } 1387 return false; 1388 } 1389 1390 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift 1391 /// amount, otherwise return -1. 1392 /// The ShuffleKind distinguishes between big-endian operations with two 1393 /// different inputs (0), either-endian operations with two identical inputs 1394 /// (1), and little-endian operations with two different inputs (2). For the 1395 /// latter, the input operands are swapped (see PPCInstrAltivec.td). 1396 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind, 1397 SelectionDAG &DAG) { 1398 if (N->getValueType(0) != MVT::v16i8) 1399 return -1; 1400 1401 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 1402 1403 // Find the first non-undef value in the shuffle mask. 1404 unsigned i; 1405 for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i) 1406 /*search*/; 1407 1408 if (i == 16) return -1; // all undef. 1409 1410 // Otherwise, check to see if the rest of the elements are consecutively 1411 // numbered from this value. 
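// For example, the mask <3, 4, 5, ..., 18> is consecutive starting at 3, so on
// a big-endian target with ShuffleKind 0 it yields a shift amount of 3.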
1412 unsigned ShiftAmt = SVOp->getMaskElt(i); 1413 if (ShiftAmt < i) return -1; 1414 1415 ShiftAmt -= i; 1416 bool isLE = DAG.getDataLayout().isLittleEndian(); 1417 1418 if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) { 1419 // Check the rest of the elements to see if they are consecutive. 1420 for (++i; i != 16; ++i) 1421 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 1422 return -1; 1423 } else if (ShuffleKind == 1) { 1424 // Check the rest of the elements to see if they are consecutive. 1425 for (++i; i != 16; ++i) 1426 if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15)) 1427 return -1; 1428 } else 1429 return -1; 1430 1431 if (isLE) 1432 ShiftAmt = 16 - ShiftAmt; 1433 1434 return ShiftAmt; 1435 } 1436 1437 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand 1438 /// specifies a splat of a single element that is suitable for input to 1439 /// VSPLTB/VSPLTH/VSPLTW. 1440 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) { 1441 assert(N->getValueType(0) == MVT::v16i8 && 1442 (EltSize == 1 || EltSize == 2 || EltSize == 4)); 1443 1444 // The consecutive indices need to specify an element, not part of two 1445 // different elements. So abandon ship early if this isn't the case. 1446 if (N->getMaskElt(0) % EltSize != 0) 1447 return false; 1448 1449 // This is a splat operation if each element of the permute is the same, and 1450 // if the value doesn't reference the second vector. 1451 unsigned ElementBase = N->getMaskElt(0); 1452 1453 // FIXME: Handle UNDEF elements too! 1454 if (ElementBase >= 16) 1455 return false; 1456 1457 // Check that the indices are consecutive, in the case of a multi-byte element 1458 // splatted with a v16i8 mask. 1459 for (unsigned i = 1; i != EltSize; ++i) 1460 if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase)) 1461 return false; 1462 1463 for (unsigned i = EltSize, e = 16; i != e; i += EltSize) { 1464 if (N->getMaskElt(i) < 0) continue; 1465 for (unsigned j = 0; j != EltSize; ++j) 1466 if (N->getMaskElt(i+j) != N->getMaskElt(j)) 1467 return false; 1468 } 1469 return true; 1470 } 1471 1472 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the 1473 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask. 1474 unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize, 1475 SelectionDAG &DAG) { 1476 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 1477 assert(isSplatShuffleMask(SVOp, EltSize)); 1478 if (DAG.getDataLayout().isLittleEndian()) 1479 return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize); 1480 else 1481 return SVOp->getMaskElt(0) / EltSize; 1482 } 1483 1484 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed 1485 /// by using a vspltis[bhw] instruction of the specified element size, return 1486 /// the constant being splatted. The ByteSize field indicates the number of 1487 /// bytes of each element [124] -> [bhw]. 1488 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { 1489 SDValue OpVal(nullptr, 0); 1490 1491 // If ByteSize of the splat is bigger than the element size of the 1492 // build_vector, then we have a case where we are checking for a splat where 1493 // multiple elements of the buildvector are folded together into a single 1494 // logical element of the splat (e.g. "vsplish 1" to splat {0,1}*8). 1495 unsigned EltSize = 16/N->getNumOperands(); 1496 if (EltSize < ByteSize) { 1497 unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval. 
1498 SDValue UniquedVals[4]; 1499 assert(Multiple > 1 && Multiple <= 4 && "How can this happen?"); 1500 1501 // See if all of the elements in the buildvector agree across. 1502 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 1503 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; 1504 // If the element isn't a constant, bail fully out. 1505 if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue(); 1506 1507 1508 if (!UniquedVals[i&(Multiple-1)].getNode()) 1509 UniquedVals[i&(Multiple-1)] = N->getOperand(i); 1510 else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i)) 1511 return SDValue(); // no match. 1512 } 1513 1514 // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains 1515 // either constant or undef values that are identical for each chunk. See 1516 // if these chunks can form into a larger vspltis*. 1517 1518 // Check to see if all of the leading entries are either 0 or -1. If 1519 // neither, then this won't fit into the immediate field. 1520 bool LeadingZero = true; 1521 bool LeadingOnes = true; 1522 for (unsigned i = 0; i != Multiple-1; ++i) { 1523 if (!UniquedVals[i].getNode()) continue; // Must have been undefs. 1524 1525 LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue(); 1526 LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue(); 1527 } 1528 // Finally, check the least significant entry. 1529 if (LeadingZero) { 1530 if (!UniquedVals[Multiple-1].getNode()) 1531 return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef 1532 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue(); 1533 if (Val < 16) // 0,0,0,4 -> vspltisw(4) 1534 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32); 1535 } 1536 if (LeadingOnes) { 1537 if (!UniquedVals[Multiple-1].getNode()) 1538 return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef 1539 int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue(); 1540 if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2) 1541 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32); 1542 } 1543 1544 return SDValue(); 1545 } 1546 1547 // Check to see if this buildvec has a single non-undef value in its elements. 1548 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 1549 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; 1550 if (!OpVal.getNode()) 1551 OpVal = N->getOperand(i); 1552 else if (OpVal != N->getOperand(i)) 1553 return SDValue(); 1554 } 1555 1556 if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def. 1557 1558 unsigned ValSizeInBytes = EltSize; 1559 uint64_t Value = 0; 1560 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { 1561 Value = CN->getZExtValue(); 1562 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { 1563 assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!"); 1564 Value = FloatToBits(CN->getValueAPF().convertToFloat()); 1565 } 1566 1567 // If the splat value is larger than the element value, then we can never do 1568 // this splat. The only case that we could fit the replicated bits into our 1569 // immediate field for would be zero, and we prefer to use vxor for it. 1570 if (ValSizeInBytes < ByteSize) return SDValue(); 1571 1572 // If the element value is larger than the splat value, check if it consists 1573 // of a repeated bit pattern of size ByteSize. 1574 if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8)) 1575 return SDValue(); 1576 1577 // Properly sign extend the value. 
1578 int MaskVal = SignExtend32(Value, ByteSize * 8); 1579 1580 // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros. 1581 if (MaskVal == 0) return SDValue(); 1582 1583 // Finally, if this value fits in a 5 bit sext field, return it 1584 if (SignExtend32<5>(MaskVal) == MaskVal) 1585 return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32); 1586 return SDValue(); 1587 } 1588 1589 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift 1590 /// amount, otherwise return -1. 1591 int PPC::isQVALIGNIShuffleMask(SDNode *N) { 1592 EVT VT = N->getValueType(0); 1593 if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1) 1594 return -1; 1595 1596 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 1597 1598 // Find the first non-undef value in the shuffle mask. 1599 unsigned i; 1600 for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i) 1601 /*search*/; 1602 1603 if (i == 4) return -1; // all undef. 1604 1605 // Otherwise, check to see if the rest of the elements are consecutively 1606 // numbered from this value. 1607 unsigned ShiftAmt = SVOp->getMaskElt(i); 1608 if (ShiftAmt < i) return -1; 1609 ShiftAmt -= i; 1610 1611 // Check the rest of the elements to see if they are consecutive. 1612 for (++i; i != 4; ++i) 1613 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 1614 return -1; 1615 1616 return ShiftAmt; 1617 } 1618 1619 //===----------------------------------------------------------------------===// 1620 // Addressing Mode Selection 1621 //===----------------------------------------------------------------------===// 1622 1623 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit 1624 /// or 64-bit immediate, and if the value can be accurately represented as a 1625 /// sign extension from a 16-bit value. If so, this returns true and the 1626 /// immediate. 1627 static bool isIntS16Immediate(SDNode *N, short &Imm) { 1628 if (!isa<ConstantSDNode>(N)) 1629 return false; 1630 1631 Imm = (short)cast<ConstantSDNode>(N)->getZExtValue(); 1632 if (N->getValueType(0) == MVT::i32) 1633 return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue(); 1634 else 1635 return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue(); 1636 } 1637 static bool isIntS16Immediate(SDValue Op, short &Imm) { 1638 return isIntS16Immediate(Op.getNode(), Imm); 1639 } 1640 1641 1642 /// SelectAddressRegReg - Given the specified addressed, check to see if it 1643 /// can be represented as an indexed [r+r] operation. Returns false if it 1644 /// can be more efficiently represented with [r+imm]. 1645 bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base, 1646 SDValue &Index, 1647 SelectionDAG &DAG) const { 1648 short imm = 0; 1649 if (N.getOpcode() == ISD::ADD) { 1650 if (isIntS16Immediate(N.getOperand(1), imm)) 1651 return false; // r+i 1652 if (N.getOperand(1).getOpcode() == PPCISD::Lo) 1653 return false; // r+i 1654 1655 Base = N.getOperand(0); 1656 Index = N.getOperand(1); 1657 return true; 1658 } else if (N.getOpcode() == ISD::OR) { 1659 if (isIntS16Immediate(N.getOperand(1), imm)) 1660 return false; // r+i can fold it if we can. 1661 1662 // If this is an or of disjoint bitfields, we can codegen this as an add 1663 // (for better address arithmetic) if the LHS and RHS of the OR are provably 1664 // disjoint. 
1665 APInt LHSKnownZero, LHSKnownOne;
1666 APInt RHSKnownZero, RHSKnownOne;
1667 DAG.computeKnownBits(N.getOperand(0),
1668 LHSKnownZero, LHSKnownOne);
1669
1670 if (LHSKnownZero.getBoolValue()) {
1671 DAG.computeKnownBits(N.getOperand(1),
1672 RHSKnownZero, RHSKnownOne);
1673 // If all of the bits are known zero on the LHS or RHS, the add won't
1674 // carry.
1675 if (~(LHSKnownZero | RHSKnownZero) == 0) {
1676 Base = N.getOperand(0);
1677 Index = N.getOperand(1);
1678 return true;
1679 }
1680 }
1681 }
1682
1683 return false;
1684 }
1685
1686 // If we happen to be doing an i64 load or store into a stack slot that has
1687 // less than a 4-byte alignment, then the frame-index elimination may need to
1688 // use an indexed load or store instruction (because the offset may not be a
1689 // multiple of 4). The extra register needed to hold the offset comes from the
1690 // register scavenger, and it is possible that the scavenger will need to use
1691 // an emergency spill slot. As a result, we need to make sure that a spill slot
1692 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
1693 // stack slot.
1694 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
1695 // FIXME: This does not handle the LWA case.
1696 if (VT != MVT::i64)
1697 return;
1698
1699 // NOTE: We'll exclude negative FIs here, which come from argument
1700 // lowering, because there are no known test cases triggering this problem
1701 // using packed structures (or similar). We can remove this exclusion if
1702 // we find such a test case. The reason why this is so test-case driven is
1703 // because this entire 'fixup' is only to prevent crashes (from the
1704 // register scavenger) on not-really-valid inputs. For example, if we have:
1705 // %a = alloca i1
1706 // %b = bitcast i1* %a to i64*
1707 // store i64 0, i64* %b
1708 // then the store should really be marked as 'align 1', but is not. If it
1709 // were marked as 'align 1' then the indexed form would have been
1710 // instruction-selected initially, and the problem this 'fixup' is preventing
1711 // won't happen regardless.
1712 if (FrameIdx < 0)
1713 return;
1714
1715 MachineFunction &MF = DAG.getMachineFunction();
1716 MachineFrameInfo *MFI = MF.getFrameInfo();
1717
1718 unsigned Align = MFI->getObjectAlignment(FrameIdx);
1719 if (Align >= 4)
1720 return;
1721
1722 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
1723 FuncInfo->setHasNonRISpills();
1724 }
1725
1726 /// Returns true if the address N can be represented by a base register plus
1727 /// a signed 16-bit displacement [r+imm], and if it is not better
1728 /// represented as reg+reg. If Aligned is true, only accept displacements
1729 /// suitable for STD and friends, i.e. multiples of 4.
1730 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
1731 SDValue &Base,
1732 SelectionDAG &DAG,
1733 bool Aligned) const {
1734 // FIXME dl should come from parent load or store, not from address
1735 SDLoc dl(N);
1736 // If this can be more profitably realized as r+r, fail.
1737 if (SelectAddressRegReg(N, Disp, Base, DAG)) 1738 return false; 1739 1740 if (N.getOpcode() == ISD::ADD) { 1741 short imm = 0; 1742 if (isIntS16Immediate(N.getOperand(1), imm) && 1743 (!Aligned || (imm & 3) == 0)) { 1744 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 1745 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 1746 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1747 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 1748 } else { 1749 Base = N.getOperand(0); 1750 } 1751 return true; // [r+i] 1752 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 1753 // Match LOAD (ADD (X, Lo(G))). 1754 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() 1755 && "Cannot handle constant offsets yet!"); 1756 Disp = N.getOperand(1).getOperand(0); // The global address. 1757 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 1758 Disp.getOpcode() == ISD::TargetGlobalTLSAddress || 1759 Disp.getOpcode() == ISD::TargetConstantPool || 1760 Disp.getOpcode() == ISD::TargetJumpTable); 1761 Base = N.getOperand(0); 1762 return true; // [&g+r] 1763 } 1764 } else if (N.getOpcode() == ISD::OR) { 1765 short imm = 0; 1766 if (isIntS16Immediate(N.getOperand(1), imm) && 1767 (!Aligned || (imm & 3) == 0)) { 1768 // If this is an or of disjoint bitfields, we can codegen this as an add 1769 // (for better address arithmetic) if the LHS and RHS of the OR are 1770 // provably disjoint. 1771 APInt LHSKnownZero, LHSKnownOne; 1772 DAG.computeKnownBits(N.getOperand(0), LHSKnownZero, LHSKnownOne); 1773 1774 if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 1775 // If all of the bits are known zero on the LHS or RHS, the add won't 1776 // carry. 1777 if (FrameIndexSDNode *FI = 1778 dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 1779 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1780 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 1781 } else { 1782 Base = N.getOperand(0); 1783 } 1784 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 1785 return true; 1786 } 1787 } 1788 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 1789 // Loading from a constant address. 1790 1791 // If this address fits entirely in a 16-bit sext immediate field, codegen 1792 // this as "d, 0" 1793 short Imm; 1794 if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) { 1795 Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0)); 1796 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, 1797 CN->getValueType(0)); 1798 return true; 1799 } 1800 1801 // Handle 32-bit sext immediates with LIS + addr mode. 1802 if ((CN->getValueType(0) == MVT::i32 || 1803 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) && 1804 (!Aligned || (CN->getZExtValue() & 3) == 0)) { 1805 int Addr = (int)CN->getZExtValue(); 1806 1807 // Otherwise, break this down into an LIS + disp. 1808 Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32); 1809 1810 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl, 1811 MVT::i32); 1812 unsigned Opc = CN->getValueType(0) == MVT::i32 ? 
PPC::LIS : PPC::LIS8; 1813 Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0); 1814 return true; 1815 } 1816 } 1817 1818 Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout())); 1819 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) { 1820 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1821 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 1822 } else 1823 Base = N; 1824 return true; // [r+0] 1825 } 1826 1827 /// SelectAddressRegRegOnly - Given the specified addressed, force it to be 1828 /// represented as an indexed [r+r] operation. 1829 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base, 1830 SDValue &Index, 1831 SelectionDAG &DAG) const { 1832 // Check to see if we can easily represent this as an [r+r] address. This 1833 // will fail if it thinks that the address is more profitably represented as 1834 // reg+imm, e.g. where imm = 0. 1835 if (SelectAddressRegReg(N, Base, Index, DAG)) 1836 return true; 1837 1838 // If the operand is an addition, always emit this as [r+r], since this is 1839 // better (for code size, and execution, as the memop does the add for free) 1840 // than emitting an explicit add. 1841 if (N.getOpcode() == ISD::ADD) { 1842 Base = N.getOperand(0); 1843 Index = N.getOperand(1); 1844 return true; 1845 } 1846 1847 // Otherwise, do it the hard way, using R0 as the base register. 1848 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, 1849 N.getValueType()); 1850 Index = N; 1851 return true; 1852 } 1853 1854 /// getPreIndexedAddressParts - returns true by value, base pointer and 1855 /// offset pointer and addressing mode by reference if the node's address 1856 /// can be legally represented as pre-indexed load / store address. 1857 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 1858 SDValue &Offset, 1859 ISD::MemIndexedMode &AM, 1860 SelectionDAG &DAG) const { 1861 if (DisablePPCPreinc) return false; 1862 1863 bool isLoad = true; 1864 SDValue Ptr; 1865 EVT VT; 1866 unsigned Alignment; 1867 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1868 Ptr = LD->getBasePtr(); 1869 VT = LD->getMemoryVT(); 1870 Alignment = LD->getAlignment(); 1871 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 1872 Ptr = ST->getBasePtr(); 1873 VT = ST->getMemoryVT(); 1874 Alignment = ST->getAlignment(); 1875 isLoad = false; 1876 } else 1877 return false; 1878 1879 // PowerPC doesn't have preinc load/store instructions for vectors (except 1880 // for QPX, which does have preinc r+r forms). 1881 if (VT.isVector()) { 1882 if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) { 1883 return false; 1884 } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) { 1885 AM = ISD::PRE_INC; 1886 return true; 1887 } 1888 } 1889 1890 if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) { 1891 1892 // Common code will reject creating a pre-inc form if the base pointer 1893 // is a frame index, or if N is a store and the base pointer is either 1894 // the same as or a predecessor of the value being stored. Check for 1895 // those situations here, and try with swapped Base/Offset instead. 
1896 bool Swap = false; 1897 1898 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base)) 1899 Swap = true; 1900 else if (!isLoad) { 1901 SDValue Val = cast<StoreSDNode>(N)->getValue(); 1902 if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode())) 1903 Swap = true; 1904 } 1905 1906 if (Swap) 1907 std::swap(Base, Offset); 1908 1909 AM = ISD::PRE_INC; 1910 return true; 1911 } 1912 1913 // LDU/STU can only handle immediates that are a multiple of 4. 1914 if (VT != MVT::i64) { 1915 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, false)) 1916 return false; 1917 } else { 1918 // LDU/STU need an address with at least 4-byte alignment. 1919 if (Alignment < 4) 1920 return false; 1921 1922 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, true)) 1923 return false; 1924 } 1925 1926 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1927 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 1928 // sext i32 to i64 when addr mode is r+i. 1929 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 && 1930 LD->getExtensionType() == ISD::SEXTLOAD && 1931 isa<ConstantSDNode>(Offset)) 1932 return false; 1933 } 1934 1935 AM = ISD::PRE_INC; 1936 return true; 1937 } 1938 1939 //===----------------------------------------------------------------------===// 1940 // LowerOperation implementation 1941 //===----------------------------------------------------------------------===// 1942 1943 /// GetLabelAccessInfo - Return true if we should reference labels using a 1944 /// PICBase, set the HiOpFlags and LoOpFlags to the target MO flags. 1945 static bool GetLabelAccessInfo(const TargetMachine &TM, 1946 const PPCSubtarget &Subtarget, 1947 unsigned &HiOpFlags, unsigned &LoOpFlags, 1948 const GlobalValue *GV = nullptr) { 1949 HiOpFlags = PPCII::MO_HA; 1950 LoOpFlags = PPCII::MO_LO; 1951 1952 // Don't use the pic base if not in PIC relocation model. 1953 bool isPIC = TM.getRelocationModel() == Reloc::PIC_; 1954 1955 if (isPIC) { 1956 HiOpFlags |= PPCII::MO_PIC_FLAG; 1957 LoOpFlags |= PPCII::MO_PIC_FLAG; 1958 } 1959 1960 // If this is a reference to a global value that requires a non-lazy-ptr, make 1961 // sure that instruction lowering adds it. 1962 if (GV && Subtarget.hasLazyResolverStub(GV)) { 1963 HiOpFlags |= PPCII::MO_NLP_FLAG; 1964 LoOpFlags |= PPCII::MO_NLP_FLAG; 1965 1966 if (GV->hasHiddenVisibility()) { 1967 HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 1968 LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 1969 } 1970 } 1971 1972 return isPIC; 1973 } 1974 1975 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, 1976 SelectionDAG &DAG) { 1977 SDLoc DL(HiPart); 1978 EVT PtrVT = HiPart.getValueType(); 1979 SDValue Zero = DAG.getConstant(0, DL, PtrVT); 1980 1981 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); 1982 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); 1983 1984 // With PIC, the first instruction is actually "GR+hi(&G)". 1985 if (isPIC) 1986 Hi = DAG.getNode(ISD::ADD, DL, PtrVT, 1987 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); 1988 1989 // Generate non-pic code that has direct accesses to the constant pool. 1990 // The address of the global is just (hi(&g)+lo(&g)). 
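// On ELF targets this typically materializes as a two-instruction sequence
// along the lines of (illustrative):
//   lis/addis rT, ..., sym@ha
//   addi      rT, rT,  sym@l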
1991 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); 1992 } 1993 1994 static void setUsesTOCBasePtr(MachineFunction &MF) { 1995 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 1996 FuncInfo->setUsesTOCBasePtr(); 1997 } 1998 1999 static void setUsesTOCBasePtr(SelectionDAG &DAG) { 2000 setUsesTOCBasePtr(DAG.getMachineFunction()); 2001 } 2002 2003 static SDValue getTOCEntry(SelectionDAG &DAG, SDLoc dl, bool Is64Bit, 2004 SDValue GA) { 2005 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 2006 SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) : 2007 DAG.getNode(PPCISD::GlobalBaseReg, dl, VT); 2008 2009 SDValue Ops[] = { GA, Reg }; 2010 return DAG.getMemIntrinsicNode( 2011 PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT, 2012 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0, false, true, 2013 false, 0); 2014 } 2015 2016 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 2017 SelectionDAG &DAG) const { 2018 EVT PtrVT = Op.getValueType(); 2019 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 2020 const Constant *C = CP->getConstVal(); 2021 2022 // 64-bit SVR4 ABI code is always position-independent. 2023 // The actual address of the GlobalValue is stored in the TOC. 2024 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2025 setUsesTOCBasePtr(DAG); 2026 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0); 2027 return getTOCEntry(DAG, SDLoc(CP), true, GA); 2028 } 2029 2030 unsigned MOHiFlag, MOLoFlag; 2031 bool isPIC = 2032 GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag); 2033 2034 if (isPIC && Subtarget.isSVR4ABI()) { 2035 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 2036 PPCII::MO_PIC_FLAG); 2037 return getTOCEntry(DAG, SDLoc(CP), false, GA); 2038 } 2039 2040 SDValue CPIHi = 2041 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag); 2042 SDValue CPILo = 2043 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag); 2044 return LowerLabelRef(CPIHi, CPILo, isPIC, DAG); 2045 } 2046 2047 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 2048 EVT PtrVT = Op.getValueType(); 2049 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 2050 2051 // 64-bit SVR4 ABI code is always position-independent. 2052 // The actual address of the GlobalValue is stored in the TOC. 2053 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2054 setUsesTOCBasePtr(DAG); 2055 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 2056 return getTOCEntry(DAG, SDLoc(JT), true, GA); 2057 } 2058 2059 unsigned MOHiFlag, MOLoFlag; 2060 bool isPIC = 2061 GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag); 2062 2063 if (isPIC && Subtarget.isSVR4ABI()) { 2064 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 2065 PPCII::MO_PIC_FLAG); 2066 return getTOCEntry(DAG, SDLoc(GA), false, GA); 2067 } 2068 2069 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 2070 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 2071 return LowerLabelRef(JTIHi, JTILo, isPIC, DAG); 2072 } 2073 2074 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 2075 SelectionDAG &DAG) const { 2076 EVT PtrVT = Op.getValueType(); 2077 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); 2078 const BlockAddress *BA = BASDN->getBlockAddress(); 2079 2080 // 64-bit SVR4 ABI code is always position-independent. 2081 // The actual BlockAddress is stored in the TOC. 
2082 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2083 setUsesTOCBasePtr(DAG); 2084 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); 2085 return getTOCEntry(DAG, SDLoc(BASDN), true, GA); 2086 } 2087 2088 unsigned MOHiFlag, MOLoFlag; 2089 bool isPIC = 2090 GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag); 2091 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 2092 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 2093 return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG); 2094 } 2095 2096 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 2097 SelectionDAG &DAG) const { 2098 2099 // FIXME: TLS addresses currently use medium model code sequences, 2100 // which is the most useful form. Eventually support for small and 2101 // large models could be added if users need it, at the cost of 2102 // additional complexity. 2103 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2104 if (DAG.getTarget().Options.EmulatedTLS) 2105 return LowerToTLSEmulatedModel(GA, DAG); 2106 2107 SDLoc dl(GA); 2108 const GlobalValue *GV = GA->getGlobal(); 2109 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2110 bool is64bit = Subtarget.isPPC64(); 2111 const Module *M = DAG.getMachineFunction().getFunction()->getParent(); 2112 PICLevel::Level picLevel = M->getPICLevel(); 2113 2114 TLSModel::Model Model = getTargetMachine().getTLSModel(GV); 2115 2116 if (Model == TLSModel::LocalExec) { 2117 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2118 PPCII::MO_TPREL_HA); 2119 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2120 PPCII::MO_TPREL_LO); 2121 SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2, 2122 is64bit ? MVT::i64 : MVT::i32); 2123 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 2124 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 2125 } 2126 2127 if (Model == TLSModel::InitialExec) { 2128 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2129 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2130 PPCII::MO_TLS); 2131 SDValue GOTPtr; 2132 if (is64bit) { 2133 setUsesTOCBasePtr(DAG); 2134 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2135 GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, 2136 PtrVT, GOTReg, TGA); 2137 } else 2138 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); 2139 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, 2140 PtrVT, TGA, GOTPtr); 2141 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 2142 } 2143 2144 if (Model == TLSModel::GeneralDynamic) { 2145 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2146 SDValue GOTPtr; 2147 if (is64bit) { 2148 setUsesTOCBasePtr(DAG); 2149 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2150 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 2151 GOTReg, TGA); 2152 } else { 2153 if (picLevel == PICLevel::Small) 2154 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2155 else 2156 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2157 } 2158 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT, 2159 GOTPtr, TGA, TGA); 2160 } 2161 2162 if (Model == TLSModel::LocalDynamic) { 2163 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2164 SDValue GOTPtr; 2165 if (is64bit) { 2166 setUsesTOCBasePtr(DAG); 2167 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2168 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 2169 GOTReg, TGA); 2170 } else { 2171 if (picLevel == PICLevel::Small) 2172 GOTPtr = 
DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
2173 else
2174 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
2175 }
2176 SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
2177 PtrVT, GOTPtr, TGA, TGA);
2178 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
2179 PtrVT, TLSAddr, TGA);
2180 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
2181 }
2182
2183 llvm_unreachable("Unknown TLS model!");
2184 }
2185
2186 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
2187 SelectionDAG &DAG) const {
2188 EVT PtrVT = Op.getValueType();
2189 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
2190 SDLoc DL(GSDN);
2191 const GlobalValue *GV = GSDN->getGlobal();
2192
2193 // 64-bit SVR4 ABI code is always position-independent.
2194 // The actual address of the GlobalValue is stored in the TOC.
2195 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
2196 setUsesTOCBasePtr(DAG);
2197 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
2198 return getTOCEntry(DAG, DL, true, GA);
2199 }
2200
2201 unsigned MOHiFlag, MOLoFlag;
2202 bool isPIC =
2203 GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag, GV);
2204
2205 if (isPIC && Subtarget.isSVR4ABI()) {
2206 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
2207 GSDN->getOffset(),
2208 PPCII::MO_PIC_FLAG);
2209 return getTOCEntry(DAG, DL, false, GA);
2210 }
2211
2212 SDValue GAHi =
2213 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
2214 SDValue GALo =
2215 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
2216
2217 SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG);
2218
2219 // If the global reference is actually to a non-lazy-pointer, we have to do an
2220 // extra load to get the address of the global.
2221 if (MOHiFlag & PPCII::MO_NLP_FLAG)
2222 Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(),
2223 false, false, false, 0);
2224 return Ptr;
2225 }
2226
2227 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
2228 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
2229 SDLoc dl(Op);
2230
2231 if (Op.getValueType() == MVT::v2i64) {
2232 // When the operands themselves are v2i64 values, we need to do something
2233 // special because VSX has no underlying comparison operations for these.
2234 if (Op.getOperand(0).getValueType() == MVT::v2i64) {
2235 // Equality can be handled by casting to the legal type for Altivec
2236 // comparisons, everything else needs to be expanded.
2237 if (CC == ISD::SETEQ || CC == ISD::SETNE) {
2238 return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
2239 DAG.getSetCC(dl, MVT::v4i32,
2240 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
2241 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
2242 CC));
2243 }
2244
2245 return SDValue();
2246 }
2247
2248 // We handle most of these in the usual way.
2249 return Op;
2250 }
2251
2252 // If we're comparing for equality to zero, expose the fact that this is
2253 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
2254 // fold the new nodes.
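// Illustrative shape of the transformation for i32:
//   (seteq %x, 0) --> (srl (ctlz %x), 5)
// cntlzw produces 32 only when %x is zero, so shifting right by log2(32)
// leaves a 0/1 result.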
2255 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2256 if (C->isNullValue() && CC == ISD::SETEQ) { 2257 EVT VT = Op.getOperand(0).getValueType(); 2258 SDValue Zext = Op.getOperand(0); 2259 if (VT.bitsLT(MVT::i32)) { 2260 VT = MVT::i32; 2261 Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0)); 2262 } 2263 unsigned Log2b = Log2_32(VT.getSizeInBits()); 2264 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext); 2265 SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz, 2266 DAG.getConstant(Log2b, dl, MVT::i32)); 2267 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc); 2268 } 2269 // Leave comparisons against 0 and -1 alone for now, since they're usually 2270 // optimized. FIXME: revisit this when we can custom lower all setcc 2271 // optimizations. 2272 if (C->isAllOnesValue() || C->isNullValue()) 2273 return SDValue(); 2274 } 2275 2276 // If we have an integer seteq/setne, turn it into a compare against zero 2277 // by xor'ing the rhs with the lhs, which is faster than setting a 2278 // condition register, reading it back out, and masking the correct bit. The 2279 // normal approach here uses sub to do this instead of xor. Using xor exposes 2280 // the result to other bit-twiddling opportunities. 2281 EVT LHSVT = Op.getOperand(0).getValueType(); 2282 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 2283 EVT VT = Op.getValueType(); 2284 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0), 2285 Op.getOperand(1)); 2286 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC); 2287 } 2288 return SDValue(); 2289 } 2290 2291 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG, 2292 const PPCSubtarget &Subtarget) const { 2293 SDNode *Node = Op.getNode(); 2294 EVT VT = Node->getValueType(0); 2295 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 2296 SDValue InChain = Node->getOperand(0); 2297 SDValue VAListPtr = Node->getOperand(1); 2298 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 2299 SDLoc dl(Node); 2300 2301 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only"); 2302 2303 // gpr_index 2304 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 2305 VAListPtr, MachinePointerInfo(SV), MVT::i8, 2306 false, false, false, 0); 2307 InChain = GprIndex.getValue(1); 2308 2309 if (VT == MVT::i64) { 2310 // Check if GprIndex is even 2311 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex, 2312 DAG.getConstant(1, dl, MVT::i32)); 2313 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd, 2314 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE); 2315 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex, 2316 DAG.getConstant(1, dl, MVT::i32)); 2317 // Align GprIndex to be even if it isn't 2318 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne, 2319 GprIndex); 2320 } 2321 2322 // fpr index is 1 byte after gpr 2323 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2324 DAG.getConstant(1, dl, MVT::i32)); 2325 2326 // fpr 2327 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 2328 FprPtr, MachinePointerInfo(SV), MVT::i8, 2329 false, false, false, 0); 2330 InChain = FprIndex.getValue(1); 2331 2332 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2333 DAG.getConstant(8, dl, MVT::i32)); 2334 2335 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2336 DAG.getConstant(4, dl, MVT::i32)); 2337 2338 // areas 2339 SDValue OverflowArea = 
DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, 2340 MachinePointerInfo(), false, false, 2341 false, 0); 2342 InChain = OverflowArea.getValue(1); 2343 2344 SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, 2345 MachinePointerInfo(), false, false, 2346 false, 0); 2347 InChain = RegSaveArea.getValue(1); 2348 2349 // select overflow_area if index > 8 2350 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex, 2351 DAG.getConstant(8, dl, MVT::i32), ISD::SETLT); 2352 2353 // adjustment constant gpr_index * 4/8 2354 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32, 2355 VT.isInteger() ? GprIndex : FprIndex, 2356 DAG.getConstant(VT.isInteger() ? 4 : 8, dl, 2357 MVT::i32)); 2358 2359 // OurReg = RegSaveArea + RegConstant 2360 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea, 2361 RegConstant); 2362 2363 // Floating types are 32 bytes into RegSaveArea 2364 if (VT.isFloatingPoint()) 2365 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg, 2366 DAG.getConstant(32, dl, MVT::i32)); 2367 2368 // increase {f,g}pr_index by 1 (or 2 if VT is i64) 2369 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32, 2370 VT.isInteger() ? GprIndex : FprIndex, 2371 DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl, 2372 MVT::i32)); 2373 2374 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1, 2375 VT.isInteger() ? VAListPtr : FprPtr, 2376 MachinePointerInfo(SV), 2377 MVT::i8, false, false, 0); 2378 2379 // determine if we should load from reg_save_area or overflow_area 2380 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea); 2381 2382 // increase overflow_area by 4/8 if gpr/fpr > 8 2383 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea, 2384 DAG.getConstant(VT.isInteger() ? 
4 : 8, 2385 dl, MVT::i32)); 2386 2387 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea, 2388 OverflowAreaPlusN); 2389 2390 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, 2391 OverflowAreaPtr, 2392 MachinePointerInfo(), 2393 MVT::i32, false, false, 0); 2394 2395 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(), 2396 false, false, false, 0); 2397 } 2398 2399 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG, 2400 const PPCSubtarget &Subtarget) const { 2401 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only"); 2402 2403 // We have to copy the entire va_list struct: 2404 // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte 2405 return DAG.getMemcpy(Op.getOperand(0), Op, 2406 Op.getOperand(1), Op.getOperand(2), 2407 DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true, 2408 false, MachinePointerInfo(), MachinePointerInfo()); 2409 } 2410 2411 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 2412 SelectionDAG &DAG) const { 2413 return Op.getOperand(0); 2414 } 2415 2416 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 2417 SelectionDAG &DAG) const { 2418 SDValue Chain = Op.getOperand(0); 2419 SDValue Trmp = Op.getOperand(1); // trampoline 2420 SDValue FPtr = Op.getOperand(2); // nested function 2421 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 2422 SDLoc dl(Op); 2423 2424 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 2425 bool isPPC64 = (PtrVT == MVT::i64); 2426 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext()); 2427 2428 TargetLowering::ArgListTy Args; 2429 TargetLowering::ArgListEntry Entry; 2430 2431 Entry.Ty = IntPtrTy; 2432 Entry.Node = Trmp; Args.push_back(Entry); 2433 2434 // TrampSize == (isPPC64 ? 48 : 40); 2435 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl, 2436 isPPC64 ? MVT::i64 : MVT::i32); 2437 Args.push_back(Entry); 2438 2439 Entry.Node = FPtr; Args.push_back(Entry); 2440 Entry.Node = Nest; Args.push_back(Entry); 2441 2442 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 2443 TargetLowering::CallLoweringInfo CLI(DAG); 2444 CLI.setDebugLoc(dl).setChain(Chain) 2445 .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()), 2446 DAG.getExternalSymbol("__trampoline_setup", PtrVT), 2447 std::move(Args), 0); 2448 2449 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 2450 return CallResult.second; 2451 } 2452 2453 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG, 2454 const PPCSubtarget &Subtarget) const { 2455 MachineFunction &MF = DAG.getMachineFunction(); 2456 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2457 2458 SDLoc dl(Op); 2459 2460 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) { 2461 // vastart just stores the address of the VarArgsFrameIndex slot into the 2462 // memory location argument. 2463 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 2464 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2465 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2466 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2467 MachinePointerInfo(SV), 2468 false, false, 0); 2469 } 2470 2471 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 2472 // We suppose the given va_list is already allocated. 
2473 // 2474 // typedef struct { 2475 // char gpr; /* index into the array of 8 GPRs 2476 // * stored in the register save area 2477 // * gpr=0 corresponds to r3, 2478 // * gpr=1 to r4, etc. 2479 // */ 2480 // char fpr; /* index into the array of 8 FPRs 2481 // * stored in the register save area 2482 // * fpr=0 corresponds to f1, 2483 // * fpr=1 to f2, etc. 2484 // */ 2485 // char *overflow_arg_area; 2486 // /* location on stack that holds 2487 // * the next overflow argument 2488 // */ 2489 // char *reg_save_area; 2490 // /* where r3:r10 and f1:f8 (if saved) 2491 // * are stored 2492 // */ 2493 // } va_list[1]; 2494 2495 2496 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32); 2497 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32); 2498 2499 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 2500 2501 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 2502 PtrVT); 2503 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 2504 PtrVT); 2505 2506 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 2507 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT); 2508 2509 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 2510 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT); 2511 2512 uint64_t FPROffset = 1; 2513 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT); 2514 2515 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2516 2517 // Store first byte : number of int regs 2518 SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, 2519 Op.getOperand(1), 2520 MachinePointerInfo(SV), 2521 MVT::i8, false, false, 0); 2522 uint64_t nextOffset = FPROffset; 2523 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 2524 ConstFPROffset); 2525 2526 // Store second byte : number of float regs 2527 SDValue secondStore = 2528 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 2529 MachinePointerInfo(SV, nextOffset), MVT::i8, 2530 false, false, 0); 2531 nextOffset += StackOffset; 2532 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 2533 2534 // Store second word : arguments given on stack 2535 SDValue thirdStore = 2536 DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 2537 MachinePointerInfo(SV, nextOffset), 2538 false, false, 0); 2539 nextOffset += FrameOffset; 2540 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 2541 2542 // Store third word : arguments given in registers 2543 return DAG.getStore(thirdStore, dl, FR, nextPtr, 2544 MachinePointerInfo(SV, nextOffset), 2545 false, false, 0); 2546 2547 } 2548 2549 #include "PPCGenCallingConv.inc" 2550 2551 // Function whose sole purpose is to kill compiler warnings 2552 // stemming from unused functions included from PPCGenCallingConv.inc. 2553 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const { 2554 return Flag ? 
CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS; 2555 } 2556 2557 bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 2558 CCValAssign::LocInfo &LocInfo, 2559 ISD::ArgFlagsTy &ArgFlags, 2560 CCState &State) { 2561 return true; 2562 } 2563 2564 bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, 2565 MVT &LocVT, 2566 CCValAssign::LocInfo &LocInfo, 2567 ISD::ArgFlagsTy &ArgFlags, 2568 CCState &State) { 2569 static const MCPhysReg ArgRegs[] = { 2570 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2571 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2572 }; 2573 const unsigned NumArgRegs = array_lengthof(ArgRegs); 2574 2575 unsigned RegNum = State.getFirstUnallocated(ArgRegs); 2576 2577 // Skip one register if the first unallocated register has an even register 2578 // number and there are still argument registers available which have not been 2579 // allocated yet. RegNum is actually an index into ArgRegs, which means we 2580 // need to skip a register if RegNum is odd. 2581 if (RegNum != NumArgRegs && RegNum % 2 == 1) { 2582 State.AllocateReg(ArgRegs[RegNum]); 2583 } 2584 2585 // Always return false here, as this function only makes sure that the first 2586 // unallocated register has an odd register number and does not actually 2587 // allocate a register for the current argument. 2588 return false; 2589 } 2590 2591 bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT, 2592 MVT &LocVT, 2593 CCValAssign::LocInfo &LocInfo, 2594 ISD::ArgFlagsTy &ArgFlags, 2595 CCState &State) { 2596 static const MCPhysReg ArgRegs[] = { 2597 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 2598 PPC::F8 2599 }; 2600 2601 const unsigned NumArgRegs = array_lengthof(ArgRegs); 2602 2603 unsigned RegNum = State.getFirstUnallocated(ArgRegs); 2604 2605 // If there is only one Floating-point register left we need to put both f64 2606 // values of a split ppc_fp128 value on the stack. 2607 if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) { 2608 State.AllocateReg(ArgRegs[RegNum]); 2609 } 2610 2611 // Always return false here, as this function only makes sure that the two f64 2612 // values a ppc_fp128 value is split into are both passed in registers or both 2613 // passed on the stack and does not actually allocate a register for the 2614 // current argument. 2615 return false; 2616 } 2617 2618 /// FPR - The set of FP registers that should be allocated for arguments, 2619 /// on Darwin. 2620 static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, 2621 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10, 2622 PPC::F11, PPC::F12, PPC::F13}; 2623 2624 /// QFPR - The set of QPX registers that should be allocated for arguments. 2625 static const MCPhysReg QFPR[] = { 2626 PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7, 2627 PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13}; 2628 2629 /// CalculateStackSlotSize - Calculates the size reserved for this argument on 2630 /// the stack. 2631 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 2632 unsigned PtrByteSize) { 2633 unsigned ArgSize = ArgVT.getStoreSize(); 2634 if (Flags.isByVal()) 2635 ArgSize = Flags.getByValSize(); 2636 2637 // Round up to multiples of the pointer size, except for array members, 2638 // which are always packed. 
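// For example, a 6-byte argument occupies a full 8-byte slot when PtrByteSize
// is 8, whereas a consecutive-register (array member) piece keeps its natural
// 6-byte size.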
2639 if (!Flags.isInConsecutiveRegs()) 2640 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2641 2642 return ArgSize; 2643 } 2644 2645 /// CalculateStackSlotAlignment - Calculates the alignment of this argument 2646 /// on the stack. 2647 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, 2648 ISD::ArgFlagsTy Flags, 2649 unsigned PtrByteSize) { 2650 unsigned Align = PtrByteSize; 2651 2652 // Altivec parameters are padded to a 16 byte boundary. 2653 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 2654 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 2655 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 2656 ArgVT == MVT::v1i128) 2657 Align = 16; 2658 // QPX vector types stored in double-precision are padded to a 32 byte 2659 // boundary. 2660 else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1) 2661 Align = 32; 2662 2663 // ByVal parameters are aligned as requested. 2664 if (Flags.isByVal()) { 2665 unsigned BVAlign = Flags.getByValAlign(); 2666 if (BVAlign > PtrByteSize) { 2667 if (BVAlign % PtrByteSize != 0) 2668 llvm_unreachable( 2669 "ByVal alignment is not a multiple of the pointer size"); 2670 2671 Align = BVAlign; 2672 } 2673 } 2674 2675 // Array members are always packed to their original alignment. 2676 if (Flags.isInConsecutiveRegs()) { 2677 // If the array member was split into multiple registers, the first 2678 // needs to be aligned to the size of the full type. (Except for 2679 // ppcf128, which is only aligned as its f64 components.) 2680 if (Flags.isSplit() && OrigVT != MVT::ppcf128) 2681 Align = OrigVT.getStoreSize(); 2682 else 2683 Align = ArgVT.getStoreSize(); 2684 } 2685 2686 return Align; 2687 } 2688 2689 /// CalculateStackSlotUsed - Return whether this argument will use its 2690 /// stack slot (instead of being passed in registers). ArgOffset, 2691 /// AvailableFPRs, and AvailableVRs must hold the current argument 2692 /// position, and will be updated to account for this argument. 2693 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, 2694 ISD::ArgFlagsTy Flags, 2695 unsigned PtrByteSize, 2696 unsigned LinkageSize, 2697 unsigned ParamAreaSize, 2698 unsigned &ArgOffset, 2699 unsigned &AvailableFPRs, 2700 unsigned &AvailableVRs, bool HasQPX) { 2701 bool UseMemory = false; 2702 2703 // Respect alignment of argument on the stack. 2704 unsigned Align = 2705 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 2706 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 2707 // If there's no space left in the argument save area, we must 2708 // use memory (this check also catches zero-sized arguments). 2709 if (ArgOffset >= LinkageSize + ParamAreaSize) 2710 UseMemory = true; 2711 2712 // Allocate argument on the stack. 2713 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 2714 if (Flags.isInConsecutiveRegsLast()) 2715 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2716 // If we overran the argument save area, we must use memory 2717 // (this check catches arguments passed partially in memory) 2718 if (ArgOffset > LinkageSize + ParamAreaSize) 2719 UseMemory = true; 2720 2721 // However, if the argument is actually passed in an FPR or a VR, 2722 // we don't use memory after all. 2723 if (!Flags.isByVal()) { 2724 if (ArgVT == MVT::f32 || ArgVT == MVT::f64 || 2725 // QPX registers overlap with the scalar FP registers. 
2726 (HasQPX && (ArgVT == MVT::v4f32 || 2727 ArgVT == MVT::v4f64 || 2728 ArgVT == MVT::v4i1))) 2729 if (AvailableFPRs > 0) { 2730 --AvailableFPRs; 2731 return false; 2732 } 2733 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 2734 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 2735 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 2736 ArgVT == MVT::v1i128) 2737 if (AvailableVRs > 0) { 2738 --AvailableVRs; 2739 return false; 2740 } 2741 } 2742 2743 return UseMemory; 2744 } 2745 2746 /// EnsureStackAlignment - Round stack frame size up from NumBytes to 2747 /// ensure minimum alignment required for target. 2748 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, 2749 unsigned NumBytes) { 2750 unsigned TargetAlign = Lowering->getStackAlignment(); 2751 unsigned AlignMask = TargetAlign - 1; 2752 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 2753 return NumBytes; 2754 } 2755 2756 SDValue 2757 PPCTargetLowering::LowerFormalArguments(SDValue Chain, 2758 CallingConv::ID CallConv, bool isVarArg, 2759 const SmallVectorImpl<ISD::InputArg> 2760 &Ins, 2761 SDLoc dl, SelectionDAG &DAG, 2762 SmallVectorImpl<SDValue> &InVals) 2763 const { 2764 if (Subtarget.isSVR4ABI()) { 2765 if (Subtarget.isPPC64()) 2766 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, 2767 dl, DAG, InVals); 2768 else 2769 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, 2770 dl, DAG, InVals); 2771 } else { 2772 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, 2773 dl, DAG, InVals); 2774 } 2775 } 2776 2777 SDValue 2778 PPCTargetLowering::LowerFormalArguments_32SVR4( 2779 SDValue Chain, 2780 CallingConv::ID CallConv, bool isVarArg, 2781 const SmallVectorImpl<ISD::InputArg> 2782 &Ins, 2783 SDLoc dl, SelectionDAG &DAG, 2784 SmallVectorImpl<SDValue> &InVals) const { 2785 2786 // 32-bit SVR4 ABI Stack Frame Layout: 2787 // +-----------------------------------+ 2788 // +--> | Back chain | 2789 // | +-----------------------------------+ 2790 // | | Floating-point register save area | 2791 // | +-----------------------------------+ 2792 // | | General register save area | 2793 // | +-----------------------------------+ 2794 // | | CR save word | 2795 // | +-----------------------------------+ 2796 // | | VRSAVE save word | 2797 // | +-----------------------------------+ 2798 // | | Alignment padding | 2799 // | +-----------------------------------+ 2800 // | | Vector register save area | 2801 // | +-----------------------------------+ 2802 // | | Local variable space | 2803 // | +-----------------------------------+ 2804 // | | Parameter list area | 2805 // | +-----------------------------------+ 2806 // | | LR save word | 2807 // | +-----------------------------------+ 2808 // SP--> +--- | Back chain | 2809 // +-----------------------------------+ 2810 // 2811 // Specifications: 2812 // System V Application Binary Interface PowerPC Processor Supplement 2813 // AltiVec Technology Programming Interface Manual 2814 2815 MachineFunction &MF = DAG.getMachineFunction(); 2816 MachineFrameInfo *MFI = MF.getFrameInfo(); 2817 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2818 2819 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 2820 // Potential tail calls could cause overwriting of argument stack slots. 2821 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 2822 (CallConv == CallingConv::Fast)); 2823 unsigned PtrByteSize = 4; 2824 2825 // Assign locations to all of the incoming arguments. 
2826 SmallVector<CCValAssign, 16> ArgLocs; 2827 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 2828 *DAG.getContext()); 2829 2830 // Reserve space for the linkage area on the stack. 2831 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 2832 CCInfo.AllocateStack(LinkageSize, PtrByteSize); 2833 2834 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 2835 2836 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2837 CCValAssign &VA = ArgLocs[i]; 2838 2839 // Arguments stored in registers. 2840 if (VA.isRegLoc()) { 2841 const TargetRegisterClass *RC; 2842 EVT ValVT = VA.getValVT(); 2843 2844 switch (ValVT.getSimpleVT().SimpleTy) { 2845 default: 2846 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 2847 case MVT::i1: 2848 case MVT::i32: 2849 RC = &PPC::GPRCRegClass; 2850 break; 2851 case MVT::f32: 2852 if (Subtarget.hasP8Vector()) 2853 RC = &PPC::VSSRCRegClass; 2854 else 2855 RC = &PPC::F4RCRegClass; 2856 break; 2857 case MVT::f64: 2858 if (Subtarget.hasVSX()) 2859 RC = &PPC::VSFRCRegClass; 2860 else 2861 RC = &PPC::F8RCRegClass; 2862 break; 2863 case MVT::v16i8: 2864 case MVT::v8i16: 2865 case MVT::v4i32: 2866 RC = &PPC::VRRCRegClass; 2867 break; 2868 case MVT::v4f32: 2869 RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass; 2870 break; 2871 case MVT::v2f64: 2872 case MVT::v2i64: 2873 RC = &PPC::VSHRCRegClass; 2874 break; 2875 case MVT::v4f64: 2876 RC = &PPC::QFRCRegClass; 2877 break; 2878 case MVT::v4i1: 2879 RC = &PPC::QBRCRegClass; 2880 break; 2881 } 2882 2883 // Transform the arguments stored in physical registers into virtual ones. 2884 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2885 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, 2886 ValVT == MVT::i1 ? MVT::i32 : ValVT); 2887 2888 if (ValVT == MVT::i1) 2889 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); 2890 2891 InVals.push_back(ArgValue); 2892 } else { 2893 // Argument stored in memory. 2894 assert(VA.isMemLoc()); 2895 2896 unsigned ArgSize = VA.getLocVT().getStoreSize(); 2897 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), 2898 isImmutable); 2899 2900 // Create load nodes to retrieve arguments from the stack. 2901 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2902 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2903 MachinePointerInfo(), 2904 false, false, false, 0)); 2905 } 2906 } 2907 2908 // Assign locations to all of the incoming aggregate by value arguments. 2909 // Aggregates passed by value are stored in the local variable space of the 2910 // caller's stack frame, right above the parameter list area. 2911 SmallVector<CCValAssign, 16> ByValArgLocs; 2912 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 2913 ByValArgLocs, *DAG.getContext()); 2914 2915 // Reserve stack space for the allocations in CCInfo. 2916 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 2917 2918 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 2919 2920 // Area that is at least reserved in the caller of this function. 2921 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 2922 MinReservedArea = std::max(MinReservedArea, LinkageSize); 2923 2924 // Set the size that is at least reserved in caller of this function. Tail 2925 // call optimized function's reserved stack space needs to be aligned so that 2926 // taking the difference between two stack areas will result in an aligned 2927 // stack. 
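  // For example, assuming the usual 16-byte target stack alignment, a
  // MinReservedArea of 52 bytes is rounded up to 64 by EnsureStackAlignment
  // ((52 + 15) & ~15).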
2928 MinReservedArea = 2929 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 2930 FuncInfo->setMinReservedArea(MinReservedArea); 2931 2932 SmallVector<SDValue, 8> MemOps; 2933 2934 // If the function takes variable number of arguments, make a frame index for 2935 // the start of the first vararg value... for expansion of llvm.va_start. 2936 if (isVarArg) { 2937 static const MCPhysReg GPArgRegs[] = { 2938 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2939 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2940 }; 2941 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); 2942 2943 static const MCPhysReg FPArgRegs[] = { 2944 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 2945 PPC::F8 2946 }; 2947 unsigned NumFPArgRegs = array_lengthof(FPArgRegs); 2948 if (DisablePPCFloatInVariadic) 2949 NumFPArgRegs = 0; 2950 2951 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs)); 2952 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs)); 2953 2954 // Make room for NumGPArgRegs and NumFPArgRegs. 2955 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + 2956 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8; 2957 2958 FuncInfo->setVarArgsStackOffset( 2959 MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 2960 CCInfo.getNextStackOffset(), true)); 2961 2962 FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false)); 2963 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2964 2965 // The fixed integer arguments of a variadic function are stored to the 2966 // VarArgsFrameIndex on the stack so that they may be loaded by deferencing 2967 // the result of va_next. 2968 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) { 2969 // Get an existing live-in vreg, or add a new one. 2970 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]); 2971 if (!VReg) 2972 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); 2973 2974 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2975 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2976 MachinePointerInfo(), false, false, 0); 2977 MemOps.push_back(Store); 2978 // Increment the address by four for the next argument to store 2979 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); 2980 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2981 } 2982 2983 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 2984 // is set. 2985 // The double arguments are stored to the VarArgsFrameIndex 2986 // on the stack. 2987 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) { 2988 // Get an existing live-in vreg, or add a new one. 2989 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); 2990 if (!VReg) 2991 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 2992 2993 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 2994 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2995 MachinePointerInfo(), false, false, 0); 2996 MemOps.push_back(Store); 2997 // Increment the address by eight for the next argument to store 2998 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl, 2999 PtrVT); 3000 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3001 } 3002 } 3003 3004 if (!MemOps.empty()) 3005 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3006 3007 return Chain; 3008 } 3009 3010 // PPC64 passes i8, i16, and i32 values in i64 registers. 
Promote 3011 // value to MVT::i64 and then truncate to the correct register size. 3012 SDValue 3013 PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT, 3014 SelectionDAG &DAG, SDValue ArgVal, 3015 SDLoc dl) const { 3016 if (Flags.isSExt()) 3017 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 3018 DAG.getValueType(ObjectVT)); 3019 else if (Flags.isZExt()) 3020 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 3021 DAG.getValueType(ObjectVT)); 3022 3023 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal); 3024 } 3025 3026 SDValue 3027 PPCTargetLowering::LowerFormalArguments_64SVR4( 3028 SDValue Chain, 3029 CallingConv::ID CallConv, bool isVarArg, 3030 const SmallVectorImpl<ISD::InputArg> 3031 &Ins, 3032 SDLoc dl, SelectionDAG &DAG, 3033 SmallVectorImpl<SDValue> &InVals) const { 3034 // TODO: add description of PPC stack frame format, or at least some docs. 3035 // 3036 bool isELFv2ABI = Subtarget.isELFv2ABI(); 3037 bool isLittleEndian = Subtarget.isLittleEndian(); 3038 MachineFunction &MF = DAG.getMachineFunction(); 3039 MachineFrameInfo *MFI = MF.getFrameInfo(); 3040 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3041 3042 assert(!(CallConv == CallingConv::Fast && isVarArg) && 3043 "fastcc not supported on varargs functions"); 3044 3045 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 3046 // Potential tail calls could cause overwriting of argument stack slots. 3047 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3048 (CallConv == CallingConv::Fast)); 3049 unsigned PtrByteSize = 8; 3050 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3051 3052 static const MCPhysReg GPR[] = { 3053 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3054 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3055 }; 3056 static const MCPhysReg VR[] = { 3057 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3058 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3059 }; 3060 static const MCPhysReg VSRH[] = { 3061 PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8, 3062 PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13 3063 }; 3064 3065 const unsigned Num_GPR_Regs = array_lengthof(GPR); 3066 const unsigned Num_FPR_Regs = 13; 3067 const unsigned Num_VR_Regs = array_lengthof(VR); 3068 const unsigned Num_QFPR_Regs = Num_FPR_Regs; 3069 3070 // Do a first pass over the arguments to determine whether the ABI 3071 // guarantees that our caller has allocated the parameter save area 3072 // on its stack frame. In the ELFv1 ABI, this is always the case; 3073 // in the ELFv2 ABI, it is true if this is a vararg function or if 3074 // any parameter is located in a stack slot. 3075 3076 bool HasParameterArea = !isELFv2ABI || isVarArg; 3077 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize; 3078 unsigned NumBytes = LinkageSize; 3079 unsigned AvailableFPRs = Num_FPR_Regs; 3080 unsigned AvailableVRs = Num_VR_Regs; 3081 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 3082 if (Ins[i].Flags.isNest()) 3083 continue; 3084 3085 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags, 3086 PtrByteSize, LinkageSize, ParamAreaSize, 3087 NumBytes, AvailableFPRs, AvailableVRs, 3088 Subtarget.hasQPX())) 3089 HasParameterArea = true; 3090 } 3091 3092 // Add DAG nodes to load the arguments or copy them out of registers. On 3093 // entry to a function on PPC, the arguments start after the linkage area, 3094 // although the first ones are often in registers. 
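  // ArgOffset tracks each argument's position in the parameter save area even
  // when the value itself arrives in a register. For example, on the non-fastcc
  // path an argument whose slot starts at ArgOffset == LinkageSize + 24 maps to
  // GPR_idx 3, i.e. X6.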
3095 3096 unsigned ArgOffset = LinkageSize; 3097 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3098 unsigned &QFPR_idx = FPR_idx; 3099 SmallVector<SDValue, 8> MemOps; 3100 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 3101 unsigned CurArgIdx = 0; 3102 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3103 SDValue ArgVal; 3104 bool needsLoad = false; 3105 EVT ObjectVT = Ins[ArgNo].VT; 3106 EVT OrigVT = Ins[ArgNo].ArgVT; 3107 unsigned ObjSize = ObjectVT.getStoreSize(); 3108 unsigned ArgSize = ObjSize; 3109 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3110 if (Ins[ArgNo].isOrigArg()) { 3111 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 3112 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 3113 } 3114 // We re-align the argument offset for each argument, except when using the 3115 // fast calling convention, when we need to make sure we do that only when 3116 // we'll actually use a stack slot. 3117 unsigned CurArgOffset, Align; 3118 auto ComputeArgOffset = [&]() { 3119 /* Respect alignment of argument on the stack. */ 3120 Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize); 3121 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 3122 CurArgOffset = ArgOffset; 3123 }; 3124 3125 if (CallConv != CallingConv::Fast) { 3126 ComputeArgOffset(); 3127 3128 /* Compute GPR index associated with argument offset. */ 3129 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 3130 GPR_idx = std::min(GPR_idx, Num_GPR_Regs); 3131 } 3132 3133 // FIXME the codegen can be much improved in some cases. 3134 // We do not have to keep everything in memory. 3135 if (Flags.isByVal()) { 3136 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 3137 3138 if (CallConv == CallingConv::Fast) 3139 ComputeArgOffset(); 3140 3141 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3142 ObjSize = Flags.getByValSize(); 3143 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3144 // Empty aggregate parameters do not take up registers. Examples: 3145 // struct { } a; 3146 // union { } b; 3147 // int c[0]; 3148 // etc. However, we have to provide a place-holder in InVals, so 3149 // pretend we have an 8-byte item at the current address for that 3150 // purpose. 3151 if (!ObjSize) { 3152 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 3153 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3154 InVals.push_back(FIN); 3155 continue; 3156 } 3157 3158 // Create a stack object covering all stack doublewords occupied 3159 // by the argument. If the argument is (fully or partially) on 3160 // the stack, or if the argument is fully in registers but the 3161 // caller has allocated the parameter save anyway, we can refer 3162 // directly to the caller's stack frame. Otherwise, create a 3163 // local copy in our own frame. 3164 int FI; 3165 if (HasParameterArea || 3166 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize) 3167 FI = MFI->CreateFixedObject(ArgSize, ArgOffset, false, true); 3168 else 3169 FI = MFI->CreateStackObject(ArgSize, Align, false); 3170 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3171 3172 // Handle aggregates smaller than 8 bytes. 3173 if (ObjSize < PtrByteSize) { 3174 // The value of the object is its address, which differs from the 3175 // address of the enclosing doubleword on big-endian systems. 
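        // For example, a 2-byte by-value object occupies the last two bytes of
        // its doubleword on big-endian targets, so the argument address is FIN
        // plus PtrByteSize - ObjSize == 6; on little-endian targets it is FIN
        // itself.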
3176 SDValue Arg = FIN; 3177 if (!isLittleEndian) { 3178 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT); 3179 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff); 3180 } 3181 InVals.push_back(Arg); 3182 3183 if (GPR_idx != Num_GPR_Regs) { 3184 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3185 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3186 SDValue Store; 3187 3188 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 3189 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 3190 (ObjSize == 2 ? MVT::i16 : MVT::i32)); 3191 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg, 3192 MachinePointerInfo(FuncArg), 3193 ObjType, false, false, 0); 3194 } else { 3195 // For sizes that don't fit a truncating store (3, 5, 6, 7), 3196 // store the whole register as-is to the parameter save area 3197 // slot. 3198 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3199 MachinePointerInfo(FuncArg), 3200 false, false, 0); 3201 } 3202 3203 MemOps.push_back(Store); 3204 } 3205 // Whether we copied from a register or not, advance the offset 3206 // into the parameter save area by a full doubleword. 3207 ArgOffset += PtrByteSize; 3208 continue; 3209 } 3210 3211 // The value of the object is its address, which is the address of 3212 // its first stack doubleword. 3213 InVals.push_back(FIN); 3214 3215 // Store whatever pieces of the object are in registers to memory. 3216 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3217 if (GPR_idx == Num_GPR_Regs) 3218 break; 3219 3220 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3221 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3222 SDValue Addr = FIN; 3223 if (j) { 3224 SDValue Off = DAG.getConstant(j, dl, PtrVT); 3225 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off); 3226 } 3227 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr, 3228 MachinePointerInfo(FuncArg, j), 3229 false, false, 0); 3230 MemOps.push_back(Store); 3231 ++GPR_idx; 3232 } 3233 ArgOffset += ArgSize; 3234 continue; 3235 } 3236 3237 switch (ObjectVT.getSimpleVT().SimpleTy) { 3238 default: llvm_unreachable("Unhandled argument type!"); 3239 case MVT::i1: 3240 case MVT::i32: 3241 case MVT::i64: 3242 if (Flags.isNest()) { 3243 // The 'nest' parameter, if any, is passed in R11. 3244 unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass); 3245 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3246 3247 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3248 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3249 3250 break; 3251 } 3252 3253 // These can be scalar arguments or elements of an integer array type 3254 // passed directly. Clang may use those instead of "byval" aggregate 3255 // types to avoid forcing arguments to memory unnecessarily. 3256 if (GPR_idx != Num_GPR_Regs) { 3257 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3258 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3259 3260 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3261 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3262 // value to MVT::i64 and then truncate to the correct register size. 
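          // For a sign-extended i32, for example, this yields
          // (truncate (AssertSext x, i32)), telling later passes that the upper
          // 32 bits of the incoming register already hold the sign extension.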
3263 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3264 } else { 3265 if (CallConv == CallingConv::Fast) 3266 ComputeArgOffset(); 3267 3268 needsLoad = true; 3269 ArgSize = PtrByteSize; 3270 } 3271 if (CallConv != CallingConv::Fast || needsLoad) 3272 ArgOffset += 8; 3273 break; 3274 3275 case MVT::f32: 3276 case MVT::f64: 3277 // These can be scalar arguments or elements of a float array type 3278 // passed directly. The latter are used to implement ELFv2 homogenous 3279 // float aggregates. 3280 if (FPR_idx != Num_FPR_Regs) { 3281 unsigned VReg; 3282 3283 if (ObjectVT == MVT::f32) 3284 VReg = MF.addLiveIn(FPR[FPR_idx], 3285 Subtarget.hasP8Vector() 3286 ? &PPC::VSSRCRegClass 3287 : &PPC::F4RCRegClass); 3288 else 3289 VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX() 3290 ? &PPC::VSFRCRegClass 3291 : &PPC::F8RCRegClass); 3292 3293 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3294 ++FPR_idx; 3295 } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) { 3296 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 3297 // once we support fp <-> gpr moves. 3298 3299 // This can only ever happen in the presence of f32 array types, 3300 // since otherwise we never run out of FPRs before running out 3301 // of GPRs. 3302 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3303 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3304 3305 if (ObjectVT == MVT::f32) { 3306 if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0)) 3307 ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal, 3308 DAG.getConstant(32, dl, MVT::i32)); 3309 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); 3310 } 3311 3312 ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal); 3313 } else { 3314 if (CallConv == CallingConv::Fast) 3315 ComputeArgOffset(); 3316 3317 needsLoad = true; 3318 } 3319 3320 // When passing an array of floats, the array occupies consecutive 3321 // space in the argument area; only round up to the next doubleword 3322 // at the end of the array. Otherwise, each float takes 8 bytes. 3323 if (CallConv != CallingConv::Fast || needsLoad) { 3324 ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize; 3325 ArgOffset += ArgSize; 3326 if (Flags.isInConsecutiveRegsLast()) 3327 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3328 } 3329 break; 3330 case MVT::v4f32: 3331 case MVT::v4i32: 3332 case MVT::v8i16: 3333 case MVT::v16i8: 3334 case MVT::v2f64: 3335 case MVT::v2i64: 3336 case MVT::v1i128: 3337 if (!Subtarget.hasQPX()) { 3338 // These can be scalar arguments or elements of a vector array type 3339 // passed directly. The latter are used to implement ELFv2 homogenous 3340 // vector aggregates. 3341 if (VR_idx != Num_VR_Regs) { 3342 unsigned VReg = (ObjectVT == MVT::v2f64 || ObjectVT == MVT::v2i64) ? 3343 MF.addLiveIn(VSRH[VR_idx], &PPC::VSHRCRegClass) : 3344 MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 3345 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3346 ++VR_idx; 3347 } else { 3348 if (CallConv == CallingConv::Fast) 3349 ComputeArgOffset(); 3350 3351 needsLoad = true; 3352 } 3353 if (CallConv != CallingConv::Fast || needsLoad) 3354 ArgOffset += 16; 3355 break; 3356 } // not QPX 3357 3358 assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 && 3359 "Invalid QPX parameter type"); 3360 /* fall through */ 3361 3362 case MVT::v4f64: 3363 case MVT::v4i1: 3364 // QPX vectors are treated like their scalar floating-point subregisters 3365 // (except that they're larger). 
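      // A v4f32 argument occupies 16 bytes of the parameter save area and a
      // v4f64 or v4i1 argument occupies 32; either way it consumes one QPX
      // register slot, and QFPR_idx aliases FPR_idx (it is declared as a
      // reference above).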
3366 unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32; 3367 if (QFPR_idx != Num_QFPR_Regs) { 3368 const TargetRegisterClass *RC; 3369 switch (ObjectVT.getSimpleVT().SimpleTy) { 3370 case MVT::v4f64: RC = &PPC::QFRCRegClass; break; 3371 case MVT::v4f32: RC = &PPC::QSRCRegClass; break; 3372 default: RC = &PPC::QBRCRegClass; break; 3373 } 3374 3375 unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC); 3376 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3377 ++QFPR_idx; 3378 } else { 3379 if (CallConv == CallingConv::Fast) 3380 ComputeArgOffset(); 3381 needsLoad = true; 3382 } 3383 if (CallConv != CallingConv::Fast || needsLoad) 3384 ArgOffset += Sz; 3385 break; 3386 } 3387 3388 // We need to load the argument to a virtual register if we determined 3389 // above that we ran out of physical registers of the appropriate type. 3390 if (needsLoad) { 3391 if (ObjSize < ArgSize && !isLittleEndian) 3392 CurArgOffset += ArgSize - ObjSize; 3393 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, isImmutable); 3394 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3395 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(), 3396 false, false, false, 0); 3397 } 3398 3399 InVals.push_back(ArgVal); 3400 } 3401 3402 // Area that is at least reserved in the caller of this function. 3403 unsigned MinReservedArea; 3404 if (HasParameterArea) 3405 MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize); 3406 else 3407 MinReservedArea = LinkageSize; 3408 3409 // Set the size that is at least reserved in caller of this function. Tail 3410 // call optimized functions' reserved stack space needs to be aligned so that 3411 // taking the difference between two stack areas will result in an aligned 3412 // stack. 3413 MinReservedArea = 3414 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 3415 FuncInfo->setMinReservedArea(MinReservedArea); 3416 3417 // If the function takes a variable number of arguments, make a frame index for 3418 // the start of the first vararg value... for expansion of llvm.va_start. 3419 if (isVarArg) { 3420 int Depth = ArgOffset; 3421 3422 FuncInfo->setVarArgsFrameIndex( 3423 MFI->CreateFixedObject(PtrByteSize, Depth, true)); 3424 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3425 3426 // If this function is vararg, store any remaining integer argument regs 3427 // to their spots on the stack so that they may be loaded by dereferencing the 3428 // result of va_next.
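    // For example, if the named arguments ended at ArgOffset == LinkageSize + 24,
    // the loop below spills X6 through X10 (GPR_idx 3 through 7) into their home
    // doublewords so that va_arg can find them in memory.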
3429 for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 3430 GPR_idx < Num_GPR_Regs; ++GPR_idx) { 3431 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3432 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3433 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3434 MachinePointerInfo(), false, false, 0); 3435 MemOps.push_back(Store); 3436 // Increment the address by four for the next argument to store 3437 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT); 3438 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3439 } 3440 } 3441 3442 if (!MemOps.empty()) 3443 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3444 3445 return Chain; 3446 } 3447 3448 SDValue 3449 PPCTargetLowering::LowerFormalArguments_Darwin( 3450 SDValue Chain, 3451 CallingConv::ID CallConv, bool isVarArg, 3452 const SmallVectorImpl<ISD::InputArg> 3453 &Ins, 3454 SDLoc dl, SelectionDAG &DAG, 3455 SmallVectorImpl<SDValue> &InVals) const { 3456 // TODO: add description of PPC stack frame format, or at least some docs. 3457 // 3458 MachineFunction &MF = DAG.getMachineFunction(); 3459 MachineFrameInfo *MFI = MF.getFrameInfo(); 3460 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3461 3462 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 3463 bool isPPC64 = PtrVT == MVT::i64; 3464 // Potential tail calls could cause overwriting of argument stack slots. 3465 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3466 (CallConv == CallingConv::Fast)); 3467 unsigned PtrByteSize = isPPC64 ? 8 : 4; 3468 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3469 unsigned ArgOffset = LinkageSize; 3470 // Area that is at least reserved in caller of this function. 3471 unsigned MinReservedArea = ArgOffset; 3472 3473 static const MCPhysReg GPR_32[] = { // 32-bit registers. 3474 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3475 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3476 }; 3477 static const MCPhysReg GPR_64[] = { // 64-bit registers. 3478 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3479 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3480 }; 3481 static const MCPhysReg VR[] = { 3482 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3483 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3484 }; 3485 3486 const unsigned Num_GPR_Regs = array_lengthof(GPR_32); 3487 const unsigned Num_FPR_Regs = 13; 3488 const unsigned Num_VR_Regs = array_lengthof( VR); 3489 3490 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3491 3492 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 3493 3494 // In 32-bit non-varargs functions, the stack space for vectors is after the 3495 // stack space for non-vectors. We do not use this space unless we have 3496 // too many vectors to fit in registers, something that only occurs in 3497 // constructed examples:), but we have to walk the arglist to figure 3498 // that out...for the pathological case, compute VecArgOffset as the 3499 // start of the vector parameter area. Computing VecArgOffset is the 3500 // entire point of the following loop. 3501 unsigned VecArgOffset = ArgOffset; 3502 if (!isVarArg && !isPPC64) { 3503 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; 3504 ++ArgNo) { 3505 EVT ObjectVT = Ins[ArgNo].VT; 3506 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3507 3508 if (Flags.isByVal()) { 3509 // ObjSize is the true size, ArgSize rounded up to multiple of regs. 
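        // For example, a 10-byte by-value aggregate on this 32-bit path rounds
        // up to ArgSize == 12, i.e. three 4-byte words.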
3510 unsigned ObjSize = Flags.getByValSize(); 3511 unsigned ArgSize = 3512 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3513 VecArgOffset += ArgSize; 3514 continue; 3515 } 3516 3517 switch(ObjectVT.getSimpleVT().SimpleTy) { 3518 default: llvm_unreachable("Unhandled argument type!"); 3519 case MVT::i1: 3520 case MVT::i32: 3521 case MVT::f32: 3522 VecArgOffset += 4; 3523 break; 3524 case MVT::i64: // PPC64 3525 case MVT::f64: 3526 // FIXME: We are guaranteed to be !isPPC64 at this point. 3527 // Does MVT::i64 apply? 3528 VecArgOffset += 8; 3529 break; 3530 case MVT::v4f32: 3531 case MVT::v4i32: 3532 case MVT::v8i16: 3533 case MVT::v16i8: 3534 // Nothing to do, we're only looking at Nonvector args here. 3535 break; 3536 } 3537 } 3538 } 3539 // We've found where the vector parameter area in memory is. Skip the 3540 // first 12 parameters; these don't use that memory. 3541 VecArgOffset = ((VecArgOffset+15)/16)*16; 3542 VecArgOffset += 12*16; 3543 3544 // Add DAG nodes to load the arguments or copy them out of registers. On 3545 // entry to a function on PPC, the arguments start after the linkage area, 3546 // although the first ones are often in registers. 3547 3548 SmallVector<SDValue, 8> MemOps; 3549 unsigned nAltivecParamsAtEnd = 0; 3550 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 3551 unsigned CurArgIdx = 0; 3552 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3553 SDValue ArgVal; 3554 bool needsLoad = false; 3555 EVT ObjectVT = Ins[ArgNo].VT; 3556 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 3557 unsigned ArgSize = ObjSize; 3558 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3559 if (Ins[ArgNo].isOrigArg()) { 3560 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 3561 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 3562 } 3563 unsigned CurArgOffset = ArgOffset; 3564 3565 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 3566 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 3567 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 3568 if (isVarArg || isPPC64) { 3569 MinReservedArea = ((MinReservedArea+15)/16)*16; 3570 MinReservedArea += CalculateStackSlotSize(ObjectVT, 3571 Flags, 3572 PtrByteSize); 3573 } else nAltivecParamsAtEnd++; 3574 } else 3575 // Calculate min reserved area. 3576 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 3577 Flags, 3578 PtrByteSize); 3579 3580 // FIXME the codegen can be much improved in some cases. 3581 // We do not have to keep everything in memory. 3582 if (Flags.isByVal()) { 3583 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 3584 3585 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3586 ObjSize = Flags.getByValSize(); 3587 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3588 // Objects of size 1 and 2 are right justified, everything else is 3589 // left justified. This means the memory address is adjusted forwards. 3590 if (ObjSize==1 || ObjSize==2) { 3591 CurArgOffset = CurArgOffset + (4 - ObjSize); 3592 } 3593 // The value of the object is its address. 
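      // For example, a 1-byte by-value object sits in the last byte of its
      // 4-byte word, so CurArgOffset has already been advanced by 3 before the
      // fixed object below is created.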
3594 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, false, true); 3595 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3596 InVals.push_back(FIN); 3597 if (ObjSize==1 || ObjSize==2) { 3598 if (GPR_idx != Num_GPR_Regs) { 3599 unsigned VReg; 3600 if (isPPC64) 3601 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3602 else 3603 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3604 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3605 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16; 3606 SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 3607 MachinePointerInfo(FuncArg), 3608 ObjType, false, false, 0); 3609 MemOps.push_back(Store); 3610 ++GPR_idx; 3611 } 3612 3613 ArgOffset += PtrByteSize; 3614 3615 continue; 3616 } 3617 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3618 // Store whatever pieces of the object are in registers 3619 // to memory. ArgOffset will be the address of the beginning 3620 // of the object. 3621 if (GPR_idx != Num_GPR_Regs) { 3622 unsigned VReg; 3623 if (isPPC64) 3624 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3625 else 3626 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3627 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 3628 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3629 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3630 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3631 MachinePointerInfo(FuncArg, j), 3632 false, false, 0); 3633 MemOps.push_back(Store); 3634 ++GPR_idx; 3635 ArgOffset += PtrByteSize; 3636 } else { 3637 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 3638 break; 3639 } 3640 } 3641 continue; 3642 } 3643 3644 switch (ObjectVT.getSimpleVT().SimpleTy) { 3645 default: llvm_unreachable("Unhandled argument type!"); 3646 case MVT::i1: 3647 case MVT::i32: 3648 if (!isPPC64) { 3649 if (GPR_idx != Num_GPR_Regs) { 3650 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3651 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 3652 3653 if (ObjectVT == MVT::i1) 3654 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal); 3655 3656 ++GPR_idx; 3657 } else { 3658 needsLoad = true; 3659 ArgSize = PtrByteSize; 3660 } 3661 // All int arguments reserve stack space in the Darwin ABI. 3662 ArgOffset += PtrByteSize; 3663 break; 3664 } 3665 // FALLTHROUGH 3666 case MVT::i64: // PPC64 3667 if (GPR_idx != Num_GPR_Regs) { 3668 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3669 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3670 3671 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3672 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3673 // value to MVT::i64 and then truncate to the correct register size. 3674 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3675 3676 ++GPR_idx; 3677 } else { 3678 needsLoad = true; 3679 ArgSize = PtrByteSize; 3680 } 3681 // All int arguments reserve stack space in the Darwin ABI. 3682 ArgOffset += 8; 3683 break; 3684 3685 case MVT::f32: 3686 case MVT::f64: 3687 // Every 4 bytes of argument space consumes one of the GPRs available for 3688 // argument passing. 
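      // For example, a double passed in F1 as the first argument of a 32-bit
      // Darwin function also burns R3 and R4, so a following integer argument
      // is assigned to R5.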
3689 if (GPR_idx != Num_GPR_Regs) { 3690 ++GPR_idx; 3691 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 3692 ++GPR_idx; 3693 } 3694 if (FPR_idx != Num_FPR_Regs) { 3695 unsigned VReg; 3696 3697 if (ObjectVT == MVT::f32) 3698 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 3699 else 3700 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 3701 3702 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3703 ++FPR_idx; 3704 } else { 3705 needsLoad = true; 3706 } 3707 3708 // All FP arguments reserve stack space in the Darwin ABI. 3709 ArgOffset += isPPC64 ? 8 : ObjSize; 3710 break; 3711 case MVT::v4f32: 3712 case MVT::v4i32: 3713 case MVT::v8i16: 3714 case MVT::v16i8: 3715 // Note that vector arguments in registers don't reserve stack space, 3716 // except in varargs functions. 3717 if (VR_idx != Num_VR_Regs) { 3718 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 3719 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3720 if (isVarArg) { 3721 while ((ArgOffset % 16) != 0) { 3722 ArgOffset += PtrByteSize; 3723 if (GPR_idx != Num_GPR_Regs) 3724 GPR_idx++; 3725 } 3726 ArgOffset += 16; 3727 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 3728 } 3729 ++VR_idx; 3730 } else { 3731 if (!isVarArg && !isPPC64) { 3732 // Vectors go after all the nonvectors. 3733 CurArgOffset = VecArgOffset; 3734 VecArgOffset += 16; 3735 } else { 3736 // Vectors are aligned. 3737 ArgOffset = ((ArgOffset+15)/16)*16; 3738 CurArgOffset = ArgOffset; 3739 ArgOffset += 16; 3740 } 3741 needsLoad = true; 3742 } 3743 break; 3744 } 3745 3746 // We need to load the argument to a virtual register if we determined above 3747 // that we ran out of physical registers of the appropriate type. 3748 if (needsLoad) { 3749 int FI = MFI->CreateFixedObject(ObjSize, 3750 CurArgOffset + (ArgSize - ObjSize), 3751 isImmutable); 3752 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3753 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(), 3754 false, false, false, 0); 3755 } 3756 3757 InVals.push_back(ArgVal); 3758 } 3759 3760 // Allow for Altivec parameters at the end, if needed. 3761 if (nAltivecParamsAtEnd) { 3762 MinReservedArea = ((MinReservedArea+15)/16)*16; 3763 MinReservedArea += 16*nAltivecParamsAtEnd; 3764 } 3765 3766 // Area that is at least reserved in the caller of this function. 3767 MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize); 3768 3769 // Set the size that is at least reserved in caller of this function. Tail 3770 // call optimized functions' reserved stack space needs to be aligned so that 3771 // taking the difference between two stack areas will result in an aligned 3772 // stack. 3773 MinReservedArea = 3774 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 3775 FuncInfo->setMinReservedArea(MinReservedArea); 3776 3777 // If the function takes variable number of arguments, make a frame index for 3778 // the start of the first vararg value... for expansion of llvm.va_start. 3779 if (isVarArg) { 3780 int Depth = ArgOffset; 3781 3782 FuncInfo->setVarArgsFrameIndex( 3783 MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 3784 Depth, true)); 3785 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3786 3787 // If this function is vararg, store any remaining integer argument regs 3788 // to their spots on the stack so that they may be loaded by deferencing the 3789 // result of va_next. 
3790 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { 3791 unsigned VReg; 3792 3793 if (isPPC64) 3794 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3795 else 3796 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3797 3798 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3799 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3800 MachinePointerInfo(), false, false, 0); 3801 MemOps.push_back(Store); 3802 // Increment the address by four for the next argument to store 3803 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); 3804 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3805 } 3806 } 3807 3808 if (!MemOps.empty()) 3809 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3810 3811 return Chain; 3812 } 3813 3814 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be 3815 /// adjusted to accommodate the arguments for the tailcall. 3816 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall, 3817 unsigned ParamSize) { 3818 3819 if (!isTailCall) return 0; 3820 3821 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>(); 3822 unsigned CallerMinReservedArea = FI->getMinReservedArea(); 3823 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize; 3824 // Remember only if the new adjustement is bigger. 3825 if (SPDiff < FI->getTailCallSPDelta()) 3826 FI->setTailCallSPDelta(SPDiff); 3827 3828 return SPDiff; 3829 } 3830 3831 /// IsEligibleForTailCallOptimization - Check whether the call is eligible 3832 /// for tail call optimization. Targets which want to do tail call 3833 /// optimization should implement this function. 3834 bool 3835 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 3836 CallingConv::ID CalleeCC, 3837 bool isVarArg, 3838 const SmallVectorImpl<ISD::InputArg> &Ins, 3839 SelectionDAG& DAG) const { 3840 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 3841 return false; 3842 3843 // Variable argument functions are not supported. 3844 if (isVarArg) 3845 return false; 3846 3847 MachineFunction &MF = DAG.getMachineFunction(); 3848 CallingConv::ID CallerCC = MF.getFunction()->getCallingConv(); 3849 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) { 3850 // Functions containing by val parameters are not supported. 3851 for (unsigned i = 0; i != Ins.size(); i++) { 3852 ISD::ArgFlagsTy Flags = Ins[i].Flags; 3853 if (Flags.isByVal()) return false; 3854 } 3855 3856 // Non-PIC/GOT tail calls are supported. 3857 if (getTargetMachine().getRelocationModel() != Reloc::PIC_) 3858 return true; 3859 3860 // At the moment we can only do local tail calls (in same module, hidden 3861 // or protected) if we are generating PIC. 3862 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 3863 return G->getGlobal()->hasHiddenVisibility() 3864 || G->getGlobal()->hasProtectedVisibility(); 3865 } 3866 3867 return false; 3868 } 3869 3870 /// isCallCompatibleAddress - Return the immediate to use if the specified 3871 /// 32-bit value is representable in the immediate field of a BxA instruction. 3872 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) { 3873 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 3874 if (!C) return nullptr; 3875 3876 int Addr = C->getZExtValue(); 3877 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero. 3878 SignExtend32<26>(Addr) != Addr) 3879 return nullptr; // Top 6 bits have to be sext of immediate. 
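  // The absolute branch form (e.g. bla) encodes a 24-bit LI field that is
  // shifted left by two, so the address must be 4-byte aligned and fit in a
  // sign-extended 26-bit value; the constant returned below is the address
  // pre-shifted right by 2 to match that encoding.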
3880 3881 return DAG.getConstant((int)C->getZExtValue() >> 2, SDLoc(Op), 3882 DAG.getTargetLoweringInfo().getPointerTy( 3883 DAG.getDataLayout())).getNode(); 3884 } 3885 3886 namespace { 3887 3888 struct TailCallArgumentInfo { 3889 SDValue Arg; 3890 SDValue FrameIdxOp; 3891 int FrameIdx; 3892 3893 TailCallArgumentInfo() : FrameIdx(0) {} 3894 }; 3895 3896 } 3897 3898 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. 3899 static void 3900 StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, 3901 SDValue Chain, 3902 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs, 3903 SmallVectorImpl<SDValue> &MemOpChains, 3904 SDLoc dl) { 3905 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 3906 SDValue Arg = TailCallArgs[i].Arg; 3907 SDValue FIN = TailCallArgs[i].FrameIdxOp; 3908 int FI = TailCallArgs[i].FrameIdx; 3909 // Store relative to framepointer. 3910 MemOpChains.push_back(DAG.getStore( 3911 Chain, dl, Arg, FIN, 3912 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), false, 3913 false, 0)); 3914 } 3915 } 3916 3917 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 3918 /// the appropriate stack slot for the tail call optimized function call. 3919 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, 3920 MachineFunction &MF, 3921 SDValue Chain, 3922 SDValue OldRetAddr, 3923 SDValue OldFP, 3924 int SPDiff, 3925 bool isPPC64, 3926 bool isDarwinABI, 3927 SDLoc dl) { 3928 if (SPDiff) { 3929 // Calculate the new stack slot for the return address. 3930 int SlotSize = isPPC64 ? 8 : 4; 3931 const PPCFrameLowering *FL = 3932 MF.getSubtarget<PPCSubtarget>().getFrameLowering(); 3933 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset(); 3934 int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize, 3935 NewRetAddrLoc, true); 3936 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 3937 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 3938 Chain = DAG.getStore( 3939 Chain, dl, OldRetAddr, NewRetAddrFrIdx, 3940 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), NewRetAddr), 3941 false, false, 0); 3942 3943 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack 3944 // slot as the FP is never overwritten. 3945 if (isDarwinABI) { 3946 int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset(); 3947 int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc, 3948 true); 3949 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 3950 Chain = DAG.getStore( 3951 Chain, dl, OldFP, NewFramePtrIdx, 3952 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), NewFPIdx), 3953 false, false, 0); 3954 } 3955 } 3956 return Chain; 3957 } 3958 3959 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate 3960 /// the position of the argument. 3961 static void 3962 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 3963 SDValue Arg, int SPDiff, unsigned ArgOffset, 3964 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) { 3965 int Offset = ArgOffset + SPDiff; 3966 uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8; 3967 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 3968 EVT VT = isPPC64 ? 
MVT::i64 : MVT::i32; 3969 SDValue FIN = DAG.getFrameIndex(FI, VT); 3970 TailCallArgumentInfo Info; 3971 Info.Arg = Arg; 3972 Info.FrameIdxOp = FIN; 3973 Info.FrameIdx = FI; 3974 TailCallArguments.push_back(Info); 3975 } 3976 3977 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address 3978 /// stack slot. Returns the chain as result and the loaded frame pointers in 3979 /// LROpOut/FPOpout. Used when tail calling. 3980 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG, 3981 int SPDiff, 3982 SDValue Chain, 3983 SDValue &LROpOut, 3984 SDValue &FPOpOut, 3985 bool isDarwinABI, 3986 SDLoc dl) const { 3987 if (SPDiff) { 3988 // Load the LR and FP stack slot for later adjusting. 3989 EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32; 3990 LROpOut = getReturnAddrFrameIndex(DAG); 3991 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(), 3992 false, false, false, 0); 3993 Chain = SDValue(LROpOut.getNode(), 1); 3994 3995 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack 3996 // slot as the FP is never overwritten. 3997 if (isDarwinABI) { 3998 FPOpOut = getFramePointerFrameIndex(DAG); 3999 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(), 4000 false, false, false, 0); 4001 Chain = SDValue(FPOpOut.getNode(), 1); 4002 } 4003 } 4004 return Chain; 4005 } 4006 4007 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 4008 /// by "Src" to address "Dst" of size "Size". Alignment information is 4009 /// specified by the specific parameter attribute. The copy will be passed as 4010 /// a byval function parameter. 4011 /// Sometimes what we are copying is the end of a larger object, the part that 4012 /// does not fit in registers. 4013 static SDValue 4014 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 4015 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 4016 SDLoc dl) { 4017 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32); 4018 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 4019 false, false, false, MachinePointerInfo(), 4020 MachinePointerInfo()); 4021 } 4022 4023 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 4024 /// tail calls. 4025 static void 4026 LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, 4027 SDValue Arg, SDValue PtrOff, int SPDiff, 4028 unsigned ArgOffset, bool isPPC64, bool isTailCall, 4029 bool isVector, SmallVectorImpl<SDValue> &MemOpChains, 4030 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, 4031 SDLoc dl) { 4032 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4033 if (!isTailCall) { 4034 if (isVector) { 4035 SDValue StackPtr; 4036 if (isPPC64) 4037 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 4038 else 4039 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 4040 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4041 DAG.getConstant(ArgOffset, dl, PtrVT)); 4042 } 4043 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, 4044 MachinePointerInfo(), false, false, 0)); 4045 // Calculate and remember argument location. 
4046 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 4047 TailCallArguments); 4048 } 4049 4050 static 4051 void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 4052 SDLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes, 4053 SDValue LROp, SDValue FPOp, bool isDarwinABI, 4054 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 4055 MachineFunction &MF = DAG.getMachineFunction(); 4056 4057 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 4058 // might overwrite each other in case of tail call optimization. 4059 SmallVector<SDValue, 8> MemOpChains2; 4060 // Do not flag preceding copytoreg stuff together with the following stuff. 4061 InFlag = SDValue(); 4062 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 4063 MemOpChains2, dl); 4064 if (!MemOpChains2.empty()) 4065 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); 4066 4067 // Store the return address to the appropriate stack slot. 4068 Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff, 4069 isPPC64, isDarwinABI, dl); 4070 4071 // Emit callseq_end just before tailcall node. 4072 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4073 DAG.getIntPtrConstant(0, dl, true), InFlag, dl); 4074 InFlag = Chain.getValue(1); 4075 } 4076 4077 // Is this global address that of a function that can be called by name? (as 4078 // opposed to something that must hold a descriptor for an indirect call). 4079 static bool isFunctionGlobalAddress(SDValue Callee) { 4080 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 4081 if (Callee.getOpcode() == ISD::GlobalTLSAddress || 4082 Callee.getOpcode() == ISD::TargetGlobalTLSAddress) 4083 return false; 4084 4085 return G->getGlobal()->getType()->getElementType()->isFunctionTy(); 4086 } 4087 4088 return false; 4089 } 4090 4091 static 4092 unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, 4093 SDValue &Chain, SDValue CallSeqStart, SDLoc dl, int SPDiff, 4094 bool isTailCall, bool IsPatchPoint, bool hasNest, 4095 SmallVectorImpl<std::pair<unsigned, SDValue> > &RegsToPass, 4096 SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys, 4097 ImmutableCallSite *CS, const PPCSubtarget &Subtarget) { 4098 4099 bool isPPC64 = Subtarget.isPPC64(); 4100 bool isSVR4ABI = Subtarget.isSVR4ABI(); 4101 bool isELFv2ABI = Subtarget.isELFv2ABI(); 4102 4103 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4104 NodeTys.push_back(MVT::Other); // Returns a chain 4105 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. 4106 4107 unsigned CallOpc = PPCISD::CALL; 4108 4109 bool needIndirectCall = true; 4110 if (!isSVR4ABI || !isPPC64) 4111 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { 4112 // If this is an absolute destination address, use the munged value. 4113 Callee = SDValue(Dest, 0); 4114 needIndirectCall = false; 4115 } 4116 4117 if (isFunctionGlobalAddress(Callee)) { 4118 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee); 4119 // A call to a TLS address is actually an indirect call to a 4120 // thread-specific pointer. 
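    // For a plain direct call the remaining question is whether the target may
    // live outside the current module; if so, the reference is routed through a
    // Darwin $stub or an ELF PLT entry, which is what the MO_PLT_OR_STUB
    // operand flag below requests.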
4121 unsigned OpFlags = 0; 4122 if ((DAG.getTarget().getRelocationModel() != Reloc::Static && 4123 (Subtarget.getTargetTriple().isMacOSX() && 4124 Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5)) && 4125 !G->getGlobal()->isStrongDefinitionForLinker()) || 4126 (Subtarget.isTargetELF() && !isPPC64 && 4127 !G->getGlobal()->hasLocalLinkage() && 4128 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) { 4129 // PC-relative references to external symbols should go through $stub, 4130 // unless we're building with the leopard linker or later, which 4131 // automatically synthesizes these stubs. 4132 OpFlags = PPCII::MO_PLT_OR_STUB; 4133 } 4134 4135 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, 4136 // every direct call is) turn it into a TargetGlobalAddress / 4137 // TargetExternalSymbol node so that legalize doesn't hack it. 4138 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, 4139 Callee.getValueType(), 0, OpFlags); 4140 needIndirectCall = false; 4141 } 4142 4143 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 4144 unsigned char OpFlags = 0; 4145 4146 if ((DAG.getTarget().getRelocationModel() != Reloc::Static && 4147 (Subtarget.getTargetTriple().isMacOSX() && 4148 Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5))) || 4149 (Subtarget.isTargetELF() && !isPPC64 && 4150 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) { 4151 // PC-relative references to external symbols should go through $stub, 4152 // unless we're building with the leopard linker or later, which 4153 // automatically synthesizes these stubs. 4154 OpFlags = PPCII::MO_PLT_OR_STUB; 4155 } 4156 4157 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(), 4158 OpFlags); 4159 needIndirectCall = false; 4160 } 4161 4162 if (IsPatchPoint) { 4163 // We'll form an invalid direct call when lowering a patchpoint; the full 4164 // sequence for an indirect call is complicated, and many of the 4165 // instructions introduced might have side effects (and, thus, can't be 4166 // removed later). The call itself will be removed as soon as the 4167 // argument/return lowering is complete, so the fact that it has the wrong 4168 // kind of operands should not really matter. 4169 needIndirectCall = false; 4170 } 4171 4172 if (needIndirectCall) { 4173 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 4174 // to do the call, we can't use PPCISD::CALL. 4175 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 4176 4177 if (isSVR4ABI && isPPC64 && !isELFv2ABI) { 4178 // Function pointers in the 64-bit SVR4 ABI do not point to the function 4179 // entry point, but to the function descriptor (the function entry point 4180 // address is part of the function descriptor though). 4181 // The function descriptor is a three doubleword structure with the 4182 // following fields: function entry point, TOC base address and 4183 // environment pointer. 4184 // Thus for a call through a function pointer, the following actions need 4185 // to be performed: 4186 // 1. Save the TOC of the caller in the TOC save area of its stack 4187 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). 4188 // 2. Load the address of the function entry point from the function 4189 // descriptor. 4190 // 3. Load the TOC of the callee from the function descriptor into r2. 4191 // 4. Load the environment pointer from the function descriptor into 4192 // r11. 4193 // 5. Branch to the function entry point address. 4194 // 6. 
On return of the callee, the TOC of the caller needs to be 4195 // restored (this is done in FinishCall()). 4196 // 4197 // The loads are scheduled at the beginning of the call sequence, and the 4198 // register copies are flagged together to ensure that no other 4199 // operations can be scheduled in between. E.g. without flagging the 4200 // copies together, a TOC access in the caller could be scheduled between 4201 // the assignment of the callee TOC and the branch to the callee, which 4202 // results in the TOC access going through the TOC of the callee instead 4203 // of going through the TOC of the caller, which leads to incorrect code. 4204 4205 // Load the address of the function entry point from the function 4206 // descriptor. 4207 SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1); 4208 if (LDChain.getValueType() == MVT::Glue) 4209 LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2); 4210 4211 bool LoadsInv = Subtarget.hasInvariantFunctionDescriptors(); 4212 4213 MachinePointerInfo MPI(CS ? CS->getCalledValue() : nullptr); 4214 SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI, 4215 false, false, LoadsInv, 8); 4216 4217 // Load environment pointer into r11. 4218 SDValue PtrOff = DAG.getIntPtrConstant(16, dl); 4219 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 4220 SDValue LoadEnvPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, 4221 MPI.getWithOffset(16), false, false, 4222 LoadsInv, 8); 4223 4224 SDValue TOCOff = DAG.getIntPtrConstant(8, dl); 4225 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff); 4226 SDValue TOCPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, 4227 MPI.getWithOffset(8), false, false, 4228 LoadsInv, 8); 4229 4230 setUsesTOCBasePtr(DAG); 4231 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr, 4232 InFlag); 4233 Chain = TOCVal.getValue(0); 4234 InFlag = TOCVal.getValue(1); 4235 4236 // If the function call has an explicit 'nest' parameter, it takes the 4237 // place of the environment pointer. 4238 if (!hasNest) { 4239 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 4240 InFlag); 4241 4242 Chain = EnvVal.getValue(0); 4243 InFlag = EnvVal.getValue(1); 4244 } 4245 4246 MTCTROps[0] = Chain; 4247 MTCTROps[1] = LoadFuncPtr; 4248 MTCTROps[2] = InFlag; 4249 } 4250 4251 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, 4252 makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2)); 4253 InFlag = Chain.getValue(1); 4254 4255 NodeTys.clear(); 4256 NodeTys.push_back(MVT::Other); 4257 NodeTys.push_back(MVT::Glue); 4258 Ops.push_back(Chain); 4259 CallOpc = PPCISD::BCTRL; 4260 Callee.setNode(nullptr); 4261 // Add use of X11 (holding environment pointer) 4262 if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest) 4263 Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); 4264 // Add CTR register as callee so a bctr can be emitted later. 4265 if (isTailCall) 4266 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 4267 } 4268 4269 // If this is a direct call, pass the chain and the callee. 4270 if (Callee.getNode()) { 4271 Ops.push_back(Chain); 4272 Ops.push_back(Callee); 4273 } 4274 // If this is a tail call add stack pointer delta. 4275 if (isTailCall) 4276 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32)); 4277 4278 // Add argument registers to the end of the list so that they are known live 4279 // into the call. 
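  // For example, a non-varargs call passing two integers and a double on the
  // 64-bit SVR4 path would typically have RegsToPass == {X3, X4, F1}, so those
  // registers are added as operands and thus known live into the call.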
4280 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 4281 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 4282 RegsToPass[i].second.getValueType())); 4283 4284 // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live 4285 // into the call. 4286 if (isSVR4ABI && isPPC64 && !IsPatchPoint) { 4287 setUsesTOCBasePtr(DAG); 4288 Ops.push_back(DAG.getRegister(PPC::X2, PtrVT)); 4289 } 4290 4291 return CallOpc; 4292 } 4293 4294 static 4295 bool isLocalCall(const SDValue &Callee) 4296 { 4297 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 4298 return G->getGlobal()->isStrongDefinitionForLinker(); 4299 return false; 4300 } 4301 4302 SDValue 4303 PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 4304 CallingConv::ID CallConv, bool isVarArg, 4305 const SmallVectorImpl<ISD::InputArg> &Ins, 4306 SDLoc dl, SelectionDAG &DAG, 4307 SmallVectorImpl<SDValue> &InVals) const { 4308 4309 SmallVector<CCValAssign, 16> RVLocs; 4310 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 4311 *DAG.getContext()); 4312 CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC); 4313 4314 // Copy all of the result registers out of their specified physreg. 4315 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 4316 CCValAssign &VA = RVLocs[i]; 4317 assert(VA.isRegLoc() && "Can only return in registers!"); 4318 4319 SDValue Val = DAG.getCopyFromReg(Chain, dl, 4320 VA.getLocReg(), VA.getLocVT(), InFlag); 4321 Chain = Val.getValue(1); 4322 InFlag = Val.getValue(2); 4323 4324 switch (VA.getLocInfo()) { 4325 default: llvm_unreachable("Unknown loc info!"); 4326 case CCValAssign::Full: break; 4327 case CCValAssign::AExt: 4328 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4329 break; 4330 case CCValAssign::ZExt: 4331 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val, 4332 DAG.getValueType(VA.getValVT())); 4333 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4334 break; 4335 case CCValAssign::SExt: 4336 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val, 4337 DAG.getValueType(VA.getValVT())); 4338 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4339 break; 4340 } 4341 4342 InVals.push_back(Val); 4343 } 4344 4345 return Chain; 4346 } 4347 4348 SDValue 4349 PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl, 4350 bool isTailCall, bool isVarArg, bool IsPatchPoint, 4351 bool hasNest, SelectionDAG &DAG, 4352 SmallVector<std::pair<unsigned, SDValue>, 8> 4353 &RegsToPass, 4354 SDValue InFlag, SDValue Chain, 4355 SDValue CallSeqStart, SDValue &Callee, 4356 int SPDiff, unsigned NumBytes, 4357 const SmallVectorImpl<ISD::InputArg> &Ins, 4358 SmallVectorImpl<SDValue> &InVals, 4359 ImmutableCallSite *CS) const { 4360 4361 std::vector<EVT> NodeTys; 4362 SmallVector<SDValue, 8> Ops; 4363 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl, 4364 SPDiff, isTailCall, IsPatchPoint, hasNest, 4365 RegsToPass, Ops, NodeTys, CS, Subtarget); 4366 4367 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls 4368 if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64()) 4369 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); 4370 4371 // When performing tail call optimization the callee pops its arguments off 4372 // the stack. Account for this here so these bytes can be pushed back on in 4373 // PPCFrameLowering::eliminateCallFramePseudoInstr. 4374 int BytesCalleePops = 4375 (CallConv == CallingConv::Fast && 4376 getTargetMachine().Options.GuaranteedTailCallOpt) ? 
NumBytes : 0;
4377
4378   // Add a register mask operand representing the call-preserved registers.
4379   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
4380   const uint32_t *Mask =
4381       TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
4382   assert(Mask && "Missing call preserved mask for calling convention");
4383   Ops.push_back(DAG.getRegisterMask(Mask));
4384
4385   if (InFlag.getNode())
4386     Ops.push_back(InFlag);
4387
4388   // Emit tail call.
4389   if (isTailCall) {
4390     assert(((Callee.getOpcode() == ISD::Register &&
4391              cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
4392             Callee.getOpcode() == ISD::TargetExternalSymbol ||
4393             Callee.getOpcode() == ISD::TargetGlobalAddress ||
4394             isa<ConstantSDNode>(Callee)) &&
4395            "Expecting a global address, external symbol, absolute value or register");
4396
4397     DAG.getMachineFunction().getFrameInfo()->setHasTailCall();
4398     return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
4399   }
4400
4401   // Add a NOP immediately after the branch instruction when using the 64-bit
4402   // SVR4 ABI. At link time, if caller and callee are in different modules and
4403   // thus have different TOCs, the call will be replaced with a call to a stub
4404   // function which saves the current TOC, loads the TOC of the callee and
4405   // branches to the callee. The NOP will be replaced with a load instruction
4406   // which restores the TOC of the caller from the TOC save slot of the current
4407   // stack frame. If caller and callee belong to the same module (and have the
4408   // same TOC), the NOP will remain unchanged.
4409
4410   if (!isTailCall && Subtarget.isSVR4ABI() && Subtarget.isPPC64() &&
4411       !IsPatchPoint) {
4412     if (CallOpc == PPCISD::BCTRL) {
4413       // This is a call through a function pointer.
4414       // Restore the caller TOC from the save area into R2.
4415       // See PrepareCall() for more information about calls through function
4416       // pointers in the 64-bit SVR4 ABI.
4417       // We are using a target-specific load with r2 hard coded, because the
4418       // result of a target-independent load would never go directly into r2,
4419       // since r2 is a reserved register (which prevents the register allocator
4420       // from allocating it), resulting in an additional register being
4421       // allocated and an unnecessary move instruction being generated.
4422       CallOpc = PPCISD::BCTRL_LOAD_TOC;
4423
4424       EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4425       SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
4426       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
4427       SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
4428       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);
4429
4430       // The address needs to go after the chain input but before the flag (or
4431       // any other variadic arguments).
4432       Ops.insert(std::next(Ops.begin()), AddTOC);
4433     } else if ((CallOpc == PPCISD::CALL) &&
4434                (!isLocalCall(Callee) ||
4435                 DAG.getTarget().getRelocationModel() == Reloc::PIC_))
4436       // Otherwise insert NOP for non-local calls.
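// For orientation, a hedged sketch of what the two cases above end up emitting
// (offsets are symbolic; the exact TOC save offset comes from the frame lowering):
//   direct, possibly non-local call:   bl callee
//                                      nop    <- the linker may rewrite this into
//                                                ld r2, <TOC save offset>(r1)
//   indirect call (BCTRL_LOAD_TOC):    mtctr <entry point>
//                                      bctrl
//                                      ld r2, <TOC save offset>(r1)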
4437 CallOpc = PPCISD::CALL_NOP; 4438 } 4439 4440 Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops); 4441 InFlag = Chain.getValue(1); 4442 4443 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4444 DAG.getIntPtrConstant(BytesCalleePops, dl, true), 4445 InFlag, dl); 4446 if (!Ins.empty()) 4447 InFlag = Chain.getValue(1); 4448 4449 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 4450 Ins, dl, DAG, InVals); 4451 } 4452 4453 SDValue 4454 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 4455 SmallVectorImpl<SDValue> &InVals) const { 4456 SelectionDAG &DAG = CLI.DAG; 4457 SDLoc &dl = CLI.DL; 4458 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 4459 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 4460 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 4461 SDValue Chain = CLI.Chain; 4462 SDValue Callee = CLI.Callee; 4463 bool &isTailCall = CLI.IsTailCall; 4464 CallingConv::ID CallConv = CLI.CallConv; 4465 bool isVarArg = CLI.IsVarArg; 4466 bool IsPatchPoint = CLI.IsPatchPoint; 4467 ImmutableCallSite *CS = CLI.CS; 4468 4469 if (isTailCall) 4470 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 4471 Ins, DAG); 4472 4473 if (!isTailCall && CS && CS->isMustTailCall()) 4474 report_fatal_error("failed to perform tail call elimination on a call " 4475 "site marked musttail"); 4476 4477 if (Subtarget.isSVR4ABI()) { 4478 if (Subtarget.isPPC64()) 4479 return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg, 4480 isTailCall, IsPatchPoint, Outs, OutVals, Ins, 4481 dl, DAG, InVals, CS); 4482 else 4483 return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg, 4484 isTailCall, IsPatchPoint, Outs, OutVals, Ins, 4485 dl, DAG, InVals, CS); 4486 } 4487 4488 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg, 4489 isTailCall, IsPatchPoint, Outs, OutVals, Ins, 4490 dl, DAG, InVals, CS); 4491 } 4492 4493 SDValue 4494 PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee, 4495 CallingConv::ID CallConv, bool isVarArg, 4496 bool isTailCall, bool IsPatchPoint, 4497 const SmallVectorImpl<ISD::OutputArg> &Outs, 4498 const SmallVectorImpl<SDValue> &OutVals, 4499 const SmallVectorImpl<ISD::InputArg> &Ins, 4500 SDLoc dl, SelectionDAG &DAG, 4501 SmallVectorImpl<SDValue> &InVals, 4502 ImmutableCallSite *CS) const { 4503 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description 4504 // of the 32-bit SVR4 ABI stack frame layout. 4505 4506 assert((CallConv == CallingConv::C || 4507 CallConv == CallingConv::Fast) && "Unknown calling convention!"); 4508 4509 unsigned PtrByteSize = 4; 4510 4511 MachineFunction &MF = DAG.getMachineFunction(); 4512 4513 // Mark this function as potentially containing a function that contains a 4514 // tail call. As a consequence the frame pointer will be used for dynamicalloc 4515 // and restoring the callers stack pointer in this functions epilog. This is 4516 // done because by tail calling the called function might overwrite the value 4517 // in this function's (MF) stack pointer stack slot 0(SP). 4518 if (getTargetMachine().Options.GuaranteedTailCallOpt && 4519 CallConv == CallingConv::Fast) 4520 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 4521 4522 // Count how many bytes are to be pushed on the stack, including the linkage 4523 // area, parameter list area and the part of the local variable space which 4524 // contains copies of aggregates which are passed by value. 4525 4526 // Assign locations to all of the outgoing arguments. 
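// (Sketch of the flow below, for orientation: a first CCState pass assigns the
// scalar operands via CC_PPC32_SVR4 / CC_PPC32_SVR4_VarArg, and a second
// CCState, seeded with AllocateStack(CCInfo.getNextStackOffset()), then places
// the by-value aggregate copies after the parameter list area.)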
4527 SmallVector<CCValAssign, 16> ArgLocs; 4528 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 4529 *DAG.getContext()); 4530 4531 // Reserve space for the linkage area on the stack. 4532 CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(), 4533 PtrByteSize); 4534 4535 if (isVarArg) { 4536 // Handle fixed and variable vector arguments differently. 4537 // Fixed vector arguments go into registers as long as registers are 4538 // available. Variable vector arguments always go into memory. 4539 unsigned NumArgs = Outs.size(); 4540 4541 for (unsigned i = 0; i != NumArgs; ++i) { 4542 MVT ArgVT = Outs[i].VT; 4543 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 4544 bool Result; 4545 4546 if (Outs[i].IsFixed) { 4547 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, 4548 CCInfo); 4549 } else { 4550 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, 4551 ArgFlags, CCInfo); 4552 } 4553 4554 if (Result) { 4555 #ifndef NDEBUG 4556 errs() << "Call operand #" << i << " has unhandled type " 4557 << EVT(ArgVT).getEVTString() << "\n"; 4558 #endif 4559 llvm_unreachable(nullptr); 4560 } 4561 } 4562 } else { 4563 // All arguments are treated the same. 4564 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4); 4565 } 4566 4567 // Assign locations to all of the outgoing aggregate by value arguments. 4568 SmallVector<CCValAssign, 16> ByValArgLocs; 4569 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 4570 ByValArgLocs, *DAG.getContext()); 4571 4572 // Reserve stack space for the allocations in CCInfo. 4573 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 4574 4575 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal); 4576 4577 // Size of the linkage area, parameter list area and the part of the local 4578 // space variable where copies of aggregates which are passed by value are 4579 // stored. 4580 unsigned NumBytes = CCByValInfo.getNextStackOffset(); 4581 4582 // Calculate by how many bytes the stack has to be adjusted in case of tail 4583 // call optimization. 4584 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 4585 4586 // Adjust the stack pointer for the new arguments... 4587 // These operations are automatically eliminated by the prolog/epilog pass 4588 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4589 dl); 4590 SDValue CallSeqStart = Chain; 4591 4592 // Load the return address and frame pointer so it can be moved somewhere else 4593 // later. 4594 SDValue LROp, FPOp; 4595 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false, 4596 dl); 4597 4598 // Set up a copy of the stack pointer for use loading and storing any 4599 // arguments that may not fit in the registers available for argument 4600 // passing. 4601 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 4602 4603 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 4604 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 4605 SmallVector<SDValue, 8> MemOpChains; 4606 4607 bool seenFloatArg = false; 4608 // Walk the register/memloc assignments, inserting copies/loads. 
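// (Illustrative example, not tied to any particular test: for a call such as
// 'void f(int a, int b)' under the 32-bit SVR4 ABI this loop merely records
// (R3, a) and (R4, b) in RegsToPass; the glued CopyToReg nodes and the call
// node itself are built further below and in FinishCall().)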
4609 for (unsigned i = 0, j = 0, e = ArgLocs.size(); 4610 i != e; 4611 ++i) { 4612 CCValAssign &VA = ArgLocs[i]; 4613 SDValue Arg = OutVals[i]; 4614 ISD::ArgFlagsTy Flags = Outs[i].Flags; 4615 4616 if (Flags.isByVal()) { 4617 // Argument is an aggregate which is passed by value, thus we need to 4618 // create a copy of it in the local variable space of the current stack 4619 // frame (which is the stack frame of the caller) and pass the address of 4620 // this copy to the callee. 4621 assert((j < ByValArgLocs.size()) && "Index out of bounds!"); 4622 CCValAssign &ByValVA = ByValArgLocs[j++]; 4623 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); 4624 4625 // Memory reserved in the local variable space of the callers stack frame. 4626 unsigned LocMemOffset = ByValVA.getLocMemOffset(); 4627 4628 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 4629 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 4630 StackPtr, PtrOff); 4631 4632 // Create a copy of the argument in the local area of the current 4633 // stack frame. 4634 SDValue MemcpyCall = 4635 CreateCopyOfByValArgument(Arg, PtrOff, 4636 CallSeqStart.getNode()->getOperand(0), 4637 Flags, DAG, dl); 4638 4639 // This must go outside the CALLSEQ_START..END. 4640 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 4641 CallSeqStart.getNode()->getOperand(1), 4642 SDLoc(MemcpyCall)); 4643 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 4644 NewCallSeqStart.getNode()); 4645 Chain = CallSeqStart = NewCallSeqStart; 4646 4647 // Pass the address of the aggregate copy on the stack either in a 4648 // physical register or in the parameter list area of the current stack 4649 // frame to the callee. 4650 Arg = PtrOff; 4651 } 4652 4653 if (VA.isRegLoc()) { 4654 if (Arg.getValueType() == MVT::i1) 4655 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg); 4656 4657 seenFloatArg |= VA.getLocVT().isFloatingPoint(); 4658 // Put argument in a physical register. 4659 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 4660 } else { 4661 // Put argument in the parameter list area of the current stack frame. 4662 assert(VA.isMemLoc()); 4663 unsigned LocMemOffset = VA.getLocMemOffset(); 4664 4665 if (!isTailCall) { 4666 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 4667 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 4668 StackPtr, PtrOff); 4669 4670 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, 4671 MachinePointerInfo(), 4672 false, false, 0)); 4673 } else { 4674 // Calculate and remember argument location. 4675 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset, 4676 TailCallArguments); 4677 } 4678 } 4679 } 4680 4681 if (!MemOpChains.empty()) 4682 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 4683 4684 // Build a sequence of copy-to-reg nodes chained together with token chain 4685 // and flag operands which copy the outgoing args into the appropriate regs. 4686 SDValue InFlag; 4687 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 4688 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 4689 RegsToPass[i].second, InFlag); 4690 InFlag = Chain.getValue(1); 4691 } 4692 4693 // Set CR bit 6 to true if this is a vararg call with floating args passed in 4694 // registers. 4695 if (isVarArg) { 4696 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 4697 SDValue Ops[] = { Chain, InFlag }; 4698 4699 Chain = DAG.getNode(seenFloatArg ? 
PPCISD::CR6SET : PPCISD::CR6UNSET, 4700 dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1)); 4701 4702 InFlag = Chain.getValue(1); 4703 } 4704 4705 if (isTailCall) 4706 PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp, 4707 false, TailCallArguments); 4708 4709 return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint, 4710 /* unused except on PPC64 ELFv1 */ false, DAG, 4711 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 4712 NumBytes, Ins, InVals, CS); 4713 } 4714 4715 // Copy an argument into memory, being careful to do this outside the 4716 // call sequence for the call to which the argument belongs. 4717 SDValue 4718 PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff, 4719 SDValue CallSeqStart, 4720 ISD::ArgFlagsTy Flags, 4721 SelectionDAG &DAG, 4722 SDLoc dl) const { 4723 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, 4724 CallSeqStart.getNode()->getOperand(0), 4725 Flags, DAG, dl); 4726 // The MEMCPY must go outside the CALLSEQ_START..END. 4727 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 4728 CallSeqStart.getNode()->getOperand(1), 4729 SDLoc(MemcpyCall)); 4730 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 4731 NewCallSeqStart.getNode()); 4732 return NewCallSeqStart; 4733 } 4734 4735 SDValue 4736 PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee, 4737 CallingConv::ID CallConv, bool isVarArg, 4738 bool isTailCall, bool IsPatchPoint, 4739 const SmallVectorImpl<ISD::OutputArg> &Outs, 4740 const SmallVectorImpl<SDValue> &OutVals, 4741 const SmallVectorImpl<ISD::InputArg> &Ins, 4742 SDLoc dl, SelectionDAG &DAG, 4743 SmallVectorImpl<SDValue> &InVals, 4744 ImmutableCallSite *CS) const { 4745 4746 bool isELFv2ABI = Subtarget.isELFv2ABI(); 4747 bool isLittleEndian = Subtarget.isLittleEndian(); 4748 unsigned NumOps = Outs.size(); 4749 bool hasNest = false; 4750 4751 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4752 unsigned PtrByteSize = 8; 4753 4754 MachineFunction &MF = DAG.getMachineFunction(); 4755 4756 // Mark this function as potentially containing a function that contains a 4757 // tail call. As a consequence the frame pointer will be used for dynamicalloc 4758 // and restoring the callers stack pointer in this functions epilog. This is 4759 // done because by tail calling the called function might overwrite the value 4760 // in this function's (MF) stack pointer stack slot 0(SP). 4761 if (getTargetMachine().Options.GuaranteedTailCallOpt && 4762 CallConv == CallingConv::Fast) 4763 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 4764 4765 assert(!(CallConv == CallingConv::Fast && isVarArg) && 4766 "fastcc not supported on varargs functions"); 4767 4768 // Count how many bytes are to be pushed on the stack, including the linkage 4769 // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes 4770 // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage 4771 // area is 32 bytes reserved space for [SP][CR][LR][TOC]. 
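// As a rough sketch of those layouts (offsets from the stack pointer at the
// call site, taken from the ABI documents rather than computed here):
//   ELFv1, 48 bytes: 0 back chain, 8 CR save, 16 LR save, 24/32 reserved,
//                    40 TOC save.
//   ELFv2, 32 bytes: 0 back chain, 8 CR save, 16 LR save, 24 TOC save.
// The parameter save area, when one is required, starts right after this region.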
4772 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 4773 unsigned NumBytes = LinkageSize; 4774 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 4775 unsigned &QFPR_idx = FPR_idx; 4776 4777 static const MCPhysReg GPR[] = { 4778 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 4779 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 4780 }; 4781 static const MCPhysReg VR[] = { 4782 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 4783 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 4784 }; 4785 static const MCPhysReg VSRH[] = { 4786 PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8, 4787 PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13 4788 }; 4789 4790 const unsigned NumGPRs = array_lengthof(GPR); 4791 const unsigned NumFPRs = 13; 4792 const unsigned NumVRs = array_lengthof(VR); 4793 const unsigned NumQFPRs = NumFPRs; 4794 4795 // When using the fast calling convention, we don't provide backing for 4796 // arguments that will be in registers. 4797 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0; 4798 4799 // Add up all the space actually used. 4800 for (unsigned i = 0; i != NumOps; ++i) { 4801 ISD::ArgFlagsTy Flags = Outs[i].Flags; 4802 EVT ArgVT = Outs[i].VT; 4803 EVT OrigVT = Outs[i].ArgVT; 4804 4805 if (Flags.isNest()) 4806 continue; 4807 4808 if (CallConv == CallingConv::Fast) { 4809 if (Flags.isByVal()) 4810 NumGPRsUsed += (Flags.getByValSize()+7)/8; 4811 else 4812 switch (ArgVT.getSimpleVT().SimpleTy) { 4813 default: llvm_unreachable("Unexpected ValueType for argument!"); 4814 case MVT::i1: 4815 case MVT::i32: 4816 case MVT::i64: 4817 if (++NumGPRsUsed <= NumGPRs) 4818 continue; 4819 break; 4820 case MVT::v4i32: 4821 case MVT::v8i16: 4822 case MVT::v16i8: 4823 case MVT::v2f64: 4824 case MVT::v2i64: 4825 case MVT::v1i128: 4826 if (++NumVRsUsed <= NumVRs) 4827 continue; 4828 break; 4829 case MVT::v4f32: 4830 // When using QPX, this is handled like a FP register, otherwise, it 4831 // is an Altivec register. 4832 if (Subtarget.hasQPX()) { 4833 if (++NumFPRsUsed <= NumFPRs) 4834 continue; 4835 } else { 4836 if (++NumVRsUsed <= NumVRs) 4837 continue; 4838 } 4839 break; 4840 case MVT::f32: 4841 case MVT::f64: 4842 case MVT::v4f64: // QPX 4843 case MVT::v4i1: // QPX 4844 if (++NumFPRsUsed <= NumFPRs) 4845 continue; 4846 break; 4847 } 4848 } 4849 4850 /* Respect alignment of argument on the stack. */ 4851 unsigned Align = 4852 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 4853 NumBytes = ((NumBytes + Align - 1) / Align) * Align; 4854 4855 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 4856 if (Flags.isInConsecutiveRegsLast()) 4857 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 4858 } 4859 4860 unsigned NumBytesActuallyUsed = NumBytes; 4861 4862 // The prolog code of the callee may store up to 8 GPR argument registers to 4863 // the stack, allowing va_start to index over them in memory if its varargs. 4864 // Because we cannot tell if this is needed on the caller side, we have to 4865 // conservatively assume that it is needed. As such, make sure we have at 4866 // least enough stack space for the caller to store the 8 GPRs. 4867 // FIXME: On ELFv2, it may be unnecessary to allocate the parameter area. 4868 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize); 4869 4870 // Tail call needs the stack to be aligned. 
4871 if (getTargetMachine().Options.GuaranteedTailCallOpt && 4872 CallConv == CallingConv::Fast) 4873 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes); 4874 4875 // Calculate by how many bytes the stack has to be adjusted in case of tail 4876 // call optimization. 4877 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 4878 4879 // To protect arguments on the stack from being clobbered in a tail call, 4880 // force all the loads to happen before doing any other lowering. 4881 if (isTailCall) 4882 Chain = DAG.getStackArgumentTokenFactor(Chain); 4883 4884 // Adjust the stack pointer for the new arguments... 4885 // These operations are automatically eliminated by the prolog/epilog pass 4886 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4887 dl); 4888 SDValue CallSeqStart = Chain; 4889 4890 // Load the return address and frame pointer so it can be move somewhere else 4891 // later. 4892 SDValue LROp, FPOp; 4893 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true, 4894 dl); 4895 4896 // Set up a copy of the stack pointer for use loading and storing any 4897 // arguments that may not fit in the registers available for argument 4898 // passing. 4899 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 4900 4901 // Figure out which arguments are going to go in registers, and which in 4902 // memory. Also, if this is a vararg function, floating point operations 4903 // must be stored to our stack, and loaded into integer regs as well, if 4904 // any integer regs are available for argument passing. 4905 unsigned ArgOffset = LinkageSize; 4906 4907 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 4908 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 4909 4910 SmallVector<SDValue, 8> MemOpChains; 4911 for (unsigned i = 0; i != NumOps; ++i) { 4912 SDValue Arg = OutVals[i]; 4913 ISD::ArgFlagsTy Flags = Outs[i].Flags; 4914 EVT ArgVT = Outs[i].VT; 4915 EVT OrigVT = Outs[i].ArgVT; 4916 4917 // PtrOff will be used to store the current argument to the stack if a 4918 // register cannot be found for it. 4919 SDValue PtrOff; 4920 4921 // We re-align the argument offset for each argument, except when using the 4922 // fast calling convention, when we need to make sure we do that only when 4923 // we'll actually use a stack slot. 4924 auto ComputePtrOff = [&]() { 4925 /* Respect alignment of argument on the stack. */ 4926 unsigned Align = 4927 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 4928 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 4929 4930 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 4931 4932 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 4933 }; 4934 4935 if (CallConv != CallingConv::Fast) { 4936 ComputePtrOff(); 4937 4938 /* Compute GPR index associated with argument offset. */ 4939 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 4940 GPR_idx = std::min(GPR_idx, NumGPRs); 4941 } 4942 4943 // Promote integers to 64-bit values. 4944 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) { 4945 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 4946 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 4947 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 4948 } 4949 4950 // FIXME memcpy is used way more than necessary. Correctness first. 4951 // Note: "by value" is code for passing a structure by value, not 4952 // basic types. 
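// (Hedged example of the right-justification handled below: on a big-endian
// target a 4-byte 'struct { short a; char b; }' passed by value is picked up
// with an extending load, so it lands in the low-order (rightmost) bytes of
// the GPR; odd-sized aggregates are first memcpy'd to the right-hand end of
// their stack doubleword before the whole doubleword is loaded.)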
4953 if (Flags.isByVal()) { 4954 // Note: Size includes alignment padding, so 4955 // struct x { short a; char b; } 4956 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 4957 // These are the proper values we need for right-justifying the 4958 // aggregate in a parameter register. 4959 unsigned Size = Flags.getByValSize(); 4960 4961 // An empty aggregate parameter takes up no storage and no 4962 // registers. 4963 if (Size == 0) 4964 continue; 4965 4966 if (CallConv == CallingConv::Fast) 4967 ComputePtrOff(); 4968 4969 // All aggregates smaller than 8 bytes must be passed right-justified. 4970 if (Size==1 || Size==2 || Size==4) { 4971 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 4972 if (GPR_idx != NumGPRs) { 4973 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 4974 MachinePointerInfo(), VT, 4975 false, false, false, 0); 4976 MemOpChains.push_back(Load.getValue(1)); 4977 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4978 4979 ArgOffset += PtrByteSize; 4980 continue; 4981 } 4982 } 4983 4984 if (GPR_idx == NumGPRs && Size < 8) { 4985 SDValue AddPtr = PtrOff; 4986 if (!isLittleEndian) { 4987 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 4988 PtrOff.getValueType()); 4989 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 4990 } 4991 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 4992 CallSeqStart, 4993 Flags, DAG, dl); 4994 ArgOffset += PtrByteSize; 4995 continue; 4996 } 4997 // Copy entire object into memory. There are cases where gcc-generated 4998 // code assumes it is there, even if it could be put entirely into 4999 // registers. (This is not what the doc says.) 5000 5001 // FIXME: The above statement is likely due to a misunderstanding of the 5002 // documents. All arguments must be copied into the parameter area BY 5003 // THE CALLEE in the event that the callee takes the address of any 5004 // formal argument. That has not yet been implemented. However, it is 5005 // reasonable to use the stack area as a staging area for the register 5006 // load. 5007 5008 // Skip this for small aggregates, as we will use the same slot for a 5009 // right-justified copy, below. 5010 if (Size >= 8) 5011 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5012 CallSeqStart, 5013 Flags, DAG, dl); 5014 5015 // When a register is available, pass a small aggregate right-justified. 5016 if (Size < 8 && GPR_idx != NumGPRs) { 5017 // The easiest way to get this right-justified in a register 5018 // is to copy the structure into the rightmost portion of a 5019 // local variable slot, then load the whole slot into the 5020 // register. 5021 // FIXME: The memcpy seems to produce pretty awful code for 5022 // small aggregates, particularly for packed ones. 5023 // FIXME: It would be preferable to use the slot in the 5024 // parameter save area instead of a new local variable. 5025 SDValue AddPtr = PtrOff; 5026 if (!isLittleEndian) { 5027 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType()); 5028 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5029 } 5030 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5031 CallSeqStart, 5032 Flags, DAG, dl); 5033 5034 // Load the slot into the register. 5035 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, PtrOff, 5036 MachinePointerInfo(), 5037 false, false, false, 0); 5038 MemOpChains.push_back(Load.getValue(1)); 5039 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5040 5041 // Done with this argument. 
5042 ArgOffset += PtrByteSize; 5043 continue; 5044 } 5045 5046 // For aggregates larger than PtrByteSize, copy the pieces of the 5047 // object that fit into registers from the parameter save area. 5048 for (unsigned j=0; j<Size; j+=PtrByteSize) { 5049 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 5050 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 5051 if (GPR_idx != NumGPRs) { 5052 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 5053 MachinePointerInfo(), 5054 false, false, false, 0); 5055 MemOpChains.push_back(Load.getValue(1)); 5056 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5057 ArgOffset += PtrByteSize; 5058 } else { 5059 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 5060 break; 5061 } 5062 } 5063 continue; 5064 } 5065 5066 switch (Arg.getSimpleValueType().SimpleTy) { 5067 default: llvm_unreachable("Unexpected ValueType for argument!"); 5068 case MVT::i1: 5069 case MVT::i32: 5070 case MVT::i64: 5071 if (Flags.isNest()) { 5072 // The 'nest' parameter, if any, is passed in R11. 5073 RegsToPass.push_back(std::make_pair(PPC::X11, Arg)); 5074 hasNest = true; 5075 break; 5076 } 5077 5078 // These can be scalar arguments or elements of an integer array type 5079 // passed directly. Clang may use those instead of "byval" aggregate 5080 // types to avoid forcing arguments to memory unnecessarily. 5081 if (GPR_idx != NumGPRs) { 5082 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 5083 } else { 5084 if (CallConv == CallingConv::Fast) 5085 ComputePtrOff(); 5086 5087 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5088 true, isTailCall, false, MemOpChains, 5089 TailCallArguments, dl); 5090 if (CallConv == CallingConv::Fast) 5091 ArgOffset += PtrByteSize; 5092 } 5093 if (CallConv != CallingConv::Fast) 5094 ArgOffset += PtrByteSize; 5095 break; 5096 case MVT::f32: 5097 case MVT::f64: { 5098 // These can be scalar arguments or elements of a float array type 5099 // passed directly. The latter are used to implement ELFv2 homogenous 5100 // float aggregates. 5101 5102 // Named arguments go into FPRs first, and once they overflow, the 5103 // remaining arguments go into GPRs and then the parameter save area. 5104 // Unnamed arguments for vararg functions always go to GPRs and 5105 // then the parameter save area. For now, put all arguments to vararg 5106 // routines always in both locations (FPR *and* GPR or stack slot). 5107 bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs; 5108 bool NeededLoad = false; 5109 5110 // First load the argument into the next available FPR. 5111 if (FPR_idx != NumFPRs) 5112 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 5113 5114 // Next, load the argument into GPR or stack slot if needed. 5115 if (!NeedGPROrStack) 5116 ; 5117 else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) { 5118 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 5119 // once we support fp <-> gpr moves. 5120 5121 // In the non-vararg case, this can only ever happen in the 5122 // presence of f32 array types, since otherwise we never run 5123 // out of FPRs before running out of GPRs. 5124 SDValue ArgVal; 5125 5126 // Double values are always passed in a single GPR. 5127 if (Arg.getValueType() != MVT::f32) { 5128 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg); 5129 5130 // Non-array float values are extended and passed in a GPR. 
5131 } else if (!Flags.isInConsecutiveRegs()) { 5132 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 5133 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal); 5134 5135 // If we have an array of floats, we collect every odd element 5136 // together with its predecessor into one GPR. 5137 } else if (ArgOffset % PtrByteSize != 0) { 5138 SDValue Lo, Hi; 5139 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]); 5140 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 5141 if (!isLittleEndian) 5142 std::swap(Lo, Hi); 5143 ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 5144 5145 // The final element, if even, goes into the first half of a GPR. 5146 } else if (Flags.isInConsecutiveRegsLast()) { 5147 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 5148 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal); 5149 if (!isLittleEndian) 5150 ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal, 5151 DAG.getConstant(32, dl, MVT::i32)); 5152 5153 // Non-final even elements are skipped; they will be handled 5154 // together the with subsequent argument on the next go-around. 5155 } else 5156 ArgVal = SDValue(); 5157 5158 if (ArgVal.getNode()) 5159 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal)); 5160 } else { 5161 if (CallConv == CallingConv::Fast) 5162 ComputePtrOff(); 5163 5164 // Single-precision floating-point values are mapped to the 5165 // second (rightmost) word of the stack doubleword. 5166 if (Arg.getValueType() == MVT::f32 && 5167 !isLittleEndian && !Flags.isInConsecutiveRegs()) { 5168 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 5169 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 5170 } 5171 5172 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5173 true, isTailCall, false, MemOpChains, 5174 TailCallArguments, dl); 5175 5176 NeededLoad = true; 5177 } 5178 // When passing an array of floats, the array occupies consecutive 5179 // space in the argument area; only round up to the next doubleword 5180 // at the end of the array. Otherwise, each float takes 8 bytes. 5181 if (CallConv != CallingConv::Fast || NeededLoad) { 5182 ArgOffset += (Arg.getValueType() == MVT::f32 && 5183 Flags.isInConsecutiveRegs()) ? 4 : 8; 5184 if (Flags.isInConsecutiveRegsLast()) 5185 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 5186 } 5187 break; 5188 } 5189 case MVT::v4f32: 5190 case MVT::v4i32: 5191 case MVT::v8i16: 5192 case MVT::v16i8: 5193 case MVT::v2f64: 5194 case MVT::v2i64: 5195 case MVT::v1i128: 5196 if (!Subtarget.hasQPX()) { 5197 // These can be scalar arguments or elements of a vector array type 5198 // passed directly. The latter are used to implement ELFv2 homogenous 5199 // vector aggregates. 5200 5201 // For a varargs call, named arguments go into VRs or on the stack as 5202 // usual; unnamed arguments always go to the stack or the corresponding 5203 // GPRs when within range. For now, we always put the value in both 5204 // locations (or even all three). 5205 if (isVarArg) { 5206 // We could elide this store in the case where the object fits 5207 // entirely in R registers. Maybe later. 
5208 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5209 MachinePointerInfo(), false, false, 0); 5210 MemOpChains.push_back(Store); 5211 if (VR_idx != NumVRs) { 5212 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 5213 MachinePointerInfo(), 5214 false, false, false, 0); 5215 MemOpChains.push_back(Load.getValue(1)); 5216 5217 unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 || 5218 Arg.getSimpleValueType() == MVT::v2i64) ? 5219 VSRH[VR_idx] : VR[VR_idx]; 5220 ++VR_idx; 5221 5222 RegsToPass.push_back(std::make_pair(VReg, Load)); 5223 } 5224 ArgOffset += 16; 5225 for (unsigned i=0; i<16; i+=PtrByteSize) { 5226 if (GPR_idx == NumGPRs) 5227 break; 5228 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5229 DAG.getConstant(i, dl, PtrVT)); 5230 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 5231 false, false, false, 0); 5232 MemOpChains.push_back(Load.getValue(1)); 5233 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5234 } 5235 break; 5236 } 5237 5238 // Non-varargs Altivec params go into VRs or on the stack. 5239 if (VR_idx != NumVRs) { 5240 unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 || 5241 Arg.getSimpleValueType() == MVT::v2i64) ? 5242 VSRH[VR_idx] : VR[VR_idx]; 5243 ++VR_idx; 5244 5245 RegsToPass.push_back(std::make_pair(VReg, Arg)); 5246 } else { 5247 if (CallConv == CallingConv::Fast) 5248 ComputePtrOff(); 5249 5250 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5251 true, isTailCall, true, MemOpChains, 5252 TailCallArguments, dl); 5253 if (CallConv == CallingConv::Fast) 5254 ArgOffset += 16; 5255 } 5256 5257 if (CallConv != CallingConv::Fast) 5258 ArgOffset += 16; 5259 break; 5260 } // not QPX 5261 5262 assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 && 5263 "Invalid QPX parameter type"); 5264 5265 /* fall through */ 5266 case MVT::v4f64: 5267 case MVT::v4i1: { 5268 bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32; 5269 if (isVarArg) { 5270 // We could elide this store in the case where the object fits 5271 // entirely in R registers. Maybe later. 5272 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5273 MachinePointerInfo(), false, false, 0); 5274 MemOpChains.push_back(Store); 5275 if (QFPR_idx != NumQFPRs) { 5276 SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, 5277 Store, PtrOff, MachinePointerInfo(), 5278 false, false, false, 0); 5279 MemOpChains.push_back(Load.getValue(1)); 5280 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load)); 5281 } 5282 ArgOffset += (IsF32 ? 16 : 32); 5283 for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) { 5284 if (GPR_idx == NumGPRs) 5285 break; 5286 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5287 DAG.getConstant(i, dl, PtrVT)); 5288 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 5289 false, false, false, 0); 5290 MemOpChains.push_back(Load.getValue(1)); 5291 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5292 } 5293 break; 5294 } 5295 5296 // Non-varargs QPX params go into registers or on the stack. 5297 if (QFPR_idx != NumQFPRs) { 5298 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg)); 5299 } else { 5300 if (CallConv == CallingConv::Fast) 5301 ComputePtrOff(); 5302 5303 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5304 true, isTailCall, true, MemOpChains, 5305 TailCallArguments, dl); 5306 if (CallConv == CallingConv::Fast) 5307 ArgOffset += (IsF32 ? 
16 : 32); 5308 } 5309 5310 if (CallConv != CallingConv::Fast) 5311 ArgOffset += (IsF32 ? 16 : 32); 5312 break; 5313 } 5314 } 5315 } 5316 5317 assert(NumBytesActuallyUsed == ArgOffset); 5318 (void)NumBytesActuallyUsed; 5319 5320 if (!MemOpChains.empty()) 5321 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5322 5323 // Check if this is an indirect call (MTCTR/BCTRL). 5324 // See PrepareCall() for more information about calls through function 5325 // pointers in the 64-bit SVR4 ABI. 5326 if (!isTailCall && !IsPatchPoint && 5327 !isFunctionGlobalAddress(Callee) && 5328 !isa<ExternalSymbolSDNode>(Callee)) { 5329 // Load r2 into a virtual register and store it to the TOC save area. 5330 setUsesTOCBasePtr(DAG); 5331 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64); 5332 // TOC save area offset. 5333 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset(); 5334 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl); 5335 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 5336 Chain = DAG.getStore( 5337 Val.getValue(1), dl, Val, AddPtr, 5338 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset), 5339 false, false, 0); 5340 // In the ELFv2 ABI, R12 must contain the address of an indirect callee. 5341 // This does not mean the MTCTR instruction must use R12; it's easier 5342 // to model this as an extra parameter, so do that. 5343 if (isELFv2ABI && !IsPatchPoint) 5344 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee)); 5345 } 5346 5347 // Build a sequence of copy-to-reg nodes chained together with token chain 5348 // and flag operands which copy the outgoing args into the appropriate regs. 5349 SDValue InFlag; 5350 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 5351 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 5352 RegsToPass[i].second, InFlag); 5353 InFlag = Chain.getValue(1); 5354 } 5355 5356 if (isTailCall) 5357 PrepareTailCall(DAG, InFlag, Chain, dl, true, SPDiff, NumBytes, LROp, 5358 FPOp, true, TailCallArguments); 5359 5360 return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint, 5361 hasNest, DAG, RegsToPass, InFlag, Chain, CallSeqStart, 5362 Callee, SPDiff, NumBytes, Ins, InVals, CS); 5363 } 5364 5365 SDValue 5366 PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee, 5367 CallingConv::ID CallConv, bool isVarArg, 5368 bool isTailCall, bool IsPatchPoint, 5369 const SmallVectorImpl<ISD::OutputArg> &Outs, 5370 const SmallVectorImpl<SDValue> &OutVals, 5371 const SmallVectorImpl<ISD::InputArg> &Ins, 5372 SDLoc dl, SelectionDAG &DAG, 5373 SmallVectorImpl<SDValue> &InVals, 5374 ImmutableCallSite *CS) const { 5375 5376 unsigned NumOps = Outs.size(); 5377 5378 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 5379 bool isPPC64 = PtrVT == MVT::i64; 5380 unsigned PtrByteSize = isPPC64 ? 8 : 4; 5381 5382 MachineFunction &MF = DAG.getMachineFunction(); 5383 5384 // Mark this function as potentially containing a function that contains a 5385 // tail call. As a consequence the frame pointer will be used for dynamicalloc 5386 // and restoring the callers stack pointer in this functions epilog. This is 5387 // done because by tail calling the called function might overwrite the value 5388 // in this function's (MF) stack pointer stack slot 0(SP). 
5389   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5390       CallConv == CallingConv::Fast)
5391     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5392
5393   // Count how many bytes are to be pushed on the stack, including the linkage
5394   // area, and parameter passing area. We start with 24/48 bytes, which is
5395   // prereserved space for [SP][CR][LR][3 x unused].
5396   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5397   unsigned NumBytes = LinkageSize;
5398
5399   // Add up all the space actually used.
5400   // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
5401   // they all go in registers, but we must reserve stack space for them for
5402   // possible use by the caller. In varargs or 64-bit calls, parameters are
5403   // assigned stack space in order, with padding so Altivec parameters are
5404   // 16-byte aligned.
5405   unsigned nAltivecParamsAtEnd = 0;
5406   for (unsigned i = 0; i != NumOps; ++i) {
5407     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5408     EVT ArgVT = Outs[i].VT;
5409     // Varargs Altivec parameters are padded to a 16-byte boundary.
5410     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
5411         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
5412         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
5413       if (!isVarArg && !isPPC64) {
5414         // Non-varargs Altivec parameters go after all the non-Altivec
5415         // parameters; handle those later so we know how much padding we need.
5416         nAltivecParamsAtEnd++;
5417         continue;
5418       }
5419       // Varargs and 64-bit Altivec parameters are padded to a 16-byte boundary.
5420       NumBytes = ((NumBytes+15)/16)*16;
5421     }
5422     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5423   }
5424
5425   // Allow for Altivec parameters at the end, if needed.
5426   if (nAltivecParamsAtEnd) {
5427     NumBytes = ((NumBytes+15)/16)*16;
5428     NumBytes += 16*nAltivecParamsAtEnd;
5429   }
5430
5431   // The prolog code of the callee may store up to 8 GPR argument registers to
5432   // the stack, allowing va_start to index over them in memory if it is varargs.
5433   // Because we cannot tell if this is needed on the caller side, we have to
5434   // conservatively assume that it is needed. As such, make sure we have at
5435   // least enough stack space for the caller to store the 8 GPRs.
5436   NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5437
5438   // Tail call needs the stack to be aligned.
5439   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5440       CallConv == CallingConv::Fast)
5441     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
5442
5443   // Calculate by how many bytes the stack has to be adjusted in case of tail
5444   // call optimization.
5445   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
5446
5447   // To protect arguments on the stack from being clobbered in a tail call,
5448   // force all the loads to happen before doing any other lowering.
5449   if (isTailCall)
5450     Chain = DAG.getStackArgumentTokenFactor(Chain);
5451
5452   // Adjust the stack pointer for the new arguments...
5453   // These operations are automatically eliminated by the prolog/epilog pass
5454   Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5455                                dl);
5456   SDValue CallSeqStart = Chain;
5457
5458   // Load the return address and frame pointer so they can be moved somewhere
5459   // else later.
5460 SDValue LROp, FPOp; 5461 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true, 5462 dl); 5463 5464 // Set up a copy of the stack pointer for use loading and storing any 5465 // arguments that may not fit in the registers available for argument 5466 // passing. 5467 SDValue StackPtr; 5468 if (isPPC64) 5469 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 5470 else 5471 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 5472 5473 // Figure out which arguments are going to go in registers, and which in 5474 // memory. Also, if this is a vararg function, floating point operations 5475 // must be stored to our stack, and loaded into integer regs as well, if 5476 // any integer regs are available for argument passing. 5477 unsigned ArgOffset = LinkageSize; 5478 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 5479 5480 static const MCPhysReg GPR_32[] = { // 32-bit registers. 5481 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 5482 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 5483 }; 5484 static const MCPhysReg GPR_64[] = { // 64-bit registers. 5485 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 5486 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 5487 }; 5488 static const MCPhysReg VR[] = { 5489 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 5490 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 5491 }; 5492 const unsigned NumGPRs = array_lengthof(GPR_32); 5493 const unsigned NumFPRs = 13; 5494 const unsigned NumVRs = array_lengthof(VR); 5495 5496 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 5497 5498 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 5499 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 5500 5501 SmallVector<SDValue, 8> MemOpChains; 5502 for (unsigned i = 0; i != NumOps; ++i) { 5503 SDValue Arg = OutVals[i]; 5504 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5505 5506 // PtrOff will be used to store the current argument to the stack if a 5507 // register cannot be found for it. 5508 SDValue PtrOff; 5509 5510 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 5511 5512 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 5513 5514 // On PPC64, promote integers to 64-bit values. 5515 if (isPPC64 && Arg.getValueType() == MVT::i32) { 5516 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 5517 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 5518 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 5519 } 5520 5521 // FIXME memcpy is used way more than necessary. Correctness first. 5522 // Note: "by value" is code for passing a structure by value, not 5523 // basic types. 5524 if (Flags.isByVal()) { 5525 unsigned Size = Flags.getByValSize(); 5526 // Very small objects are passed right-justified. Everything else is 5527 // passed left-justified. 5528 if (Size==1 || Size==2) { 5529 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 5530 if (GPR_idx != NumGPRs) { 5531 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 5532 MachinePointerInfo(), VT, 5533 false, false, false, 0); 5534 MemOpChains.push_back(Load.getValue(1)); 5535 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5536 5537 ArgOffset += PtrByteSize; 5538 } else { 5539 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 5540 PtrOff.getValueType()); 5541 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5542 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5543 CallSeqStart, 5544 Flags, DAG, dl); 5545 ArgOffset += PtrByteSize; 5546 } 5547 continue; 5548 } 5549 // Copy entire object into memory. 
There are cases where gcc-generated 5550 // code assumes it is there, even if it could be put entirely into 5551 // registers. (This is not what the doc says.) 5552 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5553 CallSeqStart, 5554 Flags, DAG, dl); 5555 5556 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 5557 // copy the pieces of the object that fit into registers from the 5558 // parameter save area. 5559 for (unsigned j=0; j<Size; j+=PtrByteSize) { 5560 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 5561 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 5562 if (GPR_idx != NumGPRs) { 5563 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 5564 MachinePointerInfo(), 5565 false, false, false, 0); 5566 MemOpChains.push_back(Load.getValue(1)); 5567 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5568 ArgOffset += PtrByteSize; 5569 } else { 5570 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 5571 break; 5572 } 5573 } 5574 continue; 5575 } 5576 5577 switch (Arg.getSimpleValueType().SimpleTy) { 5578 default: llvm_unreachable("Unexpected ValueType for argument!"); 5579 case MVT::i1: 5580 case MVT::i32: 5581 case MVT::i64: 5582 if (GPR_idx != NumGPRs) { 5583 if (Arg.getValueType() == MVT::i1) 5584 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg); 5585 5586 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 5587 } else { 5588 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5589 isPPC64, isTailCall, false, MemOpChains, 5590 TailCallArguments, dl); 5591 } 5592 ArgOffset += PtrByteSize; 5593 break; 5594 case MVT::f32: 5595 case MVT::f64: 5596 if (FPR_idx != NumFPRs) { 5597 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 5598 5599 if (isVarArg) { 5600 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5601 MachinePointerInfo(), false, false, 0); 5602 MemOpChains.push_back(Store); 5603 5604 // Float varargs are always shadowed in available integer registers 5605 if (GPR_idx != NumGPRs) { 5606 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 5607 MachinePointerInfo(), false, false, 5608 false, 0); 5609 MemOpChains.push_back(Load.getValue(1)); 5610 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5611 } 5612 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 5613 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 5614 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 5615 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 5616 MachinePointerInfo(), 5617 false, false, false, 0); 5618 MemOpChains.push_back(Load.getValue(1)); 5619 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5620 } 5621 } else { 5622 // If we have any FPRs remaining, we may also have GPRs remaining. 5623 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 5624 // GPRs. 5625 if (GPR_idx != NumGPRs) 5626 ++GPR_idx; 5627 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 5628 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 5629 ++GPR_idx; 5630 } 5631 } else 5632 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5633 isPPC64, isTailCall, false, MemOpChains, 5634 TailCallArguments, dl); 5635 if (isPPC64) 5636 ArgOffset += 8; 5637 else 5638 ArgOffset += Arg.getValueType() == MVT::f32 ? 
4 : 8; 5639 break; 5640 case MVT::v4f32: 5641 case MVT::v4i32: 5642 case MVT::v8i16: 5643 case MVT::v16i8: 5644 if (isVarArg) { 5645 // These go aligned on the stack, or in the corresponding R registers 5646 // when within range. The Darwin PPC ABI doc claims they also go in 5647 // V registers; in fact gcc does this only for arguments that are 5648 // prototyped, not for those that match the ... We do it for all 5649 // arguments, seems to work. 5650 while (ArgOffset % 16 !=0) { 5651 ArgOffset += PtrByteSize; 5652 if (GPR_idx != NumGPRs) 5653 GPR_idx++; 5654 } 5655 // We could elide this store in the case where the object fits 5656 // entirely in R registers. Maybe later. 5657 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 5658 DAG.getConstant(ArgOffset, dl, PtrVT)); 5659 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5660 MachinePointerInfo(), false, false, 0); 5661 MemOpChains.push_back(Store); 5662 if (VR_idx != NumVRs) { 5663 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 5664 MachinePointerInfo(), 5665 false, false, false, 0); 5666 MemOpChains.push_back(Load.getValue(1)); 5667 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 5668 } 5669 ArgOffset += 16; 5670 for (unsigned i=0; i<16; i+=PtrByteSize) { 5671 if (GPR_idx == NumGPRs) 5672 break; 5673 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5674 DAG.getConstant(i, dl, PtrVT)); 5675 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 5676 false, false, false, 0); 5677 MemOpChains.push_back(Load.getValue(1)); 5678 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5679 } 5680 break; 5681 } 5682 5683 // Non-varargs Altivec params generally go in registers, but have 5684 // stack space allocated at the end. 5685 if (VR_idx != NumVRs) { 5686 // Doesn't have GPR space allocated. 5687 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 5688 } else if (nAltivecParamsAtEnd==0) { 5689 // We are emitting Altivec params in order. 5690 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5691 isPPC64, isTailCall, true, MemOpChains, 5692 TailCallArguments, dl); 5693 ArgOffset += 16; 5694 } 5695 break; 5696 } 5697 } 5698 // If all Altivec parameters fit in registers, as they usually do, 5699 // they get stack space following the non-Altivec parameters. We 5700 // don't track this here because nobody below needs it. 5701 // If there are more Altivec parameters than fit in registers emit 5702 // the stores here. 5703 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 5704 unsigned j = 0; 5705 // Offset is aligned; skip 1st 12 params which go in V registers. 5706 ArgOffset = ((ArgOffset+15)/16)*16; 5707 ArgOffset += 12*16; 5708 for (unsigned i = 0; i != NumOps; ++i) { 5709 SDValue Arg = OutVals[i]; 5710 EVT ArgType = Outs[i].VT; 5711 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 5712 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 5713 if (++j > NumVRs) { 5714 SDValue PtrOff; 5715 // We are emitting Altivec params in order. 5716 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5717 isPPC64, isTailCall, true, MemOpChains, 5718 TailCallArguments, dl); 5719 ArgOffset += 16; 5720 } 5721 } 5722 } 5723 } 5724 5725 if (!MemOpChains.empty()) 5726 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5727 5728 // On Darwin, R12 must contain the address of an indirect callee. This does 5729 // not mean the MTCTR instruction must use R12; it's easier to model this as 5730 // an extra parameter, so do that. 
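// (Sketch of the effect, for illustration only: the callee address is simply
// appended to RegsToPass, so the copy into R12 is emitted together with the
// other argument copies below, and R12 then appears as an operand of the call
// node, keeping it live up to the bctrl.)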
5731 if (!isTailCall && 5732 !isFunctionGlobalAddress(Callee) && 5733 !isa<ExternalSymbolSDNode>(Callee) && 5734 !isBLACompatibleAddress(Callee, DAG)) 5735 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 : 5736 PPC::R12), Callee)); 5737 5738 // Build a sequence of copy-to-reg nodes chained together with token chain 5739 // and flag operands which copy the outgoing args into the appropriate regs. 5740 SDValue InFlag; 5741 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 5742 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 5743 RegsToPass[i].second, InFlag); 5744 InFlag = Chain.getValue(1); 5745 } 5746 5747 if (isTailCall) 5748 PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp, 5749 FPOp, true, TailCallArguments); 5750 5751 return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint, 5752 /* unused except on PPC64 ELFv1 */ false, DAG, 5753 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 5754 NumBytes, Ins, InVals, CS); 5755 } 5756 5757 bool 5758 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 5759 MachineFunction &MF, bool isVarArg, 5760 const SmallVectorImpl<ISD::OutputArg> &Outs, 5761 LLVMContext &Context) const { 5762 SmallVector<CCValAssign, 16> RVLocs; 5763 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 5764 return CCInfo.CheckReturn(Outs, RetCC_PPC); 5765 } 5766 5767 SDValue 5768 PPCTargetLowering::LowerReturn(SDValue Chain, 5769 CallingConv::ID CallConv, bool isVarArg, 5770 const SmallVectorImpl<ISD::OutputArg> &Outs, 5771 const SmallVectorImpl<SDValue> &OutVals, 5772 SDLoc dl, SelectionDAG &DAG) const { 5773 5774 SmallVector<CCValAssign, 16> RVLocs; 5775 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 5776 *DAG.getContext()); 5777 CCInfo.AnalyzeReturn(Outs, RetCC_PPC); 5778 5779 SDValue Flag; 5780 SmallVector<SDValue, 4> RetOps(1, Chain); 5781 5782 // Copy the result values into the output registers. 5783 for (unsigned i = 0; i != RVLocs.size(); ++i) { 5784 CCValAssign &VA = RVLocs[i]; 5785 assert(VA.isRegLoc() && "Can only return in registers!"); 5786 5787 SDValue Arg = OutVals[i]; 5788 5789 switch (VA.getLocInfo()) { 5790 default: llvm_unreachable("Unknown loc info!"); 5791 case CCValAssign::Full: break; 5792 case CCValAssign::AExt: 5793 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 5794 break; 5795 case CCValAssign::ZExt: 5796 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 5797 break; 5798 case CCValAssign::SExt: 5799 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 5800 break; 5801 } 5802 5803 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 5804 Flag = Chain.getValue(1); 5805 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 5806 } 5807 5808 RetOps[0] = Chain; // Update chain. 5809 5810 // Add the flag if we have it. 5811 if (Flag.getNode()) 5812 RetOps.push_back(Flag); 5813 5814 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps); 5815 } 5816 5817 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG, 5818 const PPCSubtarget &Subtarget) const { 5819 // When we pop the dynamic allocation we need to restore the SP link. 5820 SDLoc dl(Op); 5821 5822 // Get the corect type for pointers. 5823 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 5824 5825 // Construct the stack pointer operand. 5826 bool isPPC64 = Subtarget.isPPC64(); 5827 unsigned SP = isPPC64 ? 
PPC::X1 : PPC::R1;
5828   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
5829 
5830   // Get the operands for the STACKRESTORE.
5831   SDValue Chain = Op.getOperand(0);
5832   SDValue SaveSP = Op.getOperand(1);
5833 
5834   // Load the old link SP.
5835   SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr,
5836                                    MachinePointerInfo(),
5837                                    false, false, false, 0);
5838 
5839   // Restore the stack pointer.
5840   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
5841 
5842   // Store the old link SP.
5843   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo(),
5844                       false, false, 0);
5845 }
5846 
5847 
5848 
5849 SDValue
5850 PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
5851   MachineFunction &MF = DAG.getMachineFunction();
5852   bool isPPC64 = Subtarget.isPPC64();
5853   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
5854 
5855   // Get the current return address save index. The users of this index will
5856   // be primarily the RETURNADDR lowering.
5857   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
5858   int RASI = FI->getReturnAddrSaveIndex();
5859 
5860   // If the return address save index hasn't been defined yet.
5861   if (!RASI) {
5862     // Find out what the fixed offset of the return address save area is.
5863     int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
5864     // Allocate the frame index for the return address save area.
5865     RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
5866     // Save the result.
5867     FI->setReturnAddrSaveIndex(RASI);
5868   }
5869   return DAG.getFrameIndex(RASI, PtrVT);
5870 }
5871 
5872 SDValue
5873 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
5874   MachineFunction &MF = DAG.getMachineFunction();
5875   bool isPPC64 = Subtarget.isPPC64();
5876   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
5877 
5878   // Get the current frame pointer save index. The users of this index will be
5879   // primarily DYNALLOC instructions.
5880   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
5881   int FPSI = FI->getFramePointerSaveIndex();
5882 
5883   // If the frame pointer save index hasn't been defined yet.
5884   if (!FPSI) {
5885     // Find out what the fixed offset of the frame pointer save area is.
5886     int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
5887     // Allocate the frame index for the frame pointer save area.
5888     FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
5889     // Save the result.
5890     FI->setFramePointerSaveIndex(FPSI);
5891   }
5892   return DAG.getFrameIndex(FPSI, PtrVT);
5893 }
5894 
5895 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
5896                                                    SelectionDAG &DAG,
5897                                                    const PPCSubtarget &Subtarget) const {
5898   // Get the inputs.
5899   SDValue Chain = Op.getOperand(0);
5900   SDValue Size  = Op.getOperand(1);
5901   SDLoc dl(Op);
5902 
5903   // Get the correct type for pointers.
5904   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5905   // Negate the size.
5906   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
5907                                 DAG.getConstant(0, dl, PtrVT), Size);
5908   // Construct a node for the frame pointer save index.
5909   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
5910   // Build a DYNALLOC node.
5911 SDValue Ops[3] = { Chain, NegSize, FPSIdx }; 5912 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other); 5913 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops); 5914 } 5915 5916 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, 5917 SelectionDAG &DAG) const { 5918 SDLoc DL(Op); 5919 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL, 5920 DAG.getVTList(MVT::i32, MVT::Other), 5921 Op.getOperand(0), Op.getOperand(1)); 5922 } 5923 5924 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, 5925 SelectionDAG &DAG) const { 5926 SDLoc DL(Op); 5927 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other, 5928 Op.getOperand(0), Op.getOperand(1)); 5929 } 5930 5931 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 5932 if (Op.getValueType().isVector()) 5933 return LowerVectorLoad(Op, DAG); 5934 5935 assert(Op.getValueType() == MVT::i1 && 5936 "Custom lowering only for i1 loads"); 5937 5938 // First, load 8 bits into 32 bits, then truncate to 1 bit. 5939 5940 SDLoc dl(Op); 5941 LoadSDNode *LD = cast<LoadSDNode>(Op); 5942 5943 SDValue Chain = LD->getChain(); 5944 SDValue BasePtr = LD->getBasePtr(); 5945 MachineMemOperand *MMO = LD->getMemOperand(); 5946 5947 SDValue NewLD = 5948 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain, 5949 BasePtr, MVT::i8, MMO); 5950 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD); 5951 5952 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) }; 5953 return DAG.getMergeValues(Ops, dl); 5954 } 5955 5956 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 5957 if (Op.getOperand(1).getValueType().isVector()) 5958 return LowerVectorStore(Op, DAG); 5959 5960 assert(Op.getOperand(1).getValueType() == MVT::i1 && 5961 "Custom lowering only for i1 stores"); 5962 5963 // First, zero extend to 32 bits, then use a truncating store to 8 bits. 5964 5965 SDLoc dl(Op); 5966 StoreSDNode *ST = cast<StoreSDNode>(Op); 5967 5968 SDValue Chain = ST->getChain(); 5969 SDValue BasePtr = ST->getBasePtr(); 5970 SDValue Value = ST->getValue(); 5971 MachineMemOperand *MMO = ST->getMemOperand(); 5972 5973 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()), 5974 Value); 5975 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO); 5976 } 5977 5978 // FIXME: Remove this once the ANDI glue bug is fixed: 5979 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 5980 assert(Op.getValueType() == MVT::i1 && 5981 "Custom lowering only for i1 results"); 5982 5983 SDLoc DL(Op); 5984 return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1, 5985 Op.getOperand(0)); 5986 } 5987 5988 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when 5989 /// possible. 5990 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 5991 // Not FP? Not a fsel. 5992 if (!Op.getOperand(0).getValueType().isFloatingPoint() || 5993 !Op.getOperand(2).getValueType().isFloatingPoint()) 5994 return Op; 5995 5996 // We might be able to do better than this under some circumstances, but in 5997 // general, fsel-based lowering of select is a finite-math-only optimization. 5998 // For more information, see section F.3 of the 2.06 ISA specification. 
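  // fsel selects on whether its first operand is >= 0.0 and treats a NaN
  // operand as "not >= 0.0", and the FSUB used below to form that operand can
  // itself produce a NaN or an infinity (e.g. Inf - Inf); the mapping is only
  // sound when NaNs and infinities are excluded, hence the guard below.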
5999   if (!DAG.getTarget().Options.NoInfsFPMath ||
6000       !DAG.getTarget().Options.NoNaNsFPMath)
6001     return Op;
6002 
6003   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
6004 
6005   EVT ResVT = Op.getValueType();
6006   EVT CmpVT = Op.getOperand(0).getValueType();
6007   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
6008   SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
6009   SDLoc dl(Op);
6010 
6011   // If the RHS of the comparison is a 0.0, we don't need to do the
6012   // subtraction at all.
6013   SDValue Sel1;
6014   if (isFloatingPointZero(RHS))
6015     switch (CC) {
6016     default: break;       // SETUO etc aren't handled by fsel.
6017     case ISD::SETNE:
6018       std::swap(TV, FV);
6019     case ISD::SETEQ:
6020       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
6021         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6022       Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
6023       if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
6024         Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
6025       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6026                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
6027     case ISD::SETULT:
6028     case ISD::SETLT:
6029       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
6030     case ISD::SETOGE:
6031     case ISD::SETGE:
6032       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
6033         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6034       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
6035     case ISD::SETUGT:
6036     case ISD::SETGT:
6037       std::swap(TV, FV);  // fsel is natively setge, swap operands for setgt
6038     case ISD::SETOLE:
6039     case ISD::SETLE:
6040       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
6041         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6042       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6043                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
6044     }
6045 
6046   SDValue Cmp;
6047   switch (CC) {
6048   default: break;       // SETUO etc aren't handled by fsel.
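  // General case: form LHS - RHS (or RHS - LHS) with FSUB and let fsel pick a
  // result from the sign of that difference; each case below orders TV/FV and
  // the subtraction so the fsel condition reduces to "difference >= 0.0".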
6049 case ISD::SETNE: 6050 std::swap(TV, FV); 6051 case ISD::SETEQ: 6052 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 6053 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6054 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6055 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6056 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 6057 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 6058 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6059 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 6060 case ISD::SETULT: 6061 case ISD::SETLT: 6062 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 6063 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6064 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6065 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6066 case ISD::SETOGE: 6067 case ISD::SETGE: 6068 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 6069 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6070 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6071 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6072 case ISD::SETUGT: 6073 case ISD::SETGT: 6074 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 6075 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6076 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6077 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6078 case ISD::SETOLE: 6079 case ISD::SETLE: 6080 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 6081 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6082 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6083 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6084 } 6085 return Op; 6086 } 6087 6088 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI, 6089 SelectionDAG &DAG, 6090 SDLoc dl) const { 6091 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6092 SDValue Src = Op.getOperand(0); 6093 if (Src.getValueType() == MVT::f32) 6094 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6095 6096 SDValue Tmp; 6097 switch (Op.getSimpleValueType().SimpleTy) { 6098 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6099 case MVT::i32: 6100 Tmp = DAG.getNode( 6101 Op.getOpcode() == ISD::FP_TO_SINT 6102 ? PPCISD::FCTIWZ 6103 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6104 dl, MVT::f64, Src); 6105 break; 6106 case MVT::i64: 6107 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6108 "i64 FP_TO_UINT is supported only with FPCVT"); 6109 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 6110 PPCISD::FCTIDUZ, 6111 dl, MVT::f64, Src); 6112 break; 6113 } 6114 6115 // Convert the FP value to an int value through memory. 6116 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 6117 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()); 6118 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 6119 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 6120 MachinePointerInfo MPI = 6121 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); 6122 6123 // Emit a store to the stack slot. 
6124 SDValue Chain; 6125 if (i32Stack) { 6126 MachineFunction &MF = DAG.getMachineFunction(); 6127 MachineMemOperand *MMO = 6128 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 6129 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 6130 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 6131 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 6132 } else 6133 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, 6134 MPI, false, false, 0); 6135 6136 // Result is a load from the stack slot. If loading 4 bytes, make sure to 6137 // add in a bias. 6138 if (Op.getValueType() == MVT::i32 && !i32Stack) { 6139 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 6140 DAG.getConstant(4, dl, FIPtr.getValueType())); 6141 MPI = MPI.getWithOffset(4); 6142 } 6143 6144 RLI.Chain = Chain; 6145 RLI.Ptr = FIPtr; 6146 RLI.MPI = MPI; 6147 } 6148 6149 /// \brief Custom lowers floating point to integer conversions to use 6150 /// the direct move instructions available in ISA 2.07 to avoid the 6151 /// need for load/store combinations. 6152 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 6153 SelectionDAG &DAG, 6154 SDLoc dl) const { 6155 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6156 SDValue Src = Op.getOperand(0); 6157 6158 if (Src.getValueType() == MVT::f32) 6159 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6160 6161 SDValue Tmp; 6162 switch (Op.getSimpleValueType().SimpleTy) { 6163 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6164 case MVT::i32: 6165 Tmp = DAG.getNode( 6166 Op.getOpcode() == ISD::FP_TO_SINT 6167 ? PPCISD::FCTIWZ 6168 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6169 dl, MVT::f64, Src); 6170 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp); 6171 break; 6172 case MVT::i64: 6173 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6174 "i64 FP_TO_UINT is supported only with FPCVT"); 6175 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 6176 PPCISD::FCTIDUZ, 6177 dl, MVT::f64, Src); 6178 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp); 6179 break; 6180 } 6181 return Tmp; 6182 } 6183 6184 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 6185 SDLoc dl) const { 6186 if (Subtarget.hasDirectMove() && Subtarget.isPPC64()) 6187 return LowerFP_TO_INTDirectMove(Op, DAG, dl); 6188 6189 ReuseLoadInfo RLI; 6190 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6191 6192 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, false, 6193 false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo, 6194 RLI.Ranges); 6195 } 6196 6197 // We're trying to insert a regular store, S, and then a load, L. If the 6198 // incoming value, O, is a load, we might just be able to have our load use the 6199 // address used by O. However, we don't know if anything else will store to 6200 // that address before we can load from it. To prevent this situation, we need 6201 // to insert our load, L, into the chain as a peer of O. To do this, we give L 6202 // the same chain operand as O, we create a token factor from the chain results 6203 // of O and L, and we replace all uses of O's chain result with that token 6204 // factor (see spliceIntoChain below for this last part). 
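// Pictorially, the chain goes from
//   Ch -> O -> (users of O's chain result)
// to
//   Ch -> O, Ch -> L, TokenFactor(O, L) -> (former users of O's chain result).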
6205 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT, 6206 ReuseLoadInfo &RLI, 6207 SelectionDAG &DAG, 6208 ISD::LoadExtType ET) const { 6209 SDLoc dl(Op); 6210 if (ET == ISD::NON_EXTLOAD && 6211 (Op.getOpcode() == ISD::FP_TO_UINT || 6212 Op.getOpcode() == ISD::FP_TO_SINT) && 6213 isOperationLegalOrCustom(Op.getOpcode(), 6214 Op.getOperand(0).getValueType())) { 6215 6216 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6217 return true; 6218 } 6219 6220 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op); 6221 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() || 6222 LD->isNonTemporal()) 6223 return false; 6224 if (LD->getMemoryVT() != MemVT) 6225 return false; 6226 6227 RLI.Ptr = LD->getBasePtr(); 6228 if (LD->isIndexed() && LD->getOffset().getOpcode() != ISD::UNDEF) { 6229 assert(LD->getAddressingMode() == ISD::PRE_INC && 6230 "Non-pre-inc AM on PPC?"); 6231 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr, 6232 LD->getOffset()); 6233 } 6234 6235 RLI.Chain = LD->getChain(); 6236 RLI.MPI = LD->getPointerInfo(); 6237 RLI.IsInvariant = LD->isInvariant(); 6238 RLI.Alignment = LD->getAlignment(); 6239 RLI.AAInfo = LD->getAAInfo(); 6240 RLI.Ranges = LD->getRanges(); 6241 6242 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1); 6243 return true; 6244 } 6245 6246 // Given the head of the old chain, ResChain, insert a token factor containing 6247 // it and NewResChain, and make users of ResChain now be users of that token 6248 // factor. 6249 void PPCTargetLowering::spliceIntoChain(SDValue ResChain, 6250 SDValue NewResChain, 6251 SelectionDAG &DAG) const { 6252 if (!ResChain) 6253 return; 6254 6255 SDLoc dl(NewResChain); 6256 6257 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 6258 NewResChain, DAG.getUNDEF(MVT::Other)); 6259 assert(TF.getNode() != NewResChain.getNode() && 6260 "A new TF really is required here"); 6261 6262 DAG.ReplaceAllUsesOfValueWith(ResChain, TF); 6263 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain); 6264 } 6265 6266 /// \brief Custom lowers integer to floating point conversions to use 6267 /// the direct move instructions available in ISA 2.07 to avoid the 6268 /// need for load/store combinations. 6269 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op, 6270 SelectionDAG &DAG, 6271 SDLoc dl) const { 6272 assert((Op.getValueType() == MVT::f32 || 6273 Op.getValueType() == MVT::f64) && 6274 "Invalid floating point type as target of conversion"); 6275 assert(Subtarget.hasFPCVT() && 6276 "Int to FP conversions with direct moves require FPCVT"); 6277 SDValue FP; 6278 SDValue Src = Op.getOperand(0); 6279 bool SinglePrec = Op.getValueType() == MVT::f32; 6280 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32; 6281 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP; 6282 unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) : 6283 (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU); 6284 6285 if (WordInt) { 6286 FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ, 6287 dl, MVT::f64, Src); 6288 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 6289 } 6290 else { 6291 FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src); 6292 FP = DAG.getNode(ConvOp, dl, SinglePrec ? 
MVT::f32 : MVT::f64, FP); 6293 } 6294 6295 return FP; 6296 } 6297 6298 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, 6299 SelectionDAG &DAG) const { 6300 SDLoc dl(Op); 6301 6302 if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) { 6303 if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64) 6304 return SDValue(); 6305 6306 SDValue Value = Op.getOperand(0); 6307 // The values are now known to be -1 (false) or 1 (true). To convert this 6308 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 6309 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 6310 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 6311 6312 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::f64); 6313 FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64, 6314 FPHalfs, FPHalfs, FPHalfs, FPHalfs); 6315 6316 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 6317 6318 if (Op.getValueType() != MVT::v4f64) 6319 Value = DAG.getNode(ISD::FP_ROUND, dl, 6320 Op.getValueType(), Value, 6321 DAG.getIntPtrConstant(1, dl)); 6322 return Value; 6323 } 6324 6325 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 6326 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 6327 return SDValue(); 6328 6329 if (Op.getOperand(0).getValueType() == MVT::i1) 6330 return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0), 6331 DAG.getConstantFP(1.0, dl, Op.getValueType()), 6332 DAG.getConstantFP(0.0, dl, Op.getValueType())); 6333 6334 // If we have direct moves, we can do all the conversion, skip the store/load 6335 // however, without FPCVT we can't do most conversions. 6336 if (Subtarget.hasDirectMove() && Subtarget.isPPC64() && Subtarget.hasFPCVT()) 6337 return LowerINT_TO_FPDirectMove(Op, DAG, dl); 6338 6339 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) && 6340 "UINT_TO_FP is supported only with FPCVT"); 6341 6342 // If we have FCFIDS, then use it when converting to single-precision. 6343 // Otherwise, convert to double-precision and then round. 6344 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 6345 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS 6346 : PPCISD::FCFIDS) 6347 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU 6348 : PPCISD::FCFID); 6349 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 6350 ? MVT::f32 6351 : MVT::f64; 6352 6353 if (Op.getOperand(0).getValueType() == MVT::i64) { 6354 SDValue SINT = Op.getOperand(0); 6355 // When converting to single-precision, we actually need to convert 6356 // to double-precision first and then round to single-precision. 6357 // To avoid double-rounding effects during that operation, we have 6358 // to prepare the input operand. Bits that might be truncated when 6359 // converting to double-precision are replaced by a bit that won't 6360 // be lost at this stage, but is below the single-precision rounding 6361 // position. 6362 // 6363 // However, if -enable-unsafe-fp-math is in effect, accept double 6364 // rounding to avoid the extra overhead. 6365 if (Op.getValueType() == MVT::f32 && 6366 !Subtarget.hasFPCVT() && 6367 !DAG.getTarget().Options.UnsafeFPMath) { 6368 6369 // Twiddle input to make sure the low 11 bits are zero. (If this 6370 // is the case, we are guaranteed the value will fit into the 53 bit 6371 // mantissa of an IEEE double-precision value without rounding.) 
6372 // If any of those low 11 bits were not zero originally, make sure 6373 // bit 12 (value 2048) is set instead, so that the final rounding 6374 // to single-precision gets the correct result. 6375 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 6376 SINT, DAG.getConstant(2047, dl, MVT::i64)); 6377 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 6378 Round, DAG.getConstant(2047, dl, MVT::i64)); 6379 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 6380 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 6381 Round, DAG.getConstant(-2048, dl, MVT::i64)); 6382 6383 // However, we cannot use that value unconditionally: if the magnitude 6384 // of the input value is small, the bit-twiddling we did above might 6385 // end up visibly changing the output. Fortunately, in that case, we 6386 // don't need to twiddle bits since the original input will convert 6387 // exactly to double-precision floating-point already. Therefore, 6388 // construct a conditional to use the original value if the top 11 6389 // bits are all sign-bit copies, and use the rounded value computed 6390 // above otherwise. 6391 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 6392 SINT, DAG.getConstant(53, dl, MVT::i32)); 6393 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 6394 Cond, DAG.getConstant(1, dl, MVT::i64)); 6395 Cond = DAG.getSetCC(dl, MVT::i32, 6396 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT); 6397 6398 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 6399 } 6400 6401 ReuseLoadInfo RLI; 6402 SDValue Bits; 6403 6404 MachineFunction &MF = DAG.getMachineFunction(); 6405 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 6406 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, false, 6407 false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo, 6408 RLI.Ranges); 6409 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6410 } else if (Subtarget.hasLFIWAX() && 6411 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) { 6412 MachineMemOperand *MMO = 6413 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6414 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6415 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6416 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl, 6417 DAG.getVTList(MVT::f64, MVT::Other), 6418 Ops, MVT::i32, MMO); 6419 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6420 } else if (Subtarget.hasFPCVT() && 6421 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) { 6422 MachineMemOperand *MMO = 6423 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6424 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6425 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6426 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl, 6427 DAG.getVTList(MVT::f64, MVT::Other), 6428 Ops, MVT::i32, MMO); 6429 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6430 } else if (((Subtarget.hasLFIWAX() && 6431 SINT.getOpcode() == ISD::SIGN_EXTEND) || 6432 (Subtarget.hasFPCVT() && 6433 SINT.getOpcode() == ISD::ZERO_EXTEND)) && 6434 SINT.getOperand(0).getValueType() == MVT::i32) { 6435 MachineFrameInfo *FrameInfo = MF.getFrameInfo(); 6436 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 6437 6438 int FrameIdx = FrameInfo->CreateStackObject(4, 4, false); 6439 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6440 6441 SDValue Store = DAG.getStore( 6442 DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx, 6443 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx), 6444 false, false, 0); 6445 6446 assert(cast<StoreSDNode>(Store)->getMemoryVT() == 
MVT::i32 && 6447 "Expected an i32 store"); 6448 6449 RLI.Ptr = FIdx; 6450 RLI.Chain = Store; 6451 RLI.MPI = 6452 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 6453 RLI.Alignment = 4; 6454 6455 MachineMemOperand *MMO = 6456 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6457 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6458 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6459 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ? 6460 PPCISD::LFIWZX : PPCISD::LFIWAX, 6461 dl, DAG.getVTList(MVT::f64, MVT::Other), 6462 Ops, MVT::i32, MMO); 6463 } else 6464 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 6465 6466 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 6467 6468 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 6469 FP = DAG.getNode(ISD::FP_ROUND, dl, 6470 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 6471 return FP; 6472 } 6473 6474 assert(Op.getOperand(0).getValueType() == MVT::i32 && 6475 "Unhandled INT_TO_FP type in custom expander!"); 6476 // Since we only generate this in 64-bit mode, we can take advantage of 6477 // 64-bit registers. In particular, sign extend the input value into the 6478 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 6479 // then lfd it and fcfid it. 6480 MachineFunction &MF = DAG.getMachineFunction(); 6481 MachineFrameInfo *FrameInfo = MF.getFrameInfo(); 6482 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 6483 6484 SDValue Ld; 6485 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 6486 ReuseLoadInfo RLI; 6487 bool ReusingLoad; 6488 if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI, 6489 DAG))) { 6490 int FrameIdx = FrameInfo->CreateStackObject(4, 4, false); 6491 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6492 6493 SDValue Store = DAG.getStore( 6494 DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 6495 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx), 6496 false, false, 0); 6497 6498 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 6499 "Expected an i32 store"); 6500 6501 RLI.Ptr = FIdx; 6502 RLI.Chain = Store; 6503 RLI.MPI = 6504 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 6505 RLI.Alignment = 4; 6506 } 6507 6508 MachineMemOperand *MMO = 6509 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6510 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6511 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6512 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 6513 PPCISD::LFIWZX : PPCISD::LFIWAX, 6514 dl, DAG.getVTList(MVT::f64, MVT::Other), 6515 Ops, MVT::i32, MMO); 6516 if (ReusingLoad) 6517 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG); 6518 } else { 6519 assert(Subtarget.isPPC64() && 6520 "i32->FP without LFIWAX supported only on PPC64"); 6521 6522 int FrameIdx = FrameInfo->CreateStackObject(8, 8, false); 6523 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6524 6525 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, 6526 Op.getOperand(0)); 6527 6528 // STD the extended value into the stack slot. 6529 SDValue Store = DAG.getStore( 6530 DAG.getEntryNode(), dl, Ext64, FIdx, 6531 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx), 6532 false, false, 0); 6533 6534 // Load the value as a double. 6535 Ld = DAG.getLoad( 6536 MVT::f64, dl, Store, FIdx, 6537 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx), 6538 false, false, false, 0); 6539 } 6540 6541 // FCFID it and return it. 
6542 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld); 6543 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 6544 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, 6545 DAG.getIntPtrConstant(0, dl)); 6546 return FP; 6547 } 6548 6549 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 6550 SelectionDAG &DAG) const { 6551 SDLoc dl(Op); 6552 /* 6553 The rounding mode is in bits 30:31 of FPSR, and has the following 6554 settings: 6555 00 Round to nearest 6556 01 Round to 0 6557 10 Round to +inf 6558 11 Round to -inf 6559 6560 FLT_ROUNDS, on the other hand, expects the following: 6561 -1 Undefined 6562 0 Round to 0 6563 1 Round to nearest 6564 2 Round to +inf 6565 3 Round to -inf 6566 6567 To perform the conversion, we do: 6568 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1)) 6569 */ 6570 6571 MachineFunction &MF = DAG.getMachineFunction(); 6572 EVT VT = Op.getValueType(); 6573 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 6574 6575 // Save FP Control Word to register 6576 EVT NodeTys[] = { 6577 MVT::f64, // return register 6578 MVT::Glue // unused in this context 6579 }; 6580 SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None); 6581 6582 // Save FP register to stack slot 6583 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false); 6584 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT); 6585 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, 6586 StackSlot, MachinePointerInfo(), false, false,0); 6587 6588 // Load FP Control Word from low 32 bits of stack slot. 6589 SDValue Four = DAG.getConstant(4, dl, PtrVT); 6590 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four); 6591 SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(), 6592 false, false, false, 0); 6593 6594 // Transform as necessary 6595 SDValue CWD1 = 6596 DAG.getNode(ISD::AND, dl, MVT::i32, 6597 CWD, DAG.getConstant(3, dl, MVT::i32)); 6598 SDValue CWD2 = 6599 DAG.getNode(ISD::SRL, dl, MVT::i32, 6600 DAG.getNode(ISD::AND, dl, MVT::i32, 6601 DAG.getNode(ISD::XOR, dl, MVT::i32, 6602 CWD, DAG.getConstant(3, dl, MVT::i32)), 6603 DAG.getConstant(3, dl, MVT::i32)), 6604 DAG.getConstant(1, dl, MVT::i32)); 6605 6606 SDValue RetVal = 6607 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2); 6608 6609 return DAG.getNode((VT.getSizeInBits() < 16 ? 6610 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal); 6611 } 6612 6613 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const { 6614 EVT VT = Op.getValueType(); 6615 unsigned BitWidth = VT.getSizeInBits(); 6616 SDLoc dl(Op); 6617 assert(Op.getNumOperands() == 3 && 6618 VT == Op.getOperand(1).getValueType() && 6619 "Unexpected SHL!"); 6620 6621 // Expand into a bunch of logical ops. Note that these ops 6622 // depend on the PPC behavior for oversized shift amounts. 
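  // In effect, for a shift of the (Hi, Lo) pair left by Amt:
  //   OutLo = Lo << Amt
  //   OutHi = (Hi << Amt) | (Lo >> (BitWidth - Amt)) | (Lo << (Amt - BitWidth))
  // Shifts by BitWidth or more produce 0 on PPC, so whichever Lo term has an
  // out-of-range amount simply drops out.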
6623 SDValue Lo = Op.getOperand(0); 6624 SDValue Hi = Op.getOperand(1); 6625 SDValue Amt = Op.getOperand(2); 6626 EVT AmtVT = Amt.getValueType(); 6627 6628 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6629 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6630 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 6631 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 6632 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 6633 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6634 DAG.getConstant(-BitWidth, dl, AmtVT)); 6635 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 6636 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 6637 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 6638 SDValue OutOps[] = { OutLo, OutHi }; 6639 return DAG.getMergeValues(OutOps, dl); 6640 } 6641 6642 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 6643 EVT VT = Op.getValueType(); 6644 SDLoc dl(Op); 6645 unsigned BitWidth = VT.getSizeInBits(); 6646 assert(Op.getNumOperands() == 3 && 6647 VT == Op.getOperand(1).getValueType() && 6648 "Unexpected SRL!"); 6649 6650 // Expand into a bunch of logical ops. Note that these ops 6651 // depend on the PPC behavior for oversized shift amounts. 6652 SDValue Lo = Op.getOperand(0); 6653 SDValue Hi = Op.getOperand(1); 6654 SDValue Amt = Op.getOperand(2); 6655 EVT AmtVT = Amt.getValueType(); 6656 6657 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6658 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6659 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 6660 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 6661 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 6662 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6663 DAG.getConstant(-BitWidth, dl, AmtVT)); 6664 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 6665 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 6666 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 6667 SDValue OutOps[] = { OutLo, OutHi }; 6668 return DAG.getMergeValues(OutOps, dl); 6669 } 6670 6671 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 6672 SDLoc dl(Op); 6673 EVT VT = Op.getValueType(); 6674 unsigned BitWidth = VT.getSizeInBits(); 6675 assert(Op.getNumOperands() == 3 && 6676 VT == Op.getOperand(1).getValueType() && 6677 "Unexpected SRA!"); 6678 6679 // Expand into a bunch of logical ops, followed by a select_cc. 6680 SDValue Lo = Op.getOperand(0); 6681 SDValue Hi = Op.getOperand(1); 6682 SDValue Amt = Op.getOperand(2); 6683 EVT AmtVT = Amt.getValueType(); 6684 6685 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6686 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6687 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 6688 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 6689 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 6690 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6691 DAG.getConstant(-BitWidth, dl, AmtVT)); 6692 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 6693 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 6694 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT), 6695 Tmp4, Tmp6, ISD::SETLE); 6696 SDValue OutOps[] = { OutLo, OutHi }; 6697 return DAG.getMergeValues(OutOps, dl); 6698 } 6699 6700 //===----------------------------------------------------------------------===// 6701 // Vector related lowering. 
6702 // 6703 6704 /// BuildSplatI - Build a canonical splati of Val with an element size of 6705 /// SplatSize. Cast the result to VT. 6706 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 6707 SelectionDAG &DAG, SDLoc dl) { 6708 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 6709 6710 static const MVT VTys[] = { // canonical VT to use for each size. 6711 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 6712 }; 6713 6714 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 6715 6716 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 6717 if (Val == -1) 6718 SplatSize = 1; 6719 6720 EVT CanonicalVT = VTys[SplatSize-1]; 6721 6722 // Build a canonical splat for this value. 6723 SDValue Elt = DAG.getConstant(Val, dl, MVT::i32); 6724 SmallVector<SDValue, 8> Ops; 6725 Ops.assign(CanonicalVT.getVectorNumElements(), Elt); 6726 SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, Ops); 6727 return DAG.getNode(ISD::BITCAST, dl, ReqVT, Res); 6728 } 6729 6730 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 6731 /// specified intrinsic ID. 6732 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, 6733 SelectionDAG &DAG, SDLoc dl, 6734 EVT DestVT = MVT::Other) { 6735 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 6736 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 6737 DAG.getConstant(IID, dl, MVT::i32), Op); 6738 } 6739 6740 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 6741 /// specified intrinsic ID. 6742 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 6743 SelectionDAG &DAG, SDLoc dl, 6744 EVT DestVT = MVT::Other) { 6745 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 6746 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 6747 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS); 6748 } 6749 6750 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 6751 /// specified intrinsic ID. 6752 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 6753 SDValue Op2, SelectionDAG &DAG, 6754 SDLoc dl, EVT DestVT = MVT::Other) { 6755 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 6756 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 6757 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2); 6758 } 6759 6760 6761 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 6762 /// amount. The result has the specified value type. 6763 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, 6764 EVT VT, SelectionDAG &DAG, SDLoc dl) { 6765 // Force LHS/RHS to be the right type. 6766 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 6767 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 6768 6769 int Ops[16]; 6770 for (unsigned i = 0; i != 16; ++i) 6771 Ops[i] = i + Amt; 6772 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 6773 return DAG.getNode(ISD::BITCAST, dl, VT, T); 6774 } 6775 6776 // If this is a case we can't handle, return null and let the default 6777 // expansion code take care of it. If we CAN select this case, and if it 6778 // selects to a single instruction, return Op. Otherwise, if we can codegen 6779 // this case more efficiently than a constant pool load, lower it to the 6780 // sequence of ops that should be used. 
6781 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 6782 SelectionDAG &DAG) const { 6783 SDLoc dl(Op); 6784 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 6785 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 6786 6787 if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) { 6788 // We first build an i32 vector, load it into a QPX register, 6789 // then convert it to a floating-point vector and compare it 6790 // to a zero vector to get the boolean result. 6791 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 6792 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 6793 MachinePointerInfo PtrInfo = 6794 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 6795 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6796 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6797 6798 assert(BVN->getNumOperands() == 4 && 6799 "BUILD_VECTOR for v4i1 does not have 4 operands"); 6800 6801 bool IsConst = true; 6802 for (unsigned i = 0; i < 4; ++i) { 6803 if (BVN->getOperand(i).getOpcode() == ISD::UNDEF) continue; 6804 if (!isa<ConstantSDNode>(BVN->getOperand(i))) { 6805 IsConst = false; 6806 break; 6807 } 6808 } 6809 6810 if (IsConst) { 6811 Constant *One = 6812 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0); 6813 Constant *NegOne = 6814 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0); 6815 6816 SmallVector<Constant*, 4> CV(4, NegOne); 6817 for (unsigned i = 0; i < 4; ++i) { 6818 if (BVN->getOperand(i).getOpcode() == ISD::UNDEF) 6819 CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext())); 6820 else if (cast<ConstantSDNode>(BVN->getOperand(i))-> 6821 getConstantIntValue()->isZero()) 6822 continue; 6823 else 6824 CV[i] = One; 6825 } 6826 6827 Constant *CP = ConstantVector::get(CV); 6828 SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()), 6829 16 /* alignment */); 6830 6831 SmallVector<SDValue, 2> Ops; 6832 Ops.push_back(DAG.getEntryNode()); 6833 Ops.push_back(CPIdx); 6834 6835 SmallVector<EVT, 2> ValueVTs; 6836 ValueVTs.push_back(MVT::v4i1); 6837 ValueVTs.push_back(MVT::Other); // chain 6838 SDVTList VTs = DAG.getVTList(ValueVTs); 6839 6840 return DAG.getMemIntrinsicNode( 6841 PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32, 6842 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 6843 } 6844 6845 SmallVector<SDValue, 4> Stores; 6846 for (unsigned i = 0; i < 4; ++i) { 6847 if (BVN->getOperand(i).getOpcode() == ISD::UNDEF) continue; 6848 6849 unsigned Offset = 4*i; 6850 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 6851 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 6852 6853 unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize(); 6854 if (StoreSize > 4) { 6855 Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl, 6856 BVN->getOperand(i), Idx, 6857 PtrInfo.getWithOffset(Offset), 6858 MVT::i32, false, false, 0)); 6859 } else { 6860 SDValue StoreValue = BVN->getOperand(i); 6861 if (StoreSize < 4) 6862 StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue); 6863 6864 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, 6865 StoreValue, Idx, 6866 PtrInfo.getWithOffset(Offset), 6867 false, false, 0)); 6868 } 6869 } 6870 6871 SDValue StoreChain; 6872 if (!Stores.empty()) 6873 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 6874 else 6875 StoreChain = DAG.getEntryNode(); 6876 6877 // Now load from v4i32 into the QPX register; this will extend it to 6878 // v4i64 but not yet 
convert it to a floating point. Nevertheless, this 6879 // is typed as v4f64 because the QPX register integer states are not 6880 // explicitly represented. 6881 6882 SmallVector<SDValue, 2> Ops; 6883 Ops.push_back(StoreChain); 6884 Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32)); 6885 Ops.push_back(FIdx); 6886 6887 SmallVector<EVT, 2> ValueVTs; 6888 ValueVTs.push_back(MVT::v4f64); 6889 ValueVTs.push_back(MVT::Other); // chain 6890 SDVTList VTs = DAG.getVTList(ValueVTs); 6891 6892 SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, 6893 dl, VTs, Ops, MVT::v4i32, PtrInfo); 6894 LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 6895 DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32), 6896 LoadedVect); 6897 6898 SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::f64); 6899 FPZeros = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64, 6900 FPZeros, FPZeros, FPZeros, FPZeros); 6901 6902 return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ); 6903 } 6904 6905 // All other QPX vectors are handled by generic code. 6906 if (Subtarget.hasQPX()) 6907 return SDValue(); 6908 6909 // Check if this is a splat of a constant value. 6910 APInt APSplatBits, APSplatUndef; 6911 unsigned SplatBitSize; 6912 bool HasAnyUndefs; 6913 if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize, 6914 HasAnyUndefs, 0, !Subtarget.isLittleEndian()) || 6915 SplatBitSize > 32) 6916 return SDValue(); 6917 6918 unsigned SplatBits = APSplatBits.getZExtValue(); 6919 unsigned SplatUndef = APSplatUndef.getZExtValue(); 6920 unsigned SplatSize = SplatBitSize / 8; 6921 6922 // First, handle single instruction cases. 6923 6924 // All zeros? 6925 if (SplatBits == 0) { 6926 // Canonicalize all zero vectors to be v4i32. 6927 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 6928 SDValue Z = DAG.getConstant(0, dl, MVT::i32); 6929 Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z); 6930 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z); 6931 } 6932 return Op; 6933 } 6934 6935 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 6936 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >> 6937 (32-SplatBitSize)); 6938 if (SextVal >= -16 && SextVal <= 15) 6939 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl); 6940 6941 6942 // Two instruction sequences. 6943 6944 // If this value is in the range [-32,30] and is even, use: 6945 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2) 6946 // If this value is in the range [17,31] and is odd, use: 6947 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16) 6948 // If this value is in the range [-31,-17] and is odd, use: 6949 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16) 6950 // Note the last two are three-instruction sequences. 6951 if (SextVal >= -32 && SextVal <= 31) { 6952 // To avoid having these optimizations undone by constant folding, 6953 // we convert to a pseudo that will be expanded later into one of 6954 // the above forms. 6955 SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32); 6956 EVT VT = (SplatSize == 1 ? MVT::v16i8 : 6957 (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32)); 6958 SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32); 6959 SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize); 6960 if (VT == Op.getValueType()) 6961 return RetVal; 6962 else 6963 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal); 6964 } 6965 6966 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. 
If it is 6967 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important 6968 // for fneg/fabs. 6969 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) { 6970 // Make -1 and vspltisw -1: 6971 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl); 6972 6973 // Make the VSLW intrinsic, computing 0x8000_0000. 6974 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV, 6975 OnesV, DAG, dl); 6976 6977 // xor by OnesV to invert it. 6978 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV); 6979 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 6980 } 6981 6982 // Check to see if this is a wide variety of vsplti*, binop self cases. 6983 static const signed char SplatCsts[] = { 6984 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7, 6985 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16 6986 }; 6987 6988 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) { 6989 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for 6990 // cases which are ambiguous (e.g. formation of 0x8000_0000). 'vsplti -1' 6991 int i = SplatCsts[idx]; 6992 6993 // Figure out what shift amount will be used by altivec if shifted by i in 6994 // this splat size. 6995 unsigned TypeShiftAmt = i & (SplatBitSize-1); 6996 6997 // vsplti + shl self. 6998 if (SextVal == (int)((unsigned)i << TypeShiftAmt)) { 6999 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 7000 static const unsigned IIDs[] = { // Intrinsic to use for each size. 7001 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0, 7002 Intrinsic::ppc_altivec_vslw 7003 }; 7004 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 7005 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 7006 } 7007 7008 // vsplti + srl self. 7009 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 7010 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 7011 static const unsigned IIDs[] = { // Intrinsic to use for each size. 7012 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0, 7013 Intrinsic::ppc_altivec_vsrw 7014 }; 7015 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 7016 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 7017 } 7018 7019 // vsplti + sra self. 7020 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 7021 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 7022 static const unsigned IIDs[] = { // Intrinsic to use for each size. 7023 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0, 7024 Intrinsic::ppc_altivec_vsraw 7025 }; 7026 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 7027 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 7028 } 7029 7030 // vsplti + rol self. 7031 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) | 7032 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) { 7033 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 7034 static const unsigned IIDs[] = { // Intrinsic to use for each size. 7035 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 7036 Intrinsic::ppc_altivec_vrlw 7037 }; 7038 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 7039 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 7040 } 7041 7042 // t = vsplti c, result = vsldoi t, t, 1 7043 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) { 7044 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 7045 unsigned Amt = Subtarget.isLittleEndian() ? 
15 : 1; 7046 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 7047 } 7048 // t = vsplti c, result = vsldoi t, t, 2 7049 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) { 7050 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 7051 unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2; 7052 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 7053 } 7054 // t = vsplti c, result = vsldoi t, t, 3 7055 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) { 7056 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 7057 unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3; 7058 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 7059 } 7060 } 7061 7062 return SDValue(); 7063 } 7064 7065 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 7066 /// the specified operations to build the shuffle. 7067 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 7068 SDValue RHS, SelectionDAG &DAG, 7069 SDLoc dl) { 7070 unsigned OpNum = (PFEntry >> 26) & 0x0F; 7071 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 7072 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 7073 7074 enum { 7075 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 7076 OP_VMRGHW, 7077 OP_VMRGLW, 7078 OP_VSPLTISW0, 7079 OP_VSPLTISW1, 7080 OP_VSPLTISW2, 7081 OP_VSPLTISW3, 7082 OP_VSLDOI4, 7083 OP_VSLDOI8, 7084 OP_VSLDOI12 7085 }; 7086 7087 if (OpNum == OP_COPY) { 7088 if (LHSID == (1*9+2)*9+3) return LHS; 7089 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 7090 return RHS; 7091 } 7092 7093 SDValue OpLHS, OpRHS; 7094 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 7095 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 7096 7097 int ShufIdxs[16]; 7098 switch (OpNum) { 7099 default: llvm_unreachable("Unknown i32 permute!"); 7100 case OP_VMRGHW: 7101 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 7102 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 7103 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 7104 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 7105 break; 7106 case OP_VMRGLW: 7107 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 7108 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 7109 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 7110 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 7111 break; 7112 case OP_VSPLTISW0: 7113 for (unsigned i = 0; i != 16; ++i) 7114 ShufIdxs[i] = (i&3)+0; 7115 break; 7116 case OP_VSPLTISW1: 7117 for (unsigned i = 0; i != 16; ++i) 7118 ShufIdxs[i] = (i&3)+4; 7119 break; 7120 case OP_VSPLTISW2: 7121 for (unsigned i = 0; i != 16; ++i) 7122 ShufIdxs[i] = (i&3)+8; 7123 break; 7124 case OP_VSPLTISW3: 7125 for (unsigned i = 0; i != 16; ++i) 7126 ShufIdxs[i] = (i&3)+12; 7127 break; 7128 case OP_VSLDOI4: 7129 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl); 7130 case OP_VSLDOI8: 7131 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl); 7132 case OP_VSLDOI12: 7133 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl); 7134 } 7135 EVT VT = OpLHS.getValueType(); 7136 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS); 7137 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS); 7138 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, 
ShufIdxs); 7139 return DAG.getNode(ISD::BITCAST, dl, VT, T); 7140 } 7141 7142 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 7143 /// is a shuffle we can handle in a single instruction, return it. Otherwise, 7144 /// return the code it can be lowered into. Worst case, it can always be 7145 /// lowered into a vperm. 7146 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 7147 SelectionDAG &DAG) const { 7148 SDLoc dl(Op); 7149 SDValue V1 = Op.getOperand(0); 7150 SDValue V2 = Op.getOperand(1); 7151 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 7152 EVT VT = Op.getValueType(); 7153 bool isLittleEndian = Subtarget.isLittleEndian(); 7154 7155 if (Subtarget.hasQPX()) { 7156 if (VT.getVectorNumElements() != 4) 7157 return SDValue(); 7158 7159 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 7160 7161 int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp); 7162 if (AlignIdx != -1) { 7163 return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2, 7164 DAG.getConstant(AlignIdx, dl, MVT::i32)); 7165 } else if (SVOp->isSplat()) { 7166 int SplatIdx = SVOp->getSplatIndex(); 7167 if (SplatIdx >= 4) { 7168 std::swap(V1, V2); 7169 SplatIdx -= 4; 7170 } 7171 7172 // FIXME: If SplatIdx == 0 and the input came from a load, then there is 7173 // nothing to do. 7174 7175 return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1, 7176 DAG.getConstant(SplatIdx, dl, MVT::i32)); 7177 } 7178 7179 // Lower this into a qvgpci/qvfperm pair. 7180 7181 // Compute the qvgpci literal 7182 unsigned idx = 0; 7183 for (unsigned i = 0; i < 4; ++i) { 7184 int m = SVOp->getMaskElt(i); 7185 unsigned mm = m >= 0 ? (unsigned) m : i; 7186 idx |= mm << (3-i)*3; 7187 } 7188 7189 SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64, 7190 DAG.getConstant(idx, dl, MVT::i32)); 7191 return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3); 7192 } 7193 7194 // Cases that are handled by instructions that take permute immediates 7195 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 7196 // selected by the instruction selector. 7197 if (V2.getOpcode() == ISD::UNDEF) { 7198 if (PPC::isSplatShuffleMask(SVOp, 1) || 7199 PPC::isSplatShuffleMask(SVOp, 2) || 7200 PPC::isSplatShuffleMask(SVOp, 4) || 7201 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) || 7202 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) || 7203 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 || 7204 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) || 7205 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) || 7206 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) || 7207 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) || 7208 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) || 7209 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) || 7210 (Subtarget.hasP8Altivec() && ( 7211 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) || 7212 PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) || 7213 PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) { 7214 return Op; 7215 } 7216 } 7217 7218 // Altivec has a variety of "shuffle immediates" that take two vector inputs 7219 // and produce a fixed permutation. If any of these match, do not lower to 7220 // VPERM. 7221 unsigned int ShuffleKind = isLittleEndian ? 
2 : 0; 7222 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) || 7223 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) || 7224 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 || 7225 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) || 7226 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) || 7227 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) || 7228 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) || 7229 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) || 7230 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) || 7231 (Subtarget.hasP8Altivec() && ( 7232 PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) || 7233 PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) || 7234 PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG)))) 7235 return Op; 7236 7237 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 7238 // perfect shuffle table to emit an optimal matching sequence. 7239 ArrayRef<int> PermMask = SVOp->getMask(); 7240 7241 unsigned PFIndexes[4]; 7242 bool isFourElementShuffle = true; 7243 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 7244 unsigned EltNo = 8; // Start out undef. 7245 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 7246 if (PermMask[i*4+j] < 0) 7247 continue; // Undef, ignore it. 7248 7249 unsigned ByteSource = PermMask[i*4+j]; 7250 if ((ByteSource & 3) != j) { 7251 isFourElementShuffle = false; 7252 break; 7253 } 7254 7255 if (EltNo == 8) { 7256 EltNo = ByteSource/4; 7257 } else if (EltNo != ByteSource/4) { 7258 isFourElementShuffle = false; 7259 break; 7260 } 7261 } 7262 PFIndexes[i] = EltNo; 7263 } 7264 7265 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 7266 // perfect shuffle vector to determine if it is cost effective to do this as 7267 // discrete instructions, or whether we should use a vperm. 7268 // For now, we skip this for little endian until such time as we have a 7269 // little-endian perfect shuffle table. 7270 if (isFourElementShuffle && !isLittleEndian) { 7271 // Compute the index in the perfect shuffle table. 7272 unsigned PFTableIndex = 7273 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 7274 7275 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 7276 unsigned Cost = (PFEntry >> 30); 7277 7278 // Determining when to avoid vperm is tricky. Many things affect the cost 7279 // of vperm, particularly how many times the perm mask needs to be computed. 7280 // For example, if the perm mask can be hoisted out of a loop or is already 7281 // used (perhaps because there are multiple permutes with the same shuffle 7282 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 7283 // the loop requires an extra register. 7284 // 7285 // As a compromise, we only emit discrete instructions if the shuffle can be 7286 // generated in 3 or fewer operations. When we have loop information 7287 // available, if this block is within a loop, we should avoid using vperm 7288 // for 3-operation perms and use a constant pool load instead. 7289 if (Cost < 3) 7290 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 7291 } 7292 7293 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 7294 // vector that will get spilled to the constant pool. 7295 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 7296 7297 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 7298 // that it is in input element units, not in bytes. Convert now. 
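// For example, with v4i32 inputs (BytesPerElement == 4), mask element 2
// expands to byte indices 8, 9, 10 and 11 of the vperm control vector
// (before the little-endian adjustment described below).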
7299 7300 // For little endian, the order of the input vectors is reversed, and 7301 // the permutation mask is complemented with respect to 31. This is 7302 // necessary to produce proper semantics with the big-endian-biased vperm 7303 // instruction. 7304 EVT EltVT = V1.getValueType().getVectorElementType(); 7305 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 7306 7307 SmallVector<SDValue, 16> ResultMask; 7308 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 7309 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i]; 7310 7311 for (unsigned j = 0; j != BytesPerElement; ++j) 7312 if (isLittleEndian) 7313 ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j), 7314 dl, MVT::i32)); 7315 else 7316 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl, 7317 MVT::i32)); 7318 } 7319 7320 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, 7321 ResultMask); 7322 if (isLittleEndian) 7323 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 7324 V2, V1, VPermMask); 7325 else 7326 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 7327 V1, V2, VPermMask); 7328 } 7329 7330 /// getAltivecCompareInfo - Given an intrinsic, return false if it is not an 7331 /// altivec comparison. If it is, return true and fill in Opc/isDot with 7332 /// information about the intrinsic. 7333 static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, 7334 bool &isDot, const PPCSubtarget &Subtarget) { 7335 unsigned IntrinsicID = 7336 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 7337 CompareOpc = -1; 7338 isDot = false; 7339 switch (IntrinsicID) { 7340 default: return false; 7341 // Comparison predicates. 7342 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; 7343 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; 7344 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; 7345 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; 7346 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; 7347 case Intrinsic::ppc_altivec_vcmpequd_p: 7348 if (Subtarget.hasP8Altivec()) { 7349 CompareOpc = 199; 7350 isDot = 1; 7351 } 7352 else 7353 return false; 7354 7355 break; 7356 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; 7357 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; 7358 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; 7359 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; 7360 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; 7361 case Intrinsic::ppc_altivec_vcmpgtsd_p: 7362 if (Subtarget.hasP8Altivec()) { 7363 CompareOpc = 967; 7364 isDot = 1; 7365 } 7366 else 7367 return false; 7368 7369 break; 7370 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; 7371 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; 7372 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; 7373 case Intrinsic::ppc_altivec_vcmpgtud_p: 7374 if (Subtarget.hasP8Altivec()) { 7375 CompareOpc = 711; 7376 isDot = 1; 7377 } 7378 else 7379 return false; 7380 7381 break; 7382 7383 // Normal Comparisons. 
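// Note: the CompareOpc values in this table are the VC-form extended opcodes
// of the corresponding AltiVec compare instructions (e.g. vcmpequb is 6,
// vcmpeqfp is 198, vcmpbfp is 966); the predicate (_p) intrinsics above reuse
// the same opcodes, with isDot selecting the record (dot) form.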
7384 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 7385 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 7386 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 7387 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 7388 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 7389 case Intrinsic::ppc_altivec_vcmpequd: 7390 if (Subtarget.hasP8Altivec()) { 7391 CompareOpc = 199; 7392 isDot = 0; 7393 } 7394 else 7395 return false; 7396 7397 break; 7398 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 7399 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 7400 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 7401 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 7402 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 7403 case Intrinsic::ppc_altivec_vcmpgtsd: 7404 if (Subtarget.hasP8Altivec()) { 7405 CompareOpc = 967; 7406 isDot = 0; 7407 } 7408 else 7409 return false; 7410 7411 break; 7412 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 7413 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 7414 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 7415 case Intrinsic::ppc_altivec_vcmpgtud: 7416 if (Subtarget.hasP8Altivec()) { 7417 CompareOpc = 711; 7418 isDot = 0; 7419 } 7420 else 7421 return false; 7422 7423 break; 7424 } 7425 return true; 7426 } 7427 7428 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 7429 /// lower, do it, otherwise return null. 7430 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 7431 SelectionDAG &DAG) const { 7432 // If this is a lowered altivec predicate compare, CompareOpc is set to the 7433 // opcode number of the comparison. 7434 SDLoc dl(Op); 7435 int CompareOpc; 7436 bool isDot; 7437 if (!getAltivecCompareInfo(Op, CompareOpc, isDot, Subtarget)) 7438 return SDValue(); // Don't custom lower most intrinsics. 7439 7440 // If this is a non-dot comparison, make the VCMP node and we are done. 7441 if (!isDot) { 7442 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 7443 Op.getOperand(1), Op.getOperand(2), 7444 DAG.getConstant(CompareOpc, dl, MVT::i32)); 7445 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 7446 } 7447 7448 // Create the PPCISD altivec 'dot' comparison node. 7449 SDValue Ops[] = { 7450 Op.getOperand(2), // LHS 7451 Op.getOperand(3), // RHS 7452 DAG.getConstant(CompareOpc, dl, MVT::i32) 7453 }; 7454 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue }; 7455 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 7456 7457 // Now that we have the comparison, emit a copy from the CR to a GPR. 7458 // This is flagged to the above dot comparison. 7459 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32, 7460 DAG.getRegister(PPC::CR6, MVT::i32), 7461 CompNode.getValue(1)); 7462 7463 // Unpack the result based on how the target uses it. 7464 unsigned BitNo; // Bit # of CR6. 7465 bool InvertBit; // Invert result? 7466 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 7467 default: // Can't happen, don't crash on invalid number though. 7468 case 0: // Return the value of the EQ bit of CR6. 7469 BitNo = 0; InvertBit = false; 7470 break; 7471 case 1: // Return the inverted value of the EQ bit of CR6. 
BitNo = 0; InvertBit = true;
7473 break;
7474 case 2: // Return the value of the LT bit of CR6.
7475 BitNo = 2; InvertBit = false;
7476 break;
7477 case 3: // Return the inverted value of the LT bit of CR6.
7478 BitNo = 2; InvertBit = true;
7479 break;
7480 }
7481
7482 // Shift the bit into the low position.
7483 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
7484 DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
7485 // Isolate the bit.
7486 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
7487 DAG.getConstant(1, dl, MVT::i32));
7488
7489 // If we are supposed to, toggle the bit.
7490 if (InvertBit)
7491 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
7492 DAG.getConstant(1, dl, MVT::i32));
7493 return Flags;
7494 }
7495
7496 SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
7497 SelectionDAG &DAG) const {
7498 SDLoc dl(Op);
7499 // For v2i64 (VSX), we can pattern match the v2i32 case (using fp <-> int
7500 // instructions), but for smaller types, we need to first extend up to v2i32
7501 // before doing so.
7502 if (Op.getValueType() == MVT::v2i64) {
7503 EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
7504 if (ExtVT != MVT::v2i32) {
7505 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0));
7506 Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op,
7507 DAG.getValueType(EVT::getVectorVT(*DAG.getContext(),
7508 ExtVT.getVectorElementType(), 4)));
7509 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op);
7510 Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op,
7511 DAG.getValueType(MVT::v2i32));
7512 }
7513
7514 return Op;
7515 }
7516
7517 return SDValue();
7518 }
7519
7520 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
7521 SelectionDAG &DAG) const {
7522 SDLoc dl(Op);
7523 // Create a stack slot that is 16-byte aligned.
7524 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
7525 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false);
7526 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7527 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
7528
7529 // Store the input value into Value#0 of the stack slot.
7530 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
7531 Op.getOperand(0), FIdx, MachinePointerInfo(),
7532 false, false, 0);
7533 // Load it out.
7534 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo(),
7535 false, false, false, 0);
7536 }
7537
7538 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
7539 SelectionDAG &DAG) const {
7540 SDLoc dl(Op);
7541 SDNode *N = Op.getNode();
7542
7543 assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
7544 "Unknown extract_vector_elt type");
7545
7546 SDValue Value = N->getOperand(0);
7547
7548 // The first part of this is like the store lowering except that we don't
7549 // need to track the chain.
7550
7551 // The values are now known to be -1 (false) or 1 (true). To convert this
7552 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
7553 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
7554 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
7555
7556 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
7557 // understand how to form the extending load.
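// For example, if Value is <-1.0, 1.0, 1.0, -1.0>, the FMA below computes
// 0.5*(-1.0)+0.5 = 0.0 and 0.5*(1.0)+0.5 = 1.0, giving <0.0, 1.0, 1.0, 0.0>,
// which the qvfctiwu conversion then turns into the integer lanes <0, 1, 1, 0>.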
7558 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::f64); 7559 FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64, 7560 FPHalfs, FPHalfs, FPHalfs, FPHalfs); 7561 7562 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 7563 7564 // Now convert to an integer and store. 7565 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 7566 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 7567 Value); 7568 7569 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 7570 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 7571 MachinePointerInfo PtrInfo = 7572 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7573 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7574 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7575 7576 SDValue StoreChain = DAG.getEntryNode(); 7577 SmallVector<SDValue, 2> Ops; 7578 Ops.push_back(StoreChain); 7579 Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32)); 7580 Ops.push_back(Value); 7581 Ops.push_back(FIdx); 7582 7583 SmallVector<EVT, 2> ValueVTs; 7584 ValueVTs.push_back(MVT::Other); // chain 7585 SDVTList VTs = DAG.getVTList(ValueVTs); 7586 7587 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 7588 dl, VTs, Ops, MVT::v4i32, PtrInfo); 7589 7590 // Extract the value requested. 7591 unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 7592 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 7593 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 7594 7595 SDValue IntVal = DAG.getLoad(MVT::i32, dl, StoreChain, Idx, 7596 PtrInfo.getWithOffset(Offset), 7597 false, false, false, 0); 7598 7599 if (!Subtarget.useCRBits()) 7600 return IntVal; 7601 7602 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal); 7603 } 7604 7605 /// Lowering for QPX v4i1 loads 7606 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op, 7607 SelectionDAG &DAG) const { 7608 SDLoc dl(Op); 7609 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode()); 7610 SDValue LoadChain = LN->getChain(); 7611 SDValue BasePtr = LN->getBasePtr(); 7612 7613 if (Op.getValueType() == MVT::v4f64 || 7614 Op.getValueType() == MVT::v4f32) { 7615 EVT MemVT = LN->getMemoryVT(); 7616 unsigned Alignment = LN->getAlignment(); 7617 7618 // If this load is properly aligned, then it is legal. 
7619 if (Alignment >= MemVT.getStoreSize()) 7620 return Op; 7621 7622 EVT ScalarVT = Op.getValueType().getScalarType(), 7623 ScalarMemVT = MemVT.getScalarType(); 7624 unsigned Stride = ScalarMemVT.getStoreSize(); 7625 7626 SmallVector<SDValue, 8> Vals, LoadChains; 7627 for (unsigned Idx = 0; Idx < 4; ++Idx) { 7628 SDValue Load; 7629 if (ScalarVT != ScalarMemVT) 7630 Load = 7631 DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain, 7632 BasePtr, 7633 LN->getPointerInfo().getWithOffset(Idx*Stride), 7634 ScalarMemVT, LN->isVolatile(), LN->isNonTemporal(), 7635 LN->isInvariant(), MinAlign(Alignment, Idx*Stride), 7636 LN->getAAInfo()); 7637 else 7638 Load = 7639 DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr, 7640 LN->getPointerInfo().getWithOffset(Idx*Stride), 7641 LN->isVolatile(), LN->isNonTemporal(), 7642 LN->isInvariant(), MinAlign(Alignment, Idx*Stride), 7643 LN->getAAInfo()); 7644 7645 if (Idx == 0 && LN->isIndexed()) { 7646 assert(LN->getAddressingMode() == ISD::PRE_INC && 7647 "Unknown addressing mode on vector load"); 7648 Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(), 7649 LN->getAddressingMode()); 7650 } 7651 7652 Vals.push_back(Load); 7653 LoadChains.push_back(Load.getValue(1)); 7654 7655 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 7656 DAG.getConstant(Stride, dl, 7657 BasePtr.getValueType())); 7658 } 7659 7660 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 7661 SDValue Value = DAG.getNode(ISD::BUILD_VECTOR, dl, 7662 Op.getValueType(), Vals); 7663 7664 if (LN->isIndexed()) { 7665 SDValue RetOps[] = { Value, Vals[0].getValue(1), TF }; 7666 return DAG.getMergeValues(RetOps, dl); 7667 } 7668 7669 SDValue RetOps[] = { Value, TF }; 7670 return DAG.getMergeValues(RetOps, dl); 7671 } 7672 7673 assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower"); 7674 assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported"); 7675 7676 // To lower v4i1 from a byte array, we load the byte elements of the 7677 // vector and then reuse the BUILD_VECTOR logic. 7678 7679 SmallVector<SDValue, 4> VectElmts, VectElmtChains; 7680 for (unsigned i = 0; i < 4; ++i) { 7681 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 7682 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 7683 7684 VectElmts.push_back(DAG.getExtLoad(ISD::EXTLOAD, 7685 dl, MVT::i32, LoadChain, Idx, 7686 LN->getPointerInfo().getWithOffset(i), 7687 MVT::i8 /* memory type */, 7688 LN->isVolatile(), LN->isNonTemporal(), 7689 LN->isInvariant(), 7690 1 /* alignment */, LN->getAAInfo())); 7691 VectElmtChains.push_back(VectElmts[i].getValue(1)); 7692 } 7693 7694 LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains); 7695 SDValue Value = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i1, VectElmts); 7696 7697 SDValue RVals[] = { Value, LoadChain }; 7698 return DAG.getMergeValues(RVals, dl); 7699 } 7700 7701 /// Lowering for QPX v4i1 stores 7702 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op, 7703 SelectionDAG &DAG) const { 7704 SDLoc dl(Op); 7705 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode()); 7706 SDValue StoreChain = SN->getChain(); 7707 SDValue BasePtr = SN->getBasePtr(); 7708 SDValue Value = SN->getValue(); 7709 7710 if (Value.getValueType() == MVT::v4f64 || 7711 Value.getValueType() == MVT::v4f32) { 7712 EVT MemVT = SN->getMemoryVT(); 7713 unsigned Alignment = SN->getAlignment(); 7714 7715 // If this store is properly aligned, then it is legal. 
7716 if (Alignment >= MemVT.getStoreSize()) 7717 return Op; 7718 7719 EVT ScalarVT = Value.getValueType().getScalarType(), 7720 ScalarMemVT = MemVT.getScalarType(); 7721 unsigned Stride = ScalarMemVT.getStoreSize(); 7722 7723 SmallVector<SDValue, 8> Stores; 7724 for (unsigned Idx = 0; Idx < 4; ++Idx) { 7725 SDValue Ex = DAG.getNode( 7726 ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value, 7727 DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout()))); 7728 SDValue Store; 7729 if (ScalarVT != ScalarMemVT) 7730 Store = 7731 DAG.getTruncStore(StoreChain, dl, Ex, BasePtr, 7732 SN->getPointerInfo().getWithOffset(Idx*Stride), 7733 ScalarMemVT, SN->isVolatile(), SN->isNonTemporal(), 7734 MinAlign(Alignment, Idx*Stride), SN->getAAInfo()); 7735 else 7736 Store = 7737 DAG.getStore(StoreChain, dl, Ex, BasePtr, 7738 SN->getPointerInfo().getWithOffset(Idx*Stride), 7739 SN->isVolatile(), SN->isNonTemporal(), 7740 MinAlign(Alignment, Idx*Stride), SN->getAAInfo()); 7741 7742 if (Idx == 0 && SN->isIndexed()) { 7743 assert(SN->getAddressingMode() == ISD::PRE_INC && 7744 "Unknown addressing mode on vector store"); 7745 Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(), 7746 SN->getAddressingMode()); 7747 } 7748 7749 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 7750 DAG.getConstant(Stride, dl, 7751 BasePtr.getValueType())); 7752 Stores.push_back(Store); 7753 } 7754 7755 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 7756 7757 if (SN->isIndexed()) { 7758 SDValue RetOps[] = { TF, Stores[0].getValue(1) }; 7759 return DAG.getMergeValues(RetOps, dl); 7760 } 7761 7762 return TF; 7763 } 7764 7765 assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported"); 7766 assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower"); 7767 7768 // The values are now known to be -1 (false) or 1 (true). To convert this 7769 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 7770 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 7771 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 7772 7773 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 7774 // understand how to form the extending load. 7775 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::f64); 7776 FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64, 7777 FPHalfs, FPHalfs, FPHalfs, FPHalfs); 7778 7779 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 7780 7781 // Now convert to an integer and store. 7782 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 7783 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 7784 Value); 7785 7786 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 7787 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 7788 MachinePointerInfo PtrInfo = 7789 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7790 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7791 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7792 7793 SmallVector<SDValue, 2> Ops; 7794 Ops.push_back(StoreChain); 7795 Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32)); 7796 Ops.push_back(Value); 7797 Ops.push_back(FIdx); 7798 7799 SmallVector<EVT, 2> ValueVTs; 7800 ValueVTs.push_back(MVT::Other); // chain 7801 SDVTList VTs = DAG.getVTList(ValueVTs); 7802 7803 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 7804 dl, VTs, Ops, MVT::v4i32, PtrInfo); 7805 7806 // Move data into the byte array. 
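// For example, when storing a v4i1 value of <1, 0, 1, 1>, the qvfctiwu/qvstfiw
// sequence above leaves the 32-bit words 1, 0, 1, 1 in the 16-byte stack slot;
// the loads below pick those words back up at offsets 0, 4, 8 and 12, and the
// truncating stores then write the bytes 1, 0, 1, 1 to the original store
// address at offsets 0 through 3.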
7807 SmallVector<SDValue, 4> Loads, LoadChains; 7808 for (unsigned i = 0; i < 4; ++i) { 7809 unsigned Offset = 4*i; 7810 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 7811 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 7812 7813 Loads.push_back(DAG.getLoad(MVT::i32, dl, StoreChain, Idx, 7814 PtrInfo.getWithOffset(Offset), 7815 false, false, false, 0)); 7816 LoadChains.push_back(Loads[i].getValue(1)); 7817 } 7818 7819 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 7820 7821 SmallVector<SDValue, 4> Stores; 7822 for (unsigned i = 0; i < 4; ++i) { 7823 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 7824 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 7825 7826 Stores.push_back(DAG.getTruncStore(StoreChain, dl, Loads[i], Idx, 7827 SN->getPointerInfo().getWithOffset(i), 7828 MVT::i8 /* memory type */, 7829 SN->isNonTemporal(), SN->isVolatile(), 7830 1 /* alignment */, SN->getAAInfo())); 7831 } 7832 7833 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 7834 7835 return StoreChain; 7836 } 7837 7838 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 7839 SDLoc dl(Op); 7840 if (Op.getValueType() == MVT::v4i32) { 7841 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 7842 7843 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl); 7844 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt. 7845 7846 SDValue RHSSwap = // = vrlw RHS, 16 7847 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 7848 7849 // Shrinkify inputs to v8i16. 7850 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS); 7851 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS); 7852 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap); 7853 7854 // Low parts multiplied together, generating 32-bit results (we ignore the 7855 // top parts). 7856 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 7857 LHS, RHS, DAG, dl, MVT::v4i32); 7858 7859 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 7860 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 7861 // Shift the high parts up 16 bits. 7862 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 7863 Neg16, DAG, dl); 7864 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 7865 } else if (Op.getValueType() == MVT::v8i16) { 7866 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 7867 7868 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl); 7869 7870 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 7871 LHS, RHS, Zero, DAG, dl); 7872 } else if (Op.getValueType() == MVT::v16i8) { 7873 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 7874 bool isLittleEndian = Subtarget.isLittleEndian(); 7875 7876 // Multiply the even 8-bit parts, producing 16-bit sums. 7877 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 7878 LHS, RHS, DAG, dl, MVT::v8i16); 7879 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts); 7880 7881 // Multiply the odd 8-bit parts, producing 16-bit sums. 7882 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 7883 LHS, RHS, DAG, dl, MVT::v8i16); 7884 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts); 7885 7886 // Merge the results together. Because vmuleub and vmuloub are 7887 // instructions with a big-endian bias, we must reverse the 7888 // element numbering and reverse the meaning of "odd" and "even" 7889 // when generating little endian code. 
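// For example, in big-endian mode the loop below builds
//   Ops = {1,17, 3,19, 5,21, 7,23, 9,25, 11,27, 13,29, 15,31}
// i.e. the low-order (odd-numbered) byte of each 16-bit product, taken
// alternately from EvenParts (indices 0-15) and OddParts (indices 16-31).
// In little-endian mode the byte indices are even and the two inputs to the
// shuffle are swapped.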
7890 int Ops[16]; 7891 for (unsigned i = 0; i != 8; ++i) { 7892 if (isLittleEndian) { 7893 Ops[i*2 ] = 2*i; 7894 Ops[i*2+1] = 2*i+16; 7895 } else { 7896 Ops[i*2 ] = 2*i+1; 7897 Ops[i*2+1] = 2*i+1+16; 7898 } 7899 } 7900 if (isLittleEndian) 7901 return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops); 7902 else 7903 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 7904 } else { 7905 llvm_unreachable("Unknown mul to lower!"); 7906 } 7907 } 7908 7909 /// LowerOperation - Provide custom lowering hooks for some operations. 7910 /// 7911 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 7912 switch (Op.getOpcode()) { 7913 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 7914 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 7915 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 7916 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 7917 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 7918 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 7919 case ISD::SETCC: return LowerSETCC(Op, DAG); 7920 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 7921 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 7922 case ISD::VASTART: 7923 return LowerVASTART(Op, DAG, Subtarget); 7924 7925 case ISD::VAARG: 7926 return LowerVAARG(Op, DAG, Subtarget); 7927 7928 case ISD::VACOPY: 7929 return LowerVACOPY(Op, DAG, Subtarget); 7930 7931 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, Subtarget); 7932 case ISD::DYNAMIC_STACKALLOC: 7933 return LowerDYNAMIC_STACKALLOC(Op, DAG, Subtarget); 7934 7935 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 7936 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 7937 7938 case ISD::LOAD: return LowerLOAD(Op, DAG); 7939 case ISD::STORE: return LowerSTORE(Op, DAG); 7940 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); 7941 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 7942 case ISD::FP_TO_UINT: 7943 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, 7944 SDLoc(Op)); 7945 case ISD::UINT_TO_FP: 7946 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 7947 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 7948 7949 // Lower 64-bit shifts. 7950 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 7951 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 7952 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 7953 7954 // Vector-related lowering. 7955 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 7956 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 7957 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 7958 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 7959 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG); 7960 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 7961 case ISD::MUL: return LowerMUL(Op, DAG); 7962 7963 // For counter-based loop handling. 7964 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 7965 7966 // Frame & Return address. 
7967 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 7968 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 7969 } 7970 } 7971 7972 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 7973 SmallVectorImpl<SDValue>&Results, 7974 SelectionDAG &DAG) const { 7975 SDLoc dl(N); 7976 switch (N->getOpcode()) { 7977 default: 7978 llvm_unreachable("Do not know how to custom type legalize this operation!"); 7979 case ISD::READCYCLECOUNTER: { 7980 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 7981 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0)); 7982 7983 Results.push_back(RTB); 7984 Results.push_back(RTB.getValue(1)); 7985 Results.push_back(RTB.getValue(2)); 7986 break; 7987 } 7988 case ISD::INTRINSIC_W_CHAIN: { 7989 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 7990 Intrinsic::ppc_is_decremented_ctr_nonzero) 7991 break; 7992 7993 assert(N->getValueType(0) == MVT::i1 && 7994 "Unexpected result type for CTR decrement intrinsic"); 7995 EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), 7996 N->getValueType(0)); 7997 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 7998 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 7999 N->getOperand(1)); 8000 8001 Results.push_back(NewInt); 8002 Results.push_back(NewInt.getValue(1)); 8003 break; 8004 } 8005 case ISD::VAARG: { 8006 if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64()) 8007 return; 8008 8009 EVT VT = N->getValueType(0); 8010 8011 if (VT == MVT::i64) { 8012 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG, Subtarget); 8013 8014 Results.push_back(NewNode); 8015 Results.push_back(NewNode.getValue(1)); 8016 } 8017 return; 8018 } 8019 case ISD::FP_ROUND_INREG: { 8020 assert(N->getValueType(0) == MVT::ppcf128); 8021 assert(N->getOperand(0).getValueType() == MVT::ppcf128); 8022 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 8023 MVT::f64, N->getOperand(0), 8024 DAG.getIntPtrConstant(0, dl)); 8025 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 8026 MVT::f64, N->getOperand(0), 8027 DAG.getIntPtrConstant(1, dl)); 8028 8029 // Add the two halves of the long double in round-to-zero mode. 8030 SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 8031 8032 // We know the low half is about to be thrown away, so just use something 8033 // convenient. 8034 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128, 8035 FPreg, FPreg)); 8036 return; 8037 } 8038 case ISD::FP_TO_SINT: 8039 case ISD::FP_TO_UINT: 8040 // LowerFP_TO_INT() can only handle f32 and f64. 
8041 if (N->getOperand(0).getValueType() == MVT::ppcf128)
8042 return;
8043 Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
8044 return;
8045 }
8046 }
8047
8048
8049 //===----------------------------------------------------------------------===//
8050 // Other Lowering Code
8051 //===----------------------------------------------------------------------===//
8052
8053 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
8054 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
8055 Function *Func = Intrinsic::getDeclaration(M, Id);
8056 return Builder.CreateCall(Func, {});
8057 }
8058
8059 // The mappings for emitLeading/TrailingFence are taken from
8060 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
8061 Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
8062 AtomicOrdering Ord, bool IsStore,
8063 bool IsLoad) const {
8064 if (Ord == SequentiallyConsistent)
8065 return callIntrinsic(Builder, Intrinsic::ppc_sync);
8066 if (isAtLeastRelease(Ord))
8067 return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
8068 return nullptr;
8069 }
8070
8071 Instruction* PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
8072 AtomicOrdering Ord, bool IsStore,
8073 bool IsLoad) const {
8074 if (IsLoad && isAtLeastAcquire(Ord))
8075 return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
8076 // FIXME: this is too conservative, a dependent branch + isync is enough.
8077 // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
8078 // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
8079 // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
8080 return nullptr;
8081 }
8082
8083 MachineBasicBlock *
8084 PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
8085 unsigned AtomicSize,
8086 unsigned BinOpcode) const {
8087 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
8088 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
8089
8090 auto LoadMnemonic = PPC::LDARX;
8091 auto StoreMnemonic = PPC::STDCX;
8092 switch (AtomicSize) {
8093 default:
8094 llvm_unreachable("Unexpected size of atomic entity");
8095 case 1:
8096 LoadMnemonic = PPC::LBARX;
8097 StoreMnemonic = PPC::STBCX;
8098 assert(Subtarget.hasPartwordAtomics() && "Partword atomics required for size < 4");
8099 break;
8100 case 2:
8101 LoadMnemonic = PPC::LHARX;
8102 StoreMnemonic = PPC::STHCX;
8103 assert(Subtarget.hasPartwordAtomics() && "Partword atomics required for size < 4");
8104 break;
8105 case 4:
8106 LoadMnemonic = PPC::LWARX;
8107 StoreMnemonic = PPC::STWCX;
8108 break;
8109 case 8:
8110 LoadMnemonic = PPC::LDARX;
8111 StoreMnemonic = PPC::STDCX;
8112 break;
8113 }
8114
8115 const BasicBlock *LLVM_BB = BB->getBasicBlock();
8116 MachineFunction *F = BB->getParent();
8117 MachineFunction::iterator It = BB;
8118 ++It;
8119
8120 unsigned dest = MI->getOperand(0).getReg();
8121 unsigned ptrA = MI->getOperand(1).getReg();
8122 unsigned ptrB = MI->getOperand(2).getReg();
8123 unsigned incr = MI->getOperand(3).getReg();
8124 DebugLoc dl = MI->getDebugLoc();
8125
8126 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
8127 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
8128 F->insert(It, loopMBB);
8129 F->insert(It, exitMBB);
8130 exitMBB->splice(exitMBB->begin(), BB,
8131 std::next(MachineBasicBlock::iterator(MI)), BB->end());
8132 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
8133
8134 MachineRegisterInfo &RegInfo = F->getRegInfo();
8135 unsigned TmpReg = (!BinOpcode) ?
incr : 8136 RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass 8137 : &PPC::GPRCRegClass); 8138 8139 // thisMBB: 8140 // ... 8141 // fallthrough --> loopMBB 8142 BB->addSuccessor(loopMBB); 8143 8144 // loopMBB: 8145 // l[wd]arx dest, ptr 8146 // add r0, dest, incr 8147 // st[wd]cx. r0, ptr 8148 // bne- loopMBB 8149 // fallthrough --> exitMBB 8150 BB = loopMBB; 8151 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 8152 .addReg(ptrA).addReg(ptrB); 8153 if (BinOpcode) 8154 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 8155 BuildMI(BB, dl, TII->get(StoreMnemonic)) 8156 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 8157 BuildMI(BB, dl, TII->get(PPC::BCC)) 8158 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 8159 BB->addSuccessor(loopMBB); 8160 BB->addSuccessor(exitMBB); 8161 8162 // exitMBB: 8163 // ... 8164 BB = exitMBB; 8165 return BB; 8166 } 8167 8168 MachineBasicBlock * 8169 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI, 8170 MachineBasicBlock *BB, 8171 bool is8bit, // operation 8172 unsigned BinOpcode) const { 8173 // If we support part-word atomic mnemonics, just use them 8174 if (Subtarget.hasPartwordAtomics()) 8175 return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode); 8176 8177 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 8178 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8179 // In 64 bit mode we have to use 64 bits for addresses, even though the 8180 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 8181 // registers without caring whether they're 32 or 64, but here we're 8182 // doing actual arithmetic on the addresses. 8183 bool is64bit = Subtarget.isPPC64(); 8184 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 8185 8186 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8187 MachineFunction *F = BB->getParent(); 8188 MachineFunction::iterator It = BB; 8189 ++It; 8190 8191 unsigned dest = MI->getOperand(0).getReg(); 8192 unsigned ptrA = MI->getOperand(1).getReg(); 8193 unsigned ptrB = MI->getOperand(2).getReg(); 8194 unsigned incr = MI->getOperand(3).getReg(); 8195 DebugLoc dl = MI->getDebugLoc(); 8196 8197 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 8198 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 8199 F->insert(It, loopMBB); 8200 F->insert(It, exitMBB); 8201 exitMBB->splice(exitMBB->begin(), BB, 8202 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8203 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 8204 8205 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8206 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 8207 : &PPC::GPRCRegClass; 8208 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 8209 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 8210 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 8211 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC); 8212 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 8213 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 8214 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 8215 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 8216 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC); 8217 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 8218 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 8219 unsigned Ptr1Reg; 8220 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC); 8221 8222 // thisMBB: 8223 // ... 
8224 // fallthrough --> loopMBB 8225 BB->addSuccessor(loopMBB); 8226 8227 // The 4-byte load must be aligned, while a char or short may be 8228 // anywhere in the word. Hence all this nasty bookkeeping code. 8229 // add ptr1, ptrA, ptrB [copy if ptrA==0] 8230 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 8231 // xori shift, shift1, 24 [16] 8232 // rlwinm ptr, ptr1, 0, 0, 29 8233 // slw incr2, incr, shift 8234 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 8235 // slw mask, mask2, shift 8236 // loopMBB: 8237 // lwarx tmpDest, ptr 8238 // add tmp, tmpDest, incr2 8239 // andc tmp2, tmpDest, mask 8240 // and tmp3, tmp, mask 8241 // or tmp4, tmp3, tmp2 8242 // stwcx. tmp4, ptr 8243 // bne- loopMBB 8244 // fallthrough --> exitMBB 8245 // srw dest, tmpDest, shift 8246 if (ptrA != ZeroReg) { 8247 Ptr1Reg = RegInfo.createVirtualRegister(RC); 8248 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 8249 .addReg(ptrA).addReg(ptrB); 8250 } else { 8251 Ptr1Reg = ptrB; 8252 } 8253 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 8254 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 8255 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 8256 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 8257 if (is64bit) 8258 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 8259 .addReg(Ptr1Reg).addImm(0).addImm(61); 8260 else 8261 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 8262 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 8263 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg) 8264 .addReg(incr).addReg(ShiftReg); 8265 if (is8bit) 8266 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 8267 else { 8268 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 8269 BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535); 8270 } 8271 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 8272 .addReg(Mask2Reg).addReg(ShiftReg); 8273 8274 BB = loopMBB; 8275 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 8276 .addReg(ZeroReg).addReg(PtrReg); 8277 if (BinOpcode) 8278 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 8279 .addReg(Incr2Reg).addReg(TmpDestReg); 8280 BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg) 8281 .addReg(TmpDestReg).addReg(MaskReg); 8282 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg) 8283 .addReg(TmpReg).addReg(MaskReg); 8284 BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg) 8285 .addReg(Tmp3Reg).addReg(Tmp2Reg); 8286 BuildMI(BB, dl, TII->get(PPC::STWCX)) 8287 .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg); 8288 BuildMI(BB, dl, TII->get(PPC::BCC)) 8289 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 8290 BB->addSuccessor(loopMBB); 8291 BB->addSuccessor(exitMBB); 8292 8293 // exitMBB: 8294 // ... 
8295 BB = exitMBB; 8296 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg) 8297 .addReg(ShiftReg); 8298 return BB; 8299 } 8300 8301 llvm::MachineBasicBlock* 8302 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI, 8303 MachineBasicBlock *MBB) const { 8304 DebugLoc DL = MI->getDebugLoc(); 8305 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8306 8307 MachineFunction *MF = MBB->getParent(); 8308 MachineRegisterInfo &MRI = MF->getRegInfo(); 8309 8310 const BasicBlock *BB = MBB->getBasicBlock(); 8311 MachineFunction::iterator I = MBB; 8312 ++I; 8313 8314 // Memory Reference 8315 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 8316 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 8317 8318 unsigned DstReg = MI->getOperand(0).getReg(); 8319 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 8320 assert(RC->hasType(MVT::i32) && "Invalid destination!"); 8321 unsigned mainDstReg = MRI.createVirtualRegister(RC); 8322 unsigned restoreDstReg = MRI.createVirtualRegister(RC); 8323 8324 MVT PVT = getPointerTy(MF->getDataLayout()); 8325 assert((PVT == MVT::i64 || PVT == MVT::i32) && 8326 "Invalid Pointer Size!"); 8327 // For v = setjmp(buf), we generate 8328 // 8329 // thisMBB: 8330 // SjLjSetup mainMBB 8331 // bl mainMBB 8332 // v_restore = 1 8333 // b sinkMBB 8334 // 8335 // mainMBB: 8336 // buf[LabelOffset] = LR 8337 // v_main = 0 8338 // 8339 // sinkMBB: 8340 // v = phi(main, restore) 8341 // 8342 8343 MachineBasicBlock *thisMBB = MBB; 8344 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 8345 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 8346 MF->insert(I, mainMBB); 8347 MF->insert(I, sinkMBB); 8348 8349 MachineInstrBuilder MIB; 8350 8351 // Transfer the remainder of BB and its successor edges to sinkMBB. 8352 sinkMBB->splice(sinkMBB->begin(), MBB, 8353 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 8354 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 8355 8356 // Note that the structure of the jmp_buf used here is not compatible 8357 // with that used by libc, and is not designed to be. Specifically, it 8358 // stores only those 'reserved' registers that LLVM does not otherwise 8359 // understand how to spill. Also, by convention, by the time this 8360 // intrinsic is called, Clang has already stored the frame address in the 8361 // first slot of the buffer and stack address in the third. Following the 8362 // X86 target code, we'll store the jump address in the second slot. We also 8363 // need to save the TOC pointer (R2) to handle jumps between shared 8364 // libraries, and that will be stored in the fourth slot. The thread 8365 // identifier (R13) is not affected. 8366 8367 // thisMBB: 8368 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 8369 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 8370 const int64_t BPOffset = 4 * PVT.getStoreSize(); 8371 8372 // Prepare IP either in reg. 8373 const TargetRegisterClass *PtrRC = getRegClassFor(PVT); 8374 unsigned LabelReg = MRI.createVirtualRegister(PtrRC); 8375 unsigned BufReg = MI->getOperand(1).getReg(); 8376 8377 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) { 8378 setUsesTOCBasePtr(*MBB->getParent()); 8379 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD)) 8380 .addReg(PPC::X2) 8381 .addImm(TOCOffset) 8382 .addReg(BufReg); 8383 MIB.setMemRefs(MMOBegin, MMOEnd); 8384 } 8385 8386 // Naked functions never have a base pointer, and so we use r1. For all 8387 // other functions, this decision must be delayed until during PEI. 
8388 unsigned BaseReg; 8389 if (MF->getFunction()->hasFnAttribute(Attribute::Naked)) 8390 BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1; 8391 else 8392 BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP; 8393 8394 MIB = BuildMI(*thisMBB, MI, DL, 8395 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW)) 8396 .addReg(BaseReg) 8397 .addImm(BPOffset) 8398 .addReg(BufReg); 8399 MIB.setMemRefs(MMOBegin, MMOEnd); 8400 8401 // Setup 8402 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB); 8403 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 8404 MIB.addRegMask(TRI->getNoPreservedMask()); 8405 8406 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1); 8407 8408 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup)) 8409 .addMBB(mainMBB); 8410 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB); 8411 8412 thisMBB->addSuccessor(mainMBB, /* weight */ 0); 8413 thisMBB->addSuccessor(sinkMBB, /* weight */ 1); 8414 8415 // mainMBB: 8416 // mainDstReg = 0 8417 MIB = 8418 BuildMI(mainMBB, DL, 8419 TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg); 8420 8421 // Store IP 8422 if (Subtarget.isPPC64()) { 8423 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD)) 8424 .addReg(LabelReg) 8425 .addImm(LabelOffset) 8426 .addReg(BufReg); 8427 } else { 8428 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW)) 8429 .addReg(LabelReg) 8430 .addImm(LabelOffset) 8431 .addReg(BufReg); 8432 } 8433 8434 MIB.setMemRefs(MMOBegin, MMOEnd); 8435 8436 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0); 8437 mainMBB->addSuccessor(sinkMBB); 8438 8439 // sinkMBB: 8440 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 8441 TII->get(PPC::PHI), DstReg) 8442 .addReg(mainDstReg).addMBB(mainMBB) 8443 .addReg(restoreDstReg).addMBB(thisMBB); 8444 8445 MI->eraseFromParent(); 8446 return sinkMBB; 8447 } 8448 8449 MachineBasicBlock * 8450 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI, 8451 MachineBasicBlock *MBB) const { 8452 DebugLoc DL = MI->getDebugLoc(); 8453 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8454 8455 MachineFunction *MF = MBB->getParent(); 8456 MachineRegisterInfo &MRI = MF->getRegInfo(); 8457 8458 // Memory Reference 8459 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 8460 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 8461 8462 MVT PVT = getPointerTy(MF->getDataLayout()); 8463 assert((PVT == MVT::i64 || PVT == MVT::i32) && 8464 "Invalid Pointer Size!"); 8465 8466 const TargetRegisterClass *RC = 8467 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 8468 unsigned Tmp = MRI.createVirtualRegister(RC); 8469 // Since FP is only updated here but NOT referenced, it's treated as GPR. 8470 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; 8471 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; 8472 unsigned BP = 8473 (PVT == MVT::i64) 8474 ? PPC::X30 8475 : (Subtarget.isSVR4ABI() && 8476 MF->getTarget().getRelocationModel() == Reloc::PIC_ 8477 ? PPC::R29 8478 : PPC::R30); 8479 8480 MachineInstrBuilder MIB; 8481 8482 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 8483 const int64_t SPOffset = 2 * PVT.getStoreSize(); 8484 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 8485 const int64_t BPOffset = 4 * PVT.getStoreSize(); 8486 8487 unsigned BufReg = MI->getOperand(0).getReg(); 8488 8489 // Reload FP (the jumped-to function may not have had a 8490 // frame pointer, and if so, then its r31 will be restored 8491 // as necessary). 
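// The reloads below mirror the jmp_buf layout used on the setjmp side: slot 0
// holds the frame address, slot 1 (LabelOffset) the jump address, slot 2
// (SPOffset) the stack pointer, slot 3 (TOCOffset) the TOC pointer, and
// slot 4 (BPOffset) the base pointer, each slot being PVT.getStoreSize()
// bytes wide.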
8492 if (PVT == MVT::i64) { 8493 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP) 8494 .addImm(0) 8495 .addReg(BufReg); 8496 } else { 8497 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP) 8498 .addImm(0) 8499 .addReg(BufReg); 8500 } 8501 MIB.setMemRefs(MMOBegin, MMOEnd); 8502 8503 // Reload IP 8504 if (PVT == MVT::i64) { 8505 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp) 8506 .addImm(LabelOffset) 8507 .addReg(BufReg); 8508 } else { 8509 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp) 8510 .addImm(LabelOffset) 8511 .addReg(BufReg); 8512 } 8513 MIB.setMemRefs(MMOBegin, MMOEnd); 8514 8515 // Reload SP 8516 if (PVT == MVT::i64) { 8517 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP) 8518 .addImm(SPOffset) 8519 .addReg(BufReg); 8520 } else { 8521 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP) 8522 .addImm(SPOffset) 8523 .addReg(BufReg); 8524 } 8525 MIB.setMemRefs(MMOBegin, MMOEnd); 8526 8527 // Reload BP 8528 if (PVT == MVT::i64) { 8529 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP) 8530 .addImm(BPOffset) 8531 .addReg(BufReg); 8532 } else { 8533 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP) 8534 .addImm(BPOffset) 8535 .addReg(BufReg); 8536 } 8537 MIB.setMemRefs(MMOBegin, MMOEnd); 8538 8539 // Reload TOC 8540 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) { 8541 setUsesTOCBasePtr(*MBB->getParent()); 8542 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) 8543 .addImm(TOCOffset) 8544 .addReg(BufReg); 8545 8546 MIB.setMemRefs(MMOBegin, MMOEnd); 8547 } 8548 8549 // Jump 8550 BuildMI(*MBB, MI, DL, 8551 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp); 8552 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR)); 8553 8554 MI->eraseFromParent(); 8555 return MBB; 8556 } 8557 8558 MachineBasicBlock * 8559 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 8560 MachineBasicBlock *BB) const { 8561 if (MI->getOpcode() == TargetOpcode::STACKMAP || 8562 MI->getOpcode() == TargetOpcode::PATCHPOINT) { 8563 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() && 8564 MI->getOpcode() == TargetOpcode::PATCHPOINT) { 8565 // Call lowering should have added an r2 operand to indicate a dependence 8566 // on the TOC base pointer value. It can't however, because there is no 8567 // way to mark the dependence as implicit there, and so the stackmap code 8568 // will confuse it with a regular operand. Instead, add the dependence 8569 // here. 8570 setUsesTOCBasePtr(*BB->getParent()); 8571 MI->addOperand(MachineOperand::CreateReg(PPC::X2, false, true)); 8572 } 8573 8574 return emitPatchPoint(MI, BB); 8575 } 8576 8577 if (MI->getOpcode() == PPC::EH_SjLj_SetJmp32 || 8578 MI->getOpcode() == PPC::EH_SjLj_SetJmp64) { 8579 return emitEHSjLjSetJmp(MI, BB); 8580 } else if (MI->getOpcode() == PPC::EH_SjLj_LongJmp32 || 8581 MI->getOpcode() == PPC::EH_SjLj_LongJmp64) { 8582 return emitEHSjLjLongJmp(MI, BB); 8583 } 8584 8585 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8586 8587 // To "insert" these instructions we actually have to insert their 8588 // control-flow patterns. 
8589 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8590 MachineFunction::iterator It = BB; 8591 ++It; 8592 8593 MachineFunction *F = BB->getParent(); 8594 8595 if (Subtarget.hasISEL() && (MI->getOpcode() == PPC::SELECT_CC_I4 || 8596 MI->getOpcode() == PPC::SELECT_CC_I8 || 8597 MI->getOpcode() == PPC::SELECT_I4 || 8598 MI->getOpcode() == PPC::SELECT_I8)) { 8599 SmallVector<MachineOperand, 2> Cond; 8600 if (MI->getOpcode() == PPC::SELECT_CC_I4 || 8601 MI->getOpcode() == PPC::SELECT_CC_I8) 8602 Cond.push_back(MI->getOperand(4)); 8603 else 8604 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET)); 8605 Cond.push_back(MI->getOperand(1)); 8606 8607 DebugLoc dl = MI->getDebugLoc(); 8608 TII->insertSelect(*BB, MI, dl, MI->getOperand(0).getReg(), 8609 Cond, MI->getOperand(2).getReg(), 8610 MI->getOperand(3).getReg()); 8611 } else if (MI->getOpcode() == PPC::SELECT_CC_I4 || 8612 MI->getOpcode() == PPC::SELECT_CC_I8 || 8613 MI->getOpcode() == PPC::SELECT_CC_F4 || 8614 MI->getOpcode() == PPC::SELECT_CC_F8 || 8615 MI->getOpcode() == PPC::SELECT_CC_QFRC || 8616 MI->getOpcode() == PPC::SELECT_CC_QSRC || 8617 MI->getOpcode() == PPC::SELECT_CC_QBRC || 8618 MI->getOpcode() == PPC::SELECT_CC_VRRC || 8619 MI->getOpcode() == PPC::SELECT_CC_VSFRC || 8620 MI->getOpcode() == PPC::SELECT_CC_VSSRC || 8621 MI->getOpcode() == PPC::SELECT_CC_VSRC || 8622 MI->getOpcode() == PPC::SELECT_I4 || 8623 MI->getOpcode() == PPC::SELECT_I8 || 8624 MI->getOpcode() == PPC::SELECT_F4 || 8625 MI->getOpcode() == PPC::SELECT_F8 || 8626 MI->getOpcode() == PPC::SELECT_QFRC || 8627 MI->getOpcode() == PPC::SELECT_QSRC || 8628 MI->getOpcode() == PPC::SELECT_QBRC || 8629 MI->getOpcode() == PPC::SELECT_VRRC || 8630 MI->getOpcode() == PPC::SELECT_VSFRC || 8631 MI->getOpcode() == PPC::SELECT_VSSRC || 8632 MI->getOpcode() == PPC::SELECT_VSRC) { 8633 // The incoming instruction knows the destination vreg to set, the 8634 // condition code register to branch on, the true/false values to 8635 // select between, and a branch opcode to use. 8636 8637 // thisMBB: 8638 // ... 8639 // TrueVal = ... 8640 // cmpTY ccX, r1, r2 8641 // bCC copy1MBB 8642 // fallthrough --> copy0MBB 8643 MachineBasicBlock *thisMBB = BB; 8644 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 8645 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 8646 DebugLoc dl = MI->getDebugLoc(); 8647 F->insert(It, copy0MBB); 8648 F->insert(It, sinkMBB); 8649 8650 // Transfer the remainder of BB and its successor edges to sinkMBB. 8651 sinkMBB->splice(sinkMBB->begin(), BB, 8652 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8653 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 8654 8655 // Next, add the true and fallthrough blocks as its successors. 
8656 BB->addSuccessor(copy0MBB); 8657 BB->addSuccessor(sinkMBB); 8658 8659 if (MI->getOpcode() == PPC::SELECT_I4 || 8660 MI->getOpcode() == PPC::SELECT_I8 || 8661 MI->getOpcode() == PPC::SELECT_F4 || 8662 MI->getOpcode() == PPC::SELECT_F8 || 8663 MI->getOpcode() == PPC::SELECT_QFRC || 8664 MI->getOpcode() == PPC::SELECT_QSRC || 8665 MI->getOpcode() == PPC::SELECT_QBRC || 8666 MI->getOpcode() == PPC::SELECT_VRRC || 8667 MI->getOpcode() == PPC::SELECT_VSFRC || 8668 MI->getOpcode() == PPC::SELECT_VSSRC || 8669 MI->getOpcode() == PPC::SELECT_VSRC) { 8670 BuildMI(BB, dl, TII->get(PPC::BC)) 8671 .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 8672 } else { 8673 unsigned SelectPred = MI->getOperand(4).getImm(); 8674 BuildMI(BB, dl, TII->get(PPC::BCC)) 8675 .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 8676 } 8677 8678 // copy0MBB: 8679 // %FalseValue = ... 8680 // # fallthrough to sinkMBB 8681 BB = copy0MBB; 8682 8683 // Update machine-CFG edges 8684 BB->addSuccessor(sinkMBB); 8685 8686 // sinkMBB: 8687 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 8688 // ... 8689 BB = sinkMBB; 8690 BuildMI(*BB, BB->begin(), dl, 8691 TII->get(PPC::PHI), MI->getOperand(0).getReg()) 8692 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB) 8693 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 8694 } else if (MI->getOpcode() == PPC::ReadTB) { 8695 // To read the 64-bit time-base register on a 32-bit target, we read the 8696 // two halves. Should the counter have wrapped while it was being read, we 8697 // need to try again. 8698 // ... 8699 // readLoop: 8700 // mfspr Rx,TBU # load from TBU 8701 // mfspr Ry,TB # load from TB 8702 // mfspr Rz,TBU # load from TBU 8703 // cmpw crX,Rx,Rz # check if ‘old’=’new’ 8704 // bne readLoop # branch if they're not equal 8705 // ... 8706 8707 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB); 8708 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 8709 DebugLoc dl = MI->getDebugLoc(); 8710 F->insert(It, readMBB); 8711 F->insert(It, sinkMBB); 8712 8713 // Transfer the remainder of BB and its successor edges to sinkMBB. 
8714 sinkMBB->splice(sinkMBB->begin(), BB, 8715 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8716 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 8717 8718 BB->addSuccessor(readMBB); 8719 BB = readMBB; 8720 8721 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8722 unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 8723 unsigned LoReg = MI->getOperand(0).getReg(); 8724 unsigned HiReg = MI->getOperand(1).getReg(); 8725 8726 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269); 8727 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268); 8728 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269); 8729 8730 unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 8731 8732 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg) 8733 .addReg(HiReg).addReg(ReadAgainReg); 8734 BuildMI(BB, dl, TII->get(PPC::BCC)) 8735 .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB); 8736 8737 BB->addSuccessor(readMBB); 8738 BB->addSuccessor(sinkMBB); 8739 } 8740 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 8741 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 8742 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 8743 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 8744 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 8745 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4); 8746 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 8747 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8); 8748 8749 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 8750 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 8751 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 8752 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 8753 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 8754 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND); 8755 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 8756 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8); 8757 8758 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 8759 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 8760 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 8761 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 8762 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 8763 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR); 8764 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 8765 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8); 8766 8767 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 8768 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 8769 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 8770 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 8771 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 8772 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR); 8773 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 8774 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8); 8775 8776 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 8777 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND); 8778 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 8779 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND); 8780 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 8781 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND); 8782 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 8783 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8); 8784 8785 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 8786 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 8787 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 8788 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 8789 else 
if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32) 8790 BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF); 8791 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 8792 BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8); 8793 8794 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8) 8795 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 8796 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16) 8797 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 8798 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32) 8799 BB = EmitAtomicBinary(MI, BB, 4, 0); 8800 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64) 8801 BB = EmitAtomicBinary(MI, BB, 8, 0); 8802 8803 else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 8804 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 || 8805 (Subtarget.hasPartwordAtomics() && 8806 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) || 8807 (Subtarget.hasPartwordAtomics() && 8808 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) { 8809 bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64; 8810 8811 auto LoadMnemonic = PPC::LDARX; 8812 auto StoreMnemonic = PPC::STDCX; 8813 switch(MI->getOpcode()) { 8814 default: 8815 llvm_unreachable("Compare and swap of unknown size"); 8816 case PPC::ATOMIC_CMP_SWAP_I8: 8817 LoadMnemonic = PPC::LBARX; 8818 StoreMnemonic = PPC::STBCX; 8819 assert(Subtarget.hasPartwordAtomics() && "No support partword atomics."); 8820 break; 8821 case PPC::ATOMIC_CMP_SWAP_I16: 8822 LoadMnemonic = PPC::LHARX; 8823 StoreMnemonic = PPC::STHCX; 8824 assert(Subtarget.hasPartwordAtomics() && "No support partword atomics."); 8825 break; 8826 case PPC::ATOMIC_CMP_SWAP_I32: 8827 LoadMnemonic = PPC::LWARX; 8828 StoreMnemonic = PPC::STWCX; 8829 break; 8830 case PPC::ATOMIC_CMP_SWAP_I64: 8831 LoadMnemonic = PPC::LDARX; 8832 StoreMnemonic = PPC::STDCX; 8833 break; 8834 } 8835 unsigned dest = MI->getOperand(0).getReg(); 8836 unsigned ptrA = MI->getOperand(1).getReg(); 8837 unsigned ptrB = MI->getOperand(2).getReg(); 8838 unsigned oldval = MI->getOperand(3).getReg(); 8839 unsigned newval = MI->getOperand(4).getReg(); 8840 DebugLoc dl = MI->getDebugLoc(); 8841 8842 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 8843 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 8844 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 8845 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 8846 F->insert(It, loop1MBB); 8847 F->insert(It, loop2MBB); 8848 F->insert(It, midMBB); 8849 F->insert(It, exitMBB); 8850 exitMBB->splice(exitMBB->begin(), BB, 8851 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8852 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 8853 8854 // thisMBB: 8855 // ... 8856 // fallthrough --> loopMBB 8857 BB->addSuccessor(loop1MBB); 8858 8859 // loop1MBB: 8860 // l[bhwd]arx dest, ptr 8861 // cmp[wd] dest, oldval 8862 // bne- midMBB 8863 // loop2MBB: 8864 // st[bhwd]cx. newval, ptr 8865 // bne- loopMBB 8866 // b exitBB 8867 // midMBB: 8868 // st[bhwd]cx. dest, ptr 8869 // exitBB: 8870 BB = loop1MBB; 8871 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 8872 .addReg(ptrA).addReg(ptrB); 8873 BuildMI(BB, dl, TII->get(is64bit ? 
PPC::CMPD : PPC::CMPW), PPC::CR0) 8874 .addReg(oldval).addReg(dest); 8875 BuildMI(BB, dl, TII->get(PPC::BCC)) 8876 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 8877 BB->addSuccessor(loop2MBB); 8878 BB->addSuccessor(midMBB); 8879 8880 BB = loop2MBB; 8881 BuildMI(BB, dl, TII->get(StoreMnemonic)) 8882 .addReg(newval).addReg(ptrA).addReg(ptrB); 8883 BuildMI(BB, dl, TII->get(PPC::BCC)) 8884 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 8885 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 8886 BB->addSuccessor(loop1MBB); 8887 BB->addSuccessor(exitMBB); 8888 8889 BB = midMBB; 8890 BuildMI(BB, dl, TII->get(StoreMnemonic)) 8891 .addReg(dest).addReg(ptrA).addReg(ptrB); 8892 BB->addSuccessor(exitMBB); 8893 8894 // exitMBB: 8895 // ... 8896 BB = exitMBB; 8897 } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 8898 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 8899 // We must use 64-bit registers for addresses when targeting 64-bit, 8900 // since we're actually doing arithmetic on them. Other registers 8901 // can be 32-bit. 8902 bool is64bit = Subtarget.isPPC64(); 8903 bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 8904 8905 unsigned dest = MI->getOperand(0).getReg(); 8906 unsigned ptrA = MI->getOperand(1).getReg(); 8907 unsigned ptrB = MI->getOperand(2).getReg(); 8908 unsigned oldval = MI->getOperand(3).getReg(); 8909 unsigned newval = MI->getOperand(4).getReg(); 8910 DebugLoc dl = MI->getDebugLoc(); 8911 8912 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 8913 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 8914 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 8915 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 8916 F->insert(It, loop1MBB); 8917 F->insert(It, loop2MBB); 8918 F->insert(It, midMBB); 8919 F->insert(It, exitMBB); 8920 exitMBB->splice(exitMBB->begin(), BB, 8921 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8922 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 8923 8924 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8925 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 8926 : &PPC::GPRCRegClass; 8927 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 8928 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 8929 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 8930 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC); 8931 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC); 8932 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC); 8933 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC); 8934 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 8935 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 8936 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 8937 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 8938 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 8939 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 8940 unsigned Ptr1Reg; 8941 unsigned TmpReg = RegInfo.createVirtualRegister(RC); 8942 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 8943 // thisMBB: 8944 // ... 8945 // fallthrough --> loopMBB 8946 BB->addSuccessor(loop1MBB); 8947 8948 // The 4-byte load must be aligned, while a char or short may be 8949 // anywhere in the word. Hence all this nasty bookkeeping code. 
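// Illustrative example (the address 0x1002 is hypothetical, and big-endian
// byte ordering is assumed, which is what the xori adjustment below accounts
// for): for a halfword compare-and-swap at 0x1002, ptr1 = 0x1002, so
//   rlwinm shift1, ptr1, 3, 27, 27   -> shift1 = 16  ((ptr1 & 0x2) * 8)
//   xori   shift,  shift1, 16        -> shift  = 0   (halfword sits in bits 15..0)
//   rlwinm ptr,    ptr1, 0, 0, 29    -> ptr    = 0x1000 (word-aligned address)
//   mask = 0xFFFF << shift           -> 0x0000FFFF
// The new and old values are shifted into that lane and masked, and the
// word-sized lwarx/stwcx. loop below then operates on the containing word.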
8950 // add ptr1, ptrA, ptrB [copy if ptrA==0] 8951 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 8952 // xori shift, shift1, 24 [16] 8953 // rlwinm ptr, ptr1, 0, 0, 29 8954 // slw newval2, newval, shift 8955 // slw oldval2, oldval,shift 8956 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 8957 // slw mask, mask2, shift 8958 // and newval3, newval2, mask 8959 // and oldval3, oldval2, mask 8960 // loop1MBB: 8961 // lwarx tmpDest, ptr 8962 // and tmp, tmpDest, mask 8963 // cmpw tmp, oldval3 8964 // bne- midMBB 8965 // loop2MBB: 8966 // andc tmp2, tmpDest, mask 8967 // or tmp4, tmp2, newval3 8968 // stwcx. tmp4, ptr 8969 // bne- loop1MBB 8970 // b exitBB 8971 // midMBB: 8972 // stwcx. tmpDest, ptr 8973 // exitBB: 8974 // srw dest, tmpDest, shift 8975 if (ptrA != ZeroReg) { 8976 Ptr1Reg = RegInfo.createVirtualRegister(RC); 8977 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 8978 .addReg(ptrA).addReg(ptrB); 8979 } else { 8980 Ptr1Reg = ptrB; 8981 } 8982 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 8983 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 8984 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 8985 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 8986 if (is64bit) 8987 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 8988 .addReg(Ptr1Reg).addImm(0).addImm(61); 8989 else 8990 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 8991 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 8992 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 8993 .addReg(newval).addReg(ShiftReg); 8994 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 8995 .addReg(oldval).addReg(ShiftReg); 8996 if (is8bit) 8997 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 8998 else { 8999 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 9000 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 9001 .addReg(Mask3Reg).addImm(65535); 9002 } 9003 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 9004 .addReg(Mask2Reg).addReg(ShiftReg); 9005 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 9006 .addReg(NewVal2Reg).addReg(MaskReg); 9007 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 9008 .addReg(OldVal2Reg).addReg(MaskReg); 9009 9010 BB = loop1MBB; 9011 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 9012 .addReg(ZeroReg).addReg(PtrReg); 9013 BuildMI(BB, dl, TII->get(PPC::AND),TmpReg) 9014 .addReg(TmpDestReg).addReg(MaskReg); 9015 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 9016 .addReg(TmpReg).addReg(OldVal3Reg); 9017 BuildMI(BB, dl, TII->get(PPC::BCC)) 9018 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 9019 BB->addSuccessor(loop2MBB); 9020 BB->addSuccessor(midMBB); 9021 9022 BB = loop2MBB; 9023 BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg) 9024 .addReg(TmpDestReg).addReg(MaskReg); 9025 BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg) 9026 .addReg(Tmp2Reg).addReg(NewVal3Reg); 9027 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg) 9028 .addReg(ZeroReg).addReg(PtrReg); 9029 BuildMI(BB, dl, TII->get(PPC::BCC)) 9030 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 9031 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 9032 BB->addSuccessor(loop1MBB); 9033 BB->addSuccessor(exitMBB); 9034 9035 BB = midMBB; 9036 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg) 9037 .addReg(ZeroReg).addReg(PtrReg); 9038 BB->addSuccessor(exitMBB); 9039 9040 // exitMBB: 9041 // ... 
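// Note on the exit block below: the srw inserted at the start of exitMBB
// shifts the masked copy of the loaded word (TmpReg) right by 'shift', so
// 'dest' ends up holding the subword that was in memory, zero-extended --
// the value the cmpxchg pseudo is defined to return.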
9042 BB = exitMBB; 9043 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg) 9044 .addReg(ShiftReg); 9045 } else if (MI->getOpcode() == PPC::FADDrtz) { 9046 // This pseudo performs an FADD with rounding mode temporarily forced 9047 // to round-to-zero. We emit this via custom inserter since the FPSCR 9048 // is not modeled at the SelectionDAG level. 9049 unsigned Dest = MI->getOperand(0).getReg(); 9050 unsigned Src1 = MI->getOperand(1).getReg(); 9051 unsigned Src2 = MI->getOperand(2).getReg(); 9052 DebugLoc dl = MI->getDebugLoc(); 9053 9054 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9055 unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 9056 9057 // Save FPSCR value. 9058 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg); 9059 9060 // Set rounding mode to round-to-zero. 9061 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31); 9062 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30); 9063 9064 // Perform addition. 9065 BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2); 9066 9067 // Restore FPSCR value. 9068 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg); 9069 } else if (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT || 9070 MI->getOpcode() == PPC::ANDIo_1_GT_BIT || 9071 MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 9072 MI->getOpcode() == PPC::ANDIo_1_GT_BIT8) { 9073 unsigned Opcode = (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 9074 MI->getOpcode() == PPC::ANDIo_1_GT_BIT8) ? 9075 PPC::ANDIo8 : PPC::ANDIo; 9076 bool isEQ = (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT || 9077 MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8); 9078 9079 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9080 unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ? 9081 &PPC::GPRCRegClass : 9082 &PPC::G8RCRegClass); 9083 9084 DebugLoc dl = MI->getDebugLoc(); 9085 BuildMI(*BB, MI, dl, TII->get(Opcode), Dest) 9086 .addReg(MI->getOperand(1).getReg()).addImm(1); 9087 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), 9088 MI->getOperand(0).getReg()) 9089 .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT); 9090 } else if (MI->getOpcode() == PPC::TCHECK_RET) { 9091 DebugLoc Dl = MI->getDebugLoc(); 9092 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9093 unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 9094 BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg); 9095 return BB; 9096 } else { 9097 llvm_unreachable("Unexpected instr type to insert"); 9098 } 9099 9100 MI->eraseFromParent(); // The pseudo instruction is gone now. 
9101 return BB; 9102 } 9103 9104 //===----------------------------------------------------------------------===// 9105 // Target Optimization Hooks 9106 //===----------------------------------------------------------------------===// 9107 9108 static std::string getRecipOp(const char *Base, EVT VT) { 9109 std::string RecipOp(Base); 9110 if (VT.getScalarType() == MVT::f64) 9111 RecipOp += "d"; 9112 else 9113 RecipOp += "f"; 9114 9115 if (VT.isVector()) 9116 RecipOp = "vec-" + RecipOp; 9117 9118 return RecipOp; 9119 } 9120 9121 SDValue PPCTargetLowering::getRsqrtEstimate(SDValue Operand, 9122 DAGCombinerInfo &DCI, 9123 unsigned &RefinementSteps, 9124 bool &UseOneConstNR) const { 9125 EVT VT = Operand.getValueType(); 9126 if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) || 9127 (VT == MVT::f64 && Subtarget.hasFRSQRTE()) || 9128 (VT == MVT::v4f32 && Subtarget.hasAltivec()) || 9129 (VT == MVT::v2f64 && Subtarget.hasVSX()) || 9130 (VT == MVT::v4f32 && Subtarget.hasQPX()) || 9131 (VT == MVT::v4f64 && Subtarget.hasQPX())) { 9132 TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals; 9133 std::string RecipOp = getRecipOp("sqrt", VT); 9134 if (!Recips.isEnabled(RecipOp)) 9135 return SDValue(); 9136 9137 RefinementSteps = Recips.getRefinementSteps(RecipOp); 9138 UseOneConstNR = true; 9139 return DCI.DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand); 9140 } 9141 return SDValue(); 9142 } 9143 9144 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, 9145 DAGCombinerInfo &DCI, 9146 unsigned &RefinementSteps) const { 9147 EVT VT = Operand.getValueType(); 9148 if ((VT == MVT::f32 && Subtarget.hasFRES()) || 9149 (VT == MVT::f64 && Subtarget.hasFRE()) || 9150 (VT == MVT::v4f32 && Subtarget.hasAltivec()) || 9151 (VT == MVT::v2f64 && Subtarget.hasVSX()) || 9152 (VT == MVT::v4f32 && Subtarget.hasQPX()) || 9153 (VT == MVT::v4f64 && Subtarget.hasQPX())) { 9154 TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals; 9155 std::string RecipOp = getRecipOp("div", VT); 9156 if (!Recips.isEnabled(RecipOp)) 9157 return SDValue(); 9158 9159 RefinementSteps = Recips.getRefinementSteps(RecipOp); 9160 return DCI.DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand); 9161 } 9162 return SDValue(); 9163 } 9164 9165 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const { 9166 // Note: This functionality is used only when unsafe-fp-math is enabled, and 9167 // on cores with reciprocal estimates (which are used when unsafe-fp-math is 9168 // enabled for division), this functionality is redundant with the default 9169 // combiner logic (once the division -> reciprocal/multiply transformation 9170 // has taken place). As a result, this matters more for older cores than for 9171 // newer ones. 9172 9173 // Combine multiple FDIVs with the same divisor into multiple FMULs by the 9174 // reciprocal if there are two or more FDIVs (for embedded cores with only 9175 // one FP pipeline) for three or more FDIVs (for generic OOO cores). 9176 switch (Subtarget.getDarwinDirective()) { 9177 default: 9178 return 3; 9179 case PPC::DIR_440: 9180 case PPC::DIR_A2: 9181 case PPC::DIR_E500mc: 9182 case PPC::DIR_E5500: 9183 return 2; 9184 } 9185 } 9186 9187 // isConsecutiveLSLoc needs to work even if all adds have not yet been 9188 // collapsed, and so we need to look through chains of them. 
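// For example (with illustrative values), given Loc = (add (add X, 32), 8)
// the recursion below leaves Base = X and accumulates Offset = 40, even
// though the DAG combiner has not yet folded the two adds into one.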
9189 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base, 9190 int64_t& Offset, SelectionDAG &DAG) { 9191 if (DAG.isBaseWithConstantOffset(Loc)) { 9192 Base = Loc.getOperand(0); 9193 Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue(); 9194 9195 // The base might itself be a base plus an offset, and if so, accumulate 9196 // that as well. 9197 getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG); 9198 } 9199 } 9200 9201 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, 9202 unsigned Bytes, int Dist, 9203 SelectionDAG &DAG) { 9204 if (VT.getSizeInBits() / 8 != Bytes) 9205 return false; 9206 9207 SDValue BaseLoc = Base->getBasePtr(); 9208 if (Loc.getOpcode() == ISD::FrameIndex) { 9209 if (BaseLoc.getOpcode() != ISD::FrameIndex) 9210 return false; 9211 const MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 9212 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 9213 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 9214 int FS = MFI->getObjectSize(FI); 9215 int BFS = MFI->getObjectSize(BFI); 9216 if (FS != BFS || FS != (int)Bytes) return false; 9217 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes); 9218 } 9219 9220 SDValue Base1 = Loc, Base2 = BaseLoc; 9221 int64_t Offset1 = 0, Offset2 = 0; 9222 getBaseWithConstantOffset(Loc, Base1, Offset1, DAG); 9223 getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG); 9224 if (Base1 == Base2 && Offset1 == (Offset2 + Dist*Bytes)) 9225 return true; 9226 9227 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9228 const GlobalValue *GV1 = nullptr; 9229 const GlobalValue *GV2 = nullptr; 9230 Offset1 = 0; 9231 Offset2 = 0; 9232 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1); 9233 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2); 9234 if (isGA1 && isGA2 && GV1 == GV2) 9235 return Offset1 == (Offset2 + Dist*Bytes); 9236 return false; 9237 } 9238 9239 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does 9240 // not enforce equality of the chain operands. 
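// In other words, N is considered consecutive with Base when N's address is
// Base's address plus Dist*Bytes. For example, with Bytes = 16 and Dist = 1,
// a v4i32 load from frame offset 16 is consecutive with a base load from
// frame offset 0 (the frame offsets here are illustrative).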
9241 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base, 9242 unsigned Bytes, int Dist, 9243 SelectionDAG &DAG) { 9244 if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) { 9245 EVT VT = LS->getMemoryVT(); 9246 SDValue Loc = LS->getBasePtr(); 9247 return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG); 9248 } 9249 9250 if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) { 9251 EVT VT; 9252 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 9253 default: return false; 9254 case Intrinsic::ppc_qpx_qvlfd: 9255 case Intrinsic::ppc_qpx_qvlfda: 9256 VT = MVT::v4f64; 9257 break; 9258 case Intrinsic::ppc_qpx_qvlfs: 9259 case Intrinsic::ppc_qpx_qvlfsa: 9260 VT = MVT::v4f32; 9261 break; 9262 case Intrinsic::ppc_qpx_qvlfcd: 9263 case Intrinsic::ppc_qpx_qvlfcda: 9264 VT = MVT::v2f64; 9265 break; 9266 case Intrinsic::ppc_qpx_qvlfcs: 9267 case Intrinsic::ppc_qpx_qvlfcsa: 9268 VT = MVT::v2f32; 9269 break; 9270 case Intrinsic::ppc_qpx_qvlfiwa: 9271 case Intrinsic::ppc_qpx_qvlfiwz: 9272 case Intrinsic::ppc_altivec_lvx: 9273 case Intrinsic::ppc_altivec_lvxl: 9274 case Intrinsic::ppc_vsx_lxvw4x: 9275 VT = MVT::v4i32; 9276 break; 9277 case Intrinsic::ppc_vsx_lxvd2x: 9278 VT = MVT::v2f64; 9279 break; 9280 case Intrinsic::ppc_altivec_lvebx: 9281 VT = MVT::i8; 9282 break; 9283 case Intrinsic::ppc_altivec_lvehx: 9284 VT = MVT::i16; 9285 break; 9286 case Intrinsic::ppc_altivec_lvewx: 9287 VT = MVT::i32; 9288 break; 9289 } 9290 9291 return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG); 9292 } 9293 9294 if (N->getOpcode() == ISD::INTRINSIC_VOID) { 9295 EVT VT; 9296 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 9297 default: return false; 9298 case Intrinsic::ppc_qpx_qvstfd: 9299 case Intrinsic::ppc_qpx_qvstfda: 9300 VT = MVT::v4f64; 9301 break; 9302 case Intrinsic::ppc_qpx_qvstfs: 9303 case Intrinsic::ppc_qpx_qvstfsa: 9304 VT = MVT::v4f32; 9305 break; 9306 case Intrinsic::ppc_qpx_qvstfcd: 9307 case Intrinsic::ppc_qpx_qvstfcda: 9308 VT = MVT::v2f64; 9309 break; 9310 case Intrinsic::ppc_qpx_qvstfcs: 9311 case Intrinsic::ppc_qpx_qvstfcsa: 9312 VT = MVT::v2f32; 9313 break; 9314 case Intrinsic::ppc_qpx_qvstfiw: 9315 case Intrinsic::ppc_qpx_qvstfiwa: 9316 case Intrinsic::ppc_altivec_stvx: 9317 case Intrinsic::ppc_altivec_stvxl: 9318 case Intrinsic::ppc_vsx_stxvw4x: 9319 VT = MVT::v4i32; 9320 break; 9321 case Intrinsic::ppc_vsx_stxvd2x: 9322 VT = MVT::v2f64; 9323 break; 9324 case Intrinsic::ppc_altivec_stvebx: 9325 VT = MVT::i8; 9326 break; 9327 case Intrinsic::ppc_altivec_stvehx: 9328 VT = MVT::i16; 9329 break; 9330 case Intrinsic::ppc_altivec_stvewx: 9331 VT = MVT::i32; 9332 break; 9333 } 9334 9335 return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG); 9336 } 9337 9338 return false; 9339 } 9340 9341 // Return true is there is a nearyby consecutive load to the one provided 9342 // (regardless of alignment). We search up and down the chain, looking though 9343 // token factors and other loads (but nothing else). As a result, a true result 9344 // indicates that it is safe to create a new consecutive load adjacent to the 9345 // load provided. 9346 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) { 9347 SDValue Chain = LD->getChain(); 9348 EVT VT = LD->getMemoryVT(); 9349 9350 SmallSet<SDNode *, 16> LoadRoots; 9351 SmallVector<SDNode *, 8> Queue(1, Chain.getNode()); 9352 SmallSet<SDNode *, 16> Visited; 9353 9354 // First, search up the chain, branching to follow all token-factor operands. 
9355 // If we find a consecutive load, then we're done, otherwise, record all 9356 // nodes just above the top-level loads and token factors. 9357 while (!Queue.empty()) { 9358 SDNode *ChainNext = Queue.pop_back_val(); 9359 if (!Visited.insert(ChainNext).second) 9360 continue; 9361 9362 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) { 9363 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG)) 9364 return true; 9365 9366 if (!Visited.count(ChainLD->getChain().getNode())) 9367 Queue.push_back(ChainLD->getChain().getNode()); 9368 } else if (ChainNext->getOpcode() == ISD::TokenFactor) { 9369 for (const SDUse &O : ChainNext->ops()) 9370 if (!Visited.count(O.getNode())) 9371 Queue.push_back(O.getNode()); 9372 } else 9373 LoadRoots.insert(ChainNext); 9374 } 9375 9376 // Second, search down the chain, starting from the top-level nodes recorded 9377 // in the first phase. These top-level nodes are the nodes just above all 9378 // loads and token factors. Starting with their uses, recursively look though 9379 // all loads (just the chain uses) and token factors to find a consecutive 9380 // load. 9381 Visited.clear(); 9382 Queue.clear(); 9383 9384 for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(), 9385 IE = LoadRoots.end(); I != IE; ++I) { 9386 Queue.push_back(*I); 9387 9388 while (!Queue.empty()) { 9389 SDNode *LoadRoot = Queue.pop_back_val(); 9390 if (!Visited.insert(LoadRoot).second) 9391 continue; 9392 9393 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot)) 9394 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG)) 9395 return true; 9396 9397 for (SDNode::use_iterator UI = LoadRoot->use_begin(), 9398 UE = LoadRoot->use_end(); UI != UE; ++UI) 9399 if (((isa<MemSDNode>(*UI) && 9400 cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) || 9401 UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI)) 9402 Queue.push_back(*UI); 9403 } 9404 } 9405 9406 return false; 9407 } 9408 9409 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N, 9410 DAGCombinerInfo &DCI) const { 9411 SelectionDAG &DAG = DCI.DAG; 9412 SDLoc dl(N); 9413 9414 assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits"); 9415 // If we're tracking CR bits, we need to be careful that we don't have: 9416 // trunc(binary-ops(zext(x), zext(y))) 9417 // or 9418 // trunc(binary-ops(binary-ops(zext(x), zext(y)), ...) 9419 // such that we're unnecessarily moving things into GPRs when it would be 9420 // better to keep them in CR bits. 9421 9422 // Note that trunc here can be an actual i1 trunc, or can be the effective 9423 // truncation that comes from a setcc or select_cc. 9424 if (N->getOpcode() == ISD::TRUNCATE && 9425 N->getValueType(0) != MVT::i1) 9426 return SDValue(); 9427 9428 if (N->getOperand(0).getValueType() != MVT::i32 && 9429 N->getOperand(0).getValueType() != MVT::i64) 9430 return SDValue(); 9431 9432 if (N->getOpcode() == ISD::SETCC || 9433 N->getOpcode() == ISD::SELECT_CC) { 9434 // If we're looking at a comparison, then we need to make sure that the 9435 // high bits (all except for the first) don't matter the result. 9436 ISD::CondCode CC = 9437 cast<CondCodeSDNode>(N->getOperand( 9438 N->getOpcode() == ISD::SETCC ? 
2 : 4))->get(); 9439 unsigned OpBits = N->getOperand(0).getValueSizeInBits(); 9440 9441 if (ISD::isSignedIntSetCC(CC)) { 9442 if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits || 9443 DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits) 9444 return SDValue(); 9445 } else if (ISD::isUnsignedIntSetCC(CC)) { 9446 if (!DAG.MaskedValueIsZero(N->getOperand(0), 9447 APInt::getHighBitsSet(OpBits, OpBits-1)) || 9448 !DAG.MaskedValueIsZero(N->getOperand(1), 9449 APInt::getHighBitsSet(OpBits, OpBits-1))) 9450 return SDValue(); 9451 } else { 9452 // This is neither a signed nor an unsigned comparison, just make sure 9453 // that the high bits are equal. 9454 APInt Op1Zero, Op1One; 9455 APInt Op2Zero, Op2One; 9456 DAG.computeKnownBits(N->getOperand(0), Op1Zero, Op1One); 9457 DAG.computeKnownBits(N->getOperand(1), Op2Zero, Op2One); 9458 9459 // We don't really care about what is known about the first bit (if 9460 // anything), so clear it in all masks prior to comparing them. 9461 Op1Zero.clearBit(0); Op1One.clearBit(0); 9462 Op2Zero.clearBit(0); Op2One.clearBit(0); 9463 9464 if (Op1Zero != Op2Zero || Op1One != Op2One) 9465 return SDValue(); 9466 } 9467 } 9468 9469 // We now know that the higher-order bits are irrelevant, we just need to 9470 // make sure that all of the intermediate operations are bit operations, and 9471 // all inputs are extensions. 9472 if (N->getOperand(0).getOpcode() != ISD::AND && 9473 N->getOperand(0).getOpcode() != ISD::OR && 9474 N->getOperand(0).getOpcode() != ISD::XOR && 9475 N->getOperand(0).getOpcode() != ISD::SELECT && 9476 N->getOperand(0).getOpcode() != ISD::SELECT_CC && 9477 N->getOperand(0).getOpcode() != ISD::TRUNCATE && 9478 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND && 9479 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND && 9480 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND) 9481 return SDValue(); 9482 9483 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) && 9484 N->getOperand(1).getOpcode() != ISD::AND && 9485 N->getOperand(1).getOpcode() != ISD::OR && 9486 N->getOperand(1).getOpcode() != ISD::XOR && 9487 N->getOperand(1).getOpcode() != ISD::SELECT && 9488 N->getOperand(1).getOpcode() != ISD::SELECT_CC && 9489 N->getOperand(1).getOpcode() != ISD::TRUNCATE && 9490 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND && 9491 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND && 9492 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND) 9493 return SDValue(); 9494 9495 SmallVector<SDValue, 4> Inputs; 9496 SmallVector<SDValue, 8> BinOps, PromOps; 9497 SmallPtrSet<SDNode *, 16> Visited; 9498 9499 for (unsigned i = 0; i < 2; ++i) { 9500 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 9501 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 9502 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 9503 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) || 9504 isa<ConstantSDNode>(N->getOperand(i))) 9505 Inputs.push_back(N->getOperand(i)); 9506 else 9507 BinOps.push_back(N->getOperand(i)); 9508 9509 if (N->getOpcode() == ISD::TRUNCATE) 9510 break; 9511 } 9512 9513 // Visit all inputs, collect all binary operations (and, or, xor and 9514 // select) that are all fed by extensions. 9515 while (!BinOps.empty()) { 9516 SDValue BinOp = BinOps.back(); 9517 BinOps.pop_back(); 9518 9519 if (!Visited.insert(BinOp.getNode()).second) 9520 continue; 9521 9522 PromOps.push_back(BinOp); 9523 9524 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 9525 // The condition of the select is not promoted. 
9526 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 9527 continue; 9528 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 9529 continue; 9530 9531 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 9532 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 9533 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 9534 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) || 9535 isa<ConstantSDNode>(BinOp.getOperand(i))) { 9536 Inputs.push_back(BinOp.getOperand(i)); 9537 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 9538 BinOp.getOperand(i).getOpcode() == ISD::OR || 9539 BinOp.getOperand(i).getOpcode() == ISD::XOR || 9540 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 9541 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC || 9542 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 9543 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 9544 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 9545 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) { 9546 BinOps.push_back(BinOp.getOperand(i)); 9547 } else { 9548 // We have an input that is not an extension or another binary 9549 // operation; we'll abort this transformation. 9550 return SDValue(); 9551 } 9552 } 9553 } 9554 9555 // Make sure that this is a self-contained cluster of operations (which 9556 // is not quite the same thing as saying that everything has only one 9557 // use). 9558 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 9559 if (isa<ConstantSDNode>(Inputs[i])) 9560 continue; 9561 9562 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 9563 UE = Inputs[i].getNode()->use_end(); 9564 UI != UE; ++UI) { 9565 SDNode *User = *UI; 9566 if (User != N && !Visited.count(User)) 9567 return SDValue(); 9568 9569 // Make sure that we're not going to promote the non-output-value 9570 // operand(s) or SELECT or SELECT_CC. 9571 // FIXME: Although we could sometimes handle this, and it does occur in 9572 // practice that one of the condition inputs to the select is also one of 9573 // the outputs, we currently can't deal with this. 9574 if (User->getOpcode() == ISD::SELECT) { 9575 if (User->getOperand(0) == Inputs[i]) 9576 return SDValue(); 9577 } else if (User->getOpcode() == ISD::SELECT_CC) { 9578 if (User->getOperand(0) == Inputs[i] || 9579 User->getOperand(1) == Inputs[i]) 9580 return SDValue(); 9581 } 9582 } 9583 } 9584 9585 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 9586 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 9587 UE = PromOps[i].getNode()->use_end(); 9588 UI != UE; ++UI) { 9589 SDNode *User = *UI; 9590 if (User != N && !Visited.count(User)) 9591 return SDValue(); 9592 9593 // Make sure that we're not going to promote the non-output-value 9594 // operand(s) or SELECT or SELECT_CC. 9595 // FIXME: Although we could sometimes handle this, and it does occur in 9596 // practice that one of the condition inputs to the select is also one of 9597 // the outputs, we currently can't deal with this. 9598 if (User->getOpcode() == ISD::SELECT) { 9599 if (User->getOperand(0) == PromOps[i]) 9600 return SDValue(); 9601 } else if (User->getOpcode() == ISD::SELECT_CC) { 9602 if (User->getOperand(0) == PromOps[i] || 9603 User->getOperand(1) == PromOps[i]) 9604 return SDValue(); 9605 } 9606 } 9607 } 9608 9609 // Replace all inputs with the extension operand. 
9610 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 9611 // Constants may have users outside the cluster of to-be-promoted nodes, 9612 // and so we need to replace those as we do the promotions. 9613 if (isa<ConstantSDNode>(Inputs[i])) 9614 continue; 9615 else 9616 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0)); 9617 } 9618 9619 // Replace all operations (these are all the same, but have a different 9620 // (i1) return type). DAG.getNode will validate that the types of 9621 // a binary operator match, so go through the list in reverse so that 9622 // we've likely promoted both operands first. Any intermediate truncations or 9623 // extensions disappear. 9624 while (!PromOps.empty()) { 9625 SDValue PromOp = PromOps.back(); 9626 PromOps.pop_back(); 9627 9628 if (PromOp.getOpcode() == ISD::TRUNCATE || 9629 PromOp.getOpcode() == ISD::SIGN_EXTEND || 9630 PromOp.getOpcode() == ISD::ZERO_EXTEND || 9631 PromOp.getOpcode() == ISD::ANY_EXTEND) { 9632 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) && 9633 PromOp.getOperand(0).getValueType() != MVT::i1) { 9634 // The operand is not yet ready (see comment below). 9635 PromOps.insert(PromOps.begin(), PromOp); 9636 continue; 9637 } 9638 9639 SDValue RepValue = PromOp.getOperand(0); 9640 if (isa<ConstantSDNode>(RepValue)) 9641 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue); 9642 9643 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue); 9644 continue; 9645 } 9646 9647 unsigned C; 9648 switch (PromOp.getOpcode()) { 9649 default: C = 0; break; 9650 case ISD::SELECT: C = 1; break; 9651 case ISD::SELECT_CC: C = 2; break; 9652 } 9653 9654 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 9655 PromOp.getOperand(C).getValueType() != MVT::i1) || 9656 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 9657 PromOp.getOperand(C+1).getValueType() != MVT::i1)) { 9658 // The to-be-promoted operands of this node have not yet been 9659 // promoted (this should be rare because we're going through the 9660 // list backward, but if one of the operands has several users in 9661 // this cluster of to-be-promoted nodes, it is possible). 9662 PromOps.insert(PromOps.begin(), PromOp); 9663 continue; 9664 } 9665 9666 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 9667 PromOp.getNode()->op_end()); 9668 9669 // If there are any constant inputs, make sure they're replaced now. 9670 for (unsigned i = 0; i < 2; ++i) 9671 if (isa<ConstantSDNode>(Ops[C+i])) 9672 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]); 9673 9674 DAG.ReplaceAllUsesOfValueWith(PromOp, 9675 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops)); 9676 } 9677 9678 // Now we're left with the initial truncation itself. 9679 if (N->getOpcode() == ISD::TRUNCATE) 9680 return N->getOperand(0); 9681 9682 // Otherwise, this is a comparison. The operands to be compared have just 9683 // changed type (to i1), but everything else is the same. 9684 return SDValue(N, 0); 9685 } 9686 9687 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N, 9688 DAGCombinerInfo &DCI) const { 9689 SelectionDAG &DAG = DCI.DAG; 9690 SDLoc dl(N); 9691 9692 // If we're tracking CR bits, we need to be careful that we don't have: 9693 // zext(binary-ops(trunc(x), trunc(y))) 9694 // or 9695 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...) 9696 // such that we're unnecessarily moving things into CR bits that can more 9697 // efficiently stay in GPRs. 
Note that if we're not certain that the high 9698 // bits are set as required by the final extension, we still may need to do 9699 // some masking to get the proper behavior. 9700 9701 // This same functionality is important on PPC64 when dealing with 9702 // 32-to-64-bit extensions; these occur often when 32-bit values are used as 9703 // the return values of functions. Because it is so similar, it is handled 9704 // here as well. 9705 9706 if (N->getValueType(0) != MVT::i32 && 9707 N->getValueType(0) != MVT::i64) 9708 return SDValue(); 9709 9710 if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) || 9711 (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64()))) 9712 return SDValue(); 9713 9714 if (N->getOperand(0).getOpcode() != ISD::AND && 9715 N->getOperand(0).getOpcode() != ISD::OR && 9716 N->getOperand(0).getOpcode() != ISD::XOR && 9717 N->getOperand(0).getOpcode() != ISD::SELECT && 9718 N->getOperand(0).getOpcode() != ISD::SELECT_CC) 9719 return SDValue(); 9720 9721 SmallVector<SDValue, 4> Inputs; 9722 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps; 9723 SmallPtrSet<SDNode *, 16> Visited; 9724 9725 // Visit all inputs, collect all binary operations (and, or, xor and 9726 // select) that are all fed by truncations. 9727 while (!BinOps.empty()) { 9728 SDValue BinOp = BinOps.back(); 9729 BinOps.pop_back(); 9730 9731 if (!Visited.insert(BinOp.getNode()).second) 9732 continue; 9733 9734 PromOps.push_back(BinOp); 9735 9736 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 9737 // The condition of the select is not promoted. 9738 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 9739 continue; 9740 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 9741 continue; 9742 9743 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 9744 isa<ConstantSDNode>(BinOp.getOperand(i))) { 9745 Inputs.push_back(BinOp.getOperand(i)); 9746 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 9747 BinOp.getOperand(i).getOpcode() == ISD::OR || 9748 BinOp.getOperand(i).getOpcode() == ISD::XOR || 9749 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 9750 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) { 9751 BinOps.push_back(BinOp.getOperand(i)); 9752 } else { 9753 // We have an input that is not a truncation or another binary 9754 // operation; we'll abort this transformation. 9755 return SDValue(); 9756 } 9757 } 9758 } 9759 9760 // The operands of a select that must be truncated when the select is 9761 // promoted because the operand is actually part of the to-be-promoted set. 9762 DenseMap<SDNode *, EVT> SelectTruncOp[2]; 9763 9764 // Make sure that this is a self-contained cluster of operations (which 9765 // is not quite the same thing as saying that everything has only one 9766 // use). 9767 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 9768 if (isa<ConstantSDNode>(Inputs[i])) 9769 continue; 9770 9771 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 9772 UE = Inputs[i].getNode()->use_end(); 9773 UI != UE; ++UI) { 9774 SDNode *User = *UI; 9775 if (User != N && !Visited.count(User)) 9776 return SDValue(); 9777 9778 // If we're going to promote the non-output-value operand(s) or SELECT or 9779 // SELECT_CC, record them for truncation. 
9780 if (User->getOpcode() == ISD::SELECT) { 9781 if (User->getOperand(0) == Inputs[i]) 9782 SelectTruncOp[0].insert(std::make_pair(User, 9783 User->getOperand(0).getValueType())); 9784 } else if (User->getOpcode() == ISD::SELECT_CC) { 9785 if (User->getOperand(0) == Inputs[i]) 9786 SelectTruncOp[0].insert(std::make_pair(User, 9787 User->getOperand(0).getValueType())); 9788 if (User->getOperand(1) == Inputs[i]) 9789 SelectTruncOp[1].insert(std::make_pair(User, 9790 User->getOperand(1).getValueType())); 9791 } 9792 } 9793 } 9794 9795 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 9796 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 9797 UE = PromOps[i].getNode()->use_end(); 9798 UI != UE; ++UI) { 9799 SDNode *User = *UI; 9800 if (User != N && !Visited.count(User)) 9801 return SDValue(); 9802 9803 // If we're going to promote the non-output-value operand(s) or SELECT or 9804 // SELECT_CC, record them for truncation. 9805 if (User->getOpcode() == ISD::SELECT) { 9806 if (User->getOperand(0) == PromOps[i]) 9807 SelectTruncOp[0].insert(std::make_pair(User, 9808 User->getOperand(0).getValueType())); 9809 } else if (User->getOpcode() == ISD::SELECT_CC) { 9810 if (User->getOperand(0) == PromOps[i]) 9811 SelectTruncOp[0].insert(std::make_pair(User, 9812 User->getOperand(0).getValueType())); 9813 if (User->getOperand(1) == PromOps[i]) 9814 SelectTruncOp[1].insert(std::make_pair(User, 9815 User->getOperand(1).getValueType())); 9816 } 9817 } 9818 } 9819 9820 unsigned PromBits = N->getOperand(0).getValueSizeInBits(); 9821 bool ReallyNeedsExt = false; 9822 if (N->getOpcode() != ISD::ANY_EXTEND) { 9823 // If all of the inputs are not already sign/zero extended, then 9824 // we'll still need to do that at the end. 9825 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 9826 if (isa<ConstantSDNode>(Inputs[i])) 9827 continue; 9828 9829 unsigned OpBits = 9830 Inputs[i].getOperand(0).getValueSizeInBits(); 9831 assert(PromBits < OpBits && "Truncation not to a smaller bit count?"); 9832 9833 if ((N->getOpcode() == ISD::ZERO_EXTEND && 9834 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0), 9835 APInt::getHighBitsSet(OpBits, 9836 OpBits-PromBits))) || 9837 (N->getOpcode() == ISD::SIGN_EXTEND && 9838 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) < 9839 (OpBits-(PromBits-1)))) { 9840 ReallyNeedsExt = true; 9841 break; 9842 } 9843 } 9844 } 9845 9846 // Replace all inputs, either with the truncation operand, or a 9847 // truncation or extension to the final output type. 9848 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 9849 // Constant inputs need to be replaced with the to-be-promoted nodes that 9850 // use them because they might have users outside of the cluster of 9851 // promoted nodes. 9852 if (isa<ConstantSDNode>(Inputs[i])) 9853 continue; 9854 9855 SDValue InSrc = Inputs[i].getOperand(0); 9856 if (Inputs[i].getValueType() == N->getValueType(0)) 9857 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc); 9858 else if (N->getOpcode() == ISD::SIGN_EXTEND) 9859 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 9860 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0))); 9861 else if (N->getOpcode() == ISD::ZERO_EXTEND) 9862 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 9863 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0))); 9864 else 9865 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 9866 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0))); 9867 } 9868 9869 // Replace all operations (these are all the same, but have a different 9870 // (promoted) return type). 
DAG.getNode will validate that the types of 9871 // a binary operator match, so go through the list in reverse so that 9872 // we've likely promoted both operands first. 9873 while (!PromOps.empty()) { 9874 SDValue PromOp = PromOps.back(); 9875 PromOps.pop_back(); 9876 9877 unsigned C; 9878 switch (PromOp.getOpcode()) { 9879 default: C = 0; break; 9880 case ISD::SELECT: C = 1; break; 9881 case ISD::SELECT_CC: C = 2; break; 9882 } 9883 9884 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 9885 PromOp.getOperand(C).getValueType() != N->getValueType(0)) || 9886 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 9887 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) { 9888 // The to-be-promoted operands of this node have not yet been 9889 // promoted (this should be rare because we're going through the 9890 // list backward, but if one of the operands has several users in 9891 // this cluster of to-be-promoted nodes, it is possible). 9892 PromOps.insert(PromOps.begin(), PromOp); 9893 continue; 9894 } 9895 9896 // For SELECT and SELECT_CC nodes, we do a similar check for any 9897 // to-be-promoted comparison inputs. 9898 if (PromOp.getOpcode() == ISD::SELECT || 9899 PromOp.getOpcode() == ISD::SELECT_CC) { 9900 if ((SelectTruncOp[0].count(PromOp.getNode()) && 9901 PromOp.getOperand(0).getValueType() != N->getValueType(0)) || 9902 (SelectTruncOp[1].count(PromOp.getNode()) && 9903 PromOp.getOperand(1).getValueType() != N->getValueType(0))) { 9904 PromOps.insert(PromOps.begin(), PromOp); 9905 continue; 9906 } 9907 } 9908 9909 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 9910 PromOp.getNode()->op_end()); 9911 9912 // If this node has constant inputs, then they'll need to be promoted here. 9913 for (unsigned i = 0; i < 2; ++i) { 9914 if (!isa<ConstantSDNode>(Ops[C+i])) 9915 continue; 9916 if (Ops[C+i].getValueType() == N->getValueType(0)) 9917 continue; 9918 9919 if (N->getOpcode() == ISD::SIGN_EXTEND) 9920 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 9921 else if (N->getOpcode() == ISD::ZERO_EXTEND) 9922 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 9923 else 9924 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 9925 } 9926 9927 // If we've promoted the comparison inputs of a SELECT or SELECT_CC, 9928 // truncate them again to the original value type. 9929 if (PromOp.getOpcode() == ISD::SELECT || 9930 PromOp.getOpcode() == ISD::SELECT_CC) { 9931 auto SI0 = SelectTruncOp[0].find(PromOp.getNode()); 9932 if (SI0 != SelectTruncOp[0].end()) 9933 Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]); 9934 auto SI1 = SelectTruncOp[1].find(PromOp.getNode()); 9935 if (SI1 != SelectTruncOp[1].end()) 9936 Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]); 9937 } 9938 9939 DAG.ReplaceAllUsesOfValueWith(PromOp, 9940 DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops)); 9941 } 9942 9943 // Now we're left with the initial extension itself. 9944 if (!ReallyNeedsExt) 9945 return N->getOperand(0); 9946 9947 // To zero extend, just mask off everything except for the first bit (in the 9948 // i1 case). 
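// For example, zero extending an i1 to i32 (PromBits == 1) becomes an AND
// with 0x1, while the sign-extend path below becomes (sra (shl x, 31), 31),
// which replicates bit 0 across the register.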
9949 if (N->getOpcode() == ISD::ZERO_EXTEND) 9950 return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0), 9951 DAG.getConstant(APInt::getLowBitsSet( 9952 N->getValueSizeInBits(0), PromBits), 9953 dl, N->getValueType(0))); 9954 9955 assert(N->getOpcode() == ISD::SIGN_EXTEND && 9956 "Invalid extension type"); 9957 EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout()); 9958 SDValue ShiftCst = 9959 DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy); 9960 return DAG.getNode(ISD::SRA, dl, N->getValueType(0), 9961 DAG.getNode(ISD::SHL, dl, N->getValueType(0), 9962 N->getOperand(0), ShiftCst), ShiftCst); 9963 } 9964 9965 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N, 9966 DAGCombinerInfo &DCI) const { 9967 assert((N->getOpcode() == ISD::SINT_TO_FP || 9968 N->getOpcode() == ISD::UINT_TO_FP) && 9969 "Need an int -> FP conversion node here"); 9970 9971 if (!Subtarget.has64BitSupport()) 9972 return SDValue(); 9973 9974 SelectionDAG &DAG = DCI.DAG; 9975 SDLoc dl(N); 9976 SDValue Op(N, 0); 9977 9978 // Don't handle ppc_fp128 here or i1 conversions. 9979 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 9980 return SDValue(); 9981 if (Op.getOperand(0).getValueType() == MVT::i1) 9982 return SDValue(); 9983 9984 // For i32 intermediate values, unfortunately, the conversion functions 9985 // leave the upper 32 bits of the value are undefined. Within the set of 9986 // scalar instructions, we have no method for zero- or sign-extending the 9987 // value. Thus, we cannot handle i32 intermediate values here. 9988 if (Op.getOperand(0).getValueType() == MVT::i32) 9989 return SDValue(); 9990 9991 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) && 9992 "UINT_TO_FP is supported only with FPCVT"); 9993 9994 // If we have FCFIDS, then use it when converting to single-precision. 9995 // Otherwise, convert to double-precision and then round. 9996 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 9997 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS 9998 : PPCISD::FCFIDS) 9999 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU 10000 : PPCISD::FCFID); 10001 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 10002 ? MVT::f32 10003 : MVT::f64; 10004 10005 // If we're converting from a float, to an int, and back to a float again, 10006 // then we don't need the store/load pair at all. 10007 if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT && 10008 Subtarget.hasFPCVT()) || 10009 (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) { 10010 SDValue Src = Op.getOperand(0).getOperand(0); 10011 if (Src.getValueType() == MVT::f32) { 10012 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 10013 DCI.AddToWorklist(Src.getNode()); 10014 } else if (Src.getValueType() != MVT::f64) { 10015 // Make sure that we don't pick up a ppc_fp128 source value. 10016 return SDValue(); 10017 } 10018 10019 unsigned FCTOp = 10020 Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? 
PPCISD::FCTIDZ : 10021 PPCISD::FCTIDUZ; 10022 10023 SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src); 10024 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp); 10025 10026 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) { 10027 FP = DAG.getNode(ISD::FP_ROUND, dl, 10028 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 10029 DCI.AddToWorklist(FP.getNode()); 10030 } 10031 10032 return FP; 10033 } 10034 10035 return SDValue(); 10036 } 10037 10038 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for 10039 // builtins) into loads with swaps. 10040 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N, 10041 DAGCombinerInfo &DCI) const { 10042 SelectionDAG &DAG = DCI.DAG; 10043 SDLoc dl(N); 10044 SDValue Chain; 10045 SDValue Base; 10046 MachineMemOperand *MMO; 10047 10048 switch (N->getOpcode()) { 10049 default: 10050 llvm_unreachable("Unexpected opcode for little endian VSX load"); 10051 case ISD::LOAD: { 10052 LoadSDNode *LD = cast<LoadSDNode>(N); 10053 Chain = LD->getChain(); 10054 Base = LD->getBasePtr(); 10055 MMO = LD->getMemOperand(); 10056 // If the MMO suggests this isn't a load of a full vector, leave 10057 // things alone. For a built-in, we have to make the change for 10058 // correctness, so if there is a size problem that will be a bug. 10059 if (MMO->getSize() < 16) 10060 return SDValue(); 10061 break; 10062 } 10063 case ISD::INTRINSIC_W_CHAIN: { 10064 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 10065 Chain = Intrin->getChain(); 10066 // Similarly to the store case below, Intrin->getBasePtr() doesn't get 10067 // us what we want. Get operand 2 instead. 10068 Base = Intrin->getOperand(2); 10069 MMO = Intrin->getMemOperand(); 10070 break; 10071 } 10072 } 10073 10074 MVT VecTy = N->getValueType(0).getSimpleVT(); 10075 SDValue LoadOps[] = { Chain, Base }; 10076 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl, 10077 DAG.getVTList(VecTy, MVT::Other), 10078 LoadOps, VecTy, MMO); 10079 DCI.AddToWorklist(Load.getNode()); 10080 Chain = Load.getValue(1); 10081 SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl, 10082 DAG.getVTList(VecTy, MVT::Other), Chain, Load); 10083 DCI.AddToWorklist(Swap.getNode()); 10084 return Swap; 10085 } 10086 10087 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for 10088 // builtins) into stores with swaps. 10089 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N, 10090 DAGCombinerInfo &DCI) const { 10091 SelectionDAG &DAG = DCI.DAG; 10092 SDLoc dl(N); 10093 SDValue Chain; 10094 SDValue Base; 10095 unsigned SrcOpnd; 10096 MachineMemOperand *MMO; 10097 10098 switch (N->getOpcode()) { 10099 default: 10100 llvm_unreachable("Unexpected opcode for little endian VSX store"); 10101 case ISD::STORE: { 10102 StoreSDNode *ST = cast<StoreSDNode>(N); 10103 Chain = ST->getChain(); 10104 Base = ST->getBasePtr(); 10105 MMO = ST->getMemOperand(); 10106 SrcOpnd = 1; 10107 // If the MMO suggests this isn't a store of a full vector, leave 10108 // things alone. For a built-in, we have to make the change for 10109 // correctness, so if there is a size problem that will be a bug. 10110 if (MMO->getSize() < 16) 10111 return SDValue(); 10112 break; 10113 } 10114 case ISD::INTRINSIC_VOID: { 10115 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 10116 Chain = Intrin->getChain(); 10117 // Intrin->getBasePtr() oddly does not get what we want. 
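// For these store intrinsics the operands are (chain, intrinsic id, value,
// pointer), so operand 2 is the value being stored (SrcOpnd below) and
// operand 3 is the base pointer -- mirroring the load case above, where the
// pointer is operand 2.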
10118 Base = Intrin->getOperand(3); 10119 MMO = Intrin->getMemOperand(); 10120 SrcOpnd = 2; 10121 break; 10122 } 10123 } 10124 10125 SDValue Src = N->getOperand(SrcOpnd); 10126 MVT VecTy = Src.getValueType().getSimpleVT(); 10127 SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl, 10128 DAG.getVTList(VecTy, MVT::Other), Chain, Src); 10129 DCI.AddToWorklist(Swap.getNode()); 10130 Chain = Swap.getValue(1); 10131 SDValue StoreOps[] = { Chain, Swap, Base }; 10132 SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl, 10133 DAG.getVTList(MVT::Other), 10134 StoreOps, VecTy, MMO); 10135 DCI.AddToWorklist(Store.getNode()); 10136 return Store; 10137 } 10138 10139 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, 10140 DAGCombinerInfo &DCI) const { 10141 SelectionDAG &DAG = DCI.DAG; 10142 SDLoc dl(N); 10143 switch (N->getOpcode()) { 10144 default: break; 10145 case PPCISD::SHL: 10146 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 10147 if (C->isNullValue()) // 0 << V -> 0. 10148 return N->getOperand(0); 10149 } 10150 break; 10151 case PPCISD::SRL: 10152 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 10153 if (C->isNullValue()) // 0 >>u V -> 0. 10154 return N->getOperand(0); 10155 } 10156 break; 10157 case PPCISD::SRA: 10158 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 10159 if (C->isNullValue() || // 0 >>s V -> 0. 10160 C->isAllOnesValue()) // -1 >>s V -> -1. 10161 return N->getOperand(0); 10162 } 10163 break; 10164 case ISD::SIGN_EXTEND: 10165 case ISD::ZERO_EXTEND: 10166 case ISD::ANY_EXTEND: 10167 return DAGCombineExtBoolTrunc(N, DCI); 10168 case ISD::TRUNCATE: 10169 case ISD::SETCC: 10170 case ISD::SELECT_CC: 10171 return DAGCombineTruncBoolExt(N, DCI); 10172 case ISD::SINT_TO_FP: 10173 case ISD::UINT_TO_FP: 10174 return combineFPToIntToFP(N, DCI); 10175 case ISD::STORE: { 10176 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)). 10177 if (Subtarget.hasSTFIWX() && !cast<StoreSDNode>(N)->isTruncatingStore() && 10178 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT && 10179 N->getOperand(1).getValueType() == MVT::i32 && 10180 N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) { 10181 SDValue Val = N->getOperand(1).getOperand(0); 10182 if (Val.getValueType() == MVT::f32) { 10183 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 10184 DCI.AddToWorklist(Val.getNode()); 10185 } 10186 Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val); 10187 DCI.AddToWorklist(Val.getNode()); 10188 10189 SDValue Ops[] = { 10190 N->getOperand(0), Val, N->getOperand(2), 10191 DAG.getValueType(N->getOperand(1).getValueType()) 10192 }; 10193 10194 Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 10195 DAG.getVTList(MVT::Other), Ops, 10196 cast<StoreSDNode>(N)->getMemoryVT(), 10197 cast<StoreSDNode>(N)->getMemOperand()); 10198 DCI.AddToWorklist(Val.getNode()); 10199 return Val; 10200 } 10201 10202 // Turn STORE (BSWAP) -> sthbrx/stwbrx. 10203 if (cast<StoreSDNode>(N)->isUnindexed() && 10204 N->getOperand(1).getOpcode() == ISD::BSWAP && 10205 N->getOperand(1).getNode()->hasOneUse() && 10206 (N->getOperand(1).getValueType() == MVT::i32 || 10207 N->getOperand(1).getValueType() == MVT::i16 || 10208 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && 10209 N->getOperand(1).getValueType() == MVT::i64))) { 10210 SDValue BSwapOp = N->getOperand(1).getOperand(0); 10211 // Do an any-extend to 32-bits if this is a half-word input. 
10212 if (BSwapOp.getValueType() == MVT::i16)
10213 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
10214
10215 SDValue Ops[] = {
10216 N->getOperand(0), BSwapOp, N->getOperand(2),
10217 DAG.getValueType(N->getOperand(1).getValueType())
10218 };
10219 return
10220 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
10221 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
10222 cast<StoreSDNode>(N)->getMemOperand());
10223 }
10224
10225 // For little endian, VSX stores require generating xxswapd/stxvd2x.
10226 EVT VT = N->getOperand(1).getValueType();
10227 if (VT.isSimple()) {
10228 MVT StoreVT = VT.getSimpleVT();
10229 if (Subtarget.hasVSX() && Subtarget.isLittleEndian() &&
10230 (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
10231 StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
10232 return expandVSXStoreForLE(N, DCI);
10233 }
10234 break;
10235 }
10236 case ISD::LOAD: {
10237 LoadSDNode *LD = cast<LoadSDNode>(N);
10238 EVT VT = LD->getValueType(0);
10239
10240 // For little endian, VSX loads require generating lxvd2x/xxswapd.
10241 if (VT.isSimple()) {
10242 MVT LoadVT = VT.getSimpleVT();
10243 if (Subtarget.hasVSX() && Subtarget.isLittleEndian() &&
10244 (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
10245 LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
10246 return expandVSXLoadForLE(N, DCI);
10247 }
10248
10249 EVT MemVT = LD->getMemoryVT();
10250 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
10251 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
10252 Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
10253 unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy);
10254 if (LD->isUnindexed() && VT.isVector() &&
10255 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
10256 // P8 and later hardware should just use LOAD.
10257 !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 ||
10258 VT == MVT::v4i32 || VT == MVT::v4f32)) ||
10259 (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) &&
10260 LD->getAlignment() >= ScalarABIAlignment)) &&
10261 LD->getAlignment() < ABIAlignment) {
10262 // This is a type-legal unaligned Altivec or QPX load.
10263 SDValue Chain = LD->getChain();
10264 SDValue Ptr = LD->getBasePtr();
10265 bool isLittleEndian = Subtarget.isLittleEndian();
10266
10267 // This implements the loading of unaligned vectors as described in
10268 // the venerable Apple Velocity Engine overview. Specifically:
10269 // https://developer.apple.com/hardwaredrivers/ve/alignment.html
10270 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
10271 //
10272 // The general idea is to expand a sequence of one or more unaligned
10273 // loads into an alignment-based permutation-control instruction (lvsl
10274 // or lvsr), a series of regular vector loads (which always truncate
10275 // their input address to an aligned address), and a series of
10276 // permutations. The results of these permutations are the requested
10277 // loaded values. The trick is that the last "extra" load is not taken
10278 // from the address you might suspect (sizeof(vector) bytes after the
10279 // last requested load), but rather sizeof(vector) - 1 bytes after the
10280 // last requested vector. The point of this is to avoid a page fault if
10281 // the base address happened to be aligned. This works because if the
10282 // base address is aligned, then adding less than a full vector length
10283 // will cause the last vector in the sequence to be (re)loaded.
10284 // Otherwise, the next vector will be fetched as you might suspect was 10285 // necessary. 10286 10287 // We might be able to reuse the permutation generation from 10288 // a different base address offset from this one by an aligned amount. 10289 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this 10290 // optimization later. 10291 Intrinsic::ID Intr, IntrLD, IntrPerm; 10292 MVT PermCntlTy, PermTy, LDTy; 10293 if (Subtarget.hasAltivec()) { 10294 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr : 10295 Intrinsic::ppc_altivec_lvsl; 10296 IntrLD = Intrinsic::ppc_altivec_lvx; 10297 IntrPerm = Intrinsic::ppc_altivec_vperm; 10298 PermCntlTy = MVT::v16i8; 10299 PermTy = MVT::v4i32; 10300 LDTy = MVT::v4i32; 10301 } else { 10302 Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld : 10303 Intrinsic::ppc_qpx_qvlpcls; 10304 IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd : 10305 Intrinsic::ppc_qpx_qvlfs; 10306 IntrPerm = Intrinsic::ppc_qpx_qvfperm; 10307 PermCntlTy = MVT::v4f64; 10308 PermTy = MVT::v4f64; 10309 LDTy = MemVT.getSimpleVT(); 10310 } 10311 10312 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy); 10313 10314 // Create the new MMO for the new base load. It is like the original MMO, 10315 // but represents an area in memory almost twice the vector size centered 10316 // on the original address. If the address is unaligned, we might start 10317 // reading up to (sizeof(vector)-1) bytes below the address of the 10318 // original unaligned load. 10319 MachineFunction &MF = DAG.getMachineFunction(); 10320 MachineMemOperand *BaseMMO = 10321 MF.getMachineMemOperand(LD->getMemOperand(), 10322 -(long)MemVT.getStoreSize()+1, 10323 2*MemVT.getStoreSize()-1); 10324 10325 // Create the new base load. 10326 SDValue LDXIntID = 10327 DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout())); 10328 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr }; 10329 SDValue BaseLoad = 10330 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 10331 DAG.getVTList(PermTy, MVT::Other), 10332 BaseLoadOps, LDTy, BaseMMO); 10333 10334 // Note that the value of IncOffset (which is provided to the next 10335 // load's pointer info offset value, and thus used to calculate the 10336 // alignment), and the value of IncValue (which is actually used to 10337 // increment the pointer value) are different! This is because we 10338 // require the next load to appear to be aligned, even though it 10339 // is actually offset from the base pointer by a lesser amount. 10340 int IncOffset = VT.getSizeInBits() / 8; 10341 int IncValue = IncOffset; 10342 10343 // Walk (both up and down) the chain looking for another load at the real 10344 // (aligned) offset (the alignment of the other load does not matter in 10345 // this case). If found, then do not use the offset reduction trick, as 10346 // that will prevent the loads from being later combined (as they would 10347 // otherwise be duplicates). 
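      // To make the overall shape concrete (illustrative only; register names
      // are assumed, big-endian Altivec case):
      //   lvsl  vPC,  0, rBase      ; permute control from the low address bits
      //   lvx   vLo,  0, rBase      ; aligned load at or below the base
      //   addi  rNext, rBase, 15    ; sizeof(vector)-1, or 16; see IncValue below
      //   lvx   vHi,  0, rNext      ; the "extra" aligned load
      //   vperm vResult, vLo, vHi, vPC
      // For little endian, lvsr is used instead and the vperm inputs are
      // swapped, as handled further below.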
10348 if (!findConsecutiveLoad(LD, DAG)) 10349 --IncValue; 10350 10351 SDValue Increment = 10352 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout())); 10353 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 10354 10355 MachineMemOperand *ExtraMMO = 10356 MF.getMachineMemOperand(LD->getMemOperand(), 10357 1, 2*MemVT.getStoreSize()-1); 10358 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr }; 10359 SDValue ExtraLoad = 10360 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 10361 DAG.getVTList(PermTy, MVT::Other), 10362 ExtraLoadOps, LDTy, ExtraMMO); 10363 10364 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 10365 BaseLoad.getValue(1), ExtraLoad.getValue(1)); 10366 10367 // Because vperm has a big-endian bias, we must reverse the order 10368 // of the input vectors and complement the permute control vector 10369 // when generating little endian code. We have already handled the 10370 // latter by using lvsr instead of lvsl, so just reverse BaseLoad 10371 // and ExtraLoad here. 10372 SDValue Perm; 10373 if (isLittleEndian) 10374 Perm = BuildIntrinsicOp(IntrPerm, 10375 ExtraLoad, BaseLoad, PermCntl, DAG, dl); 10376 else 10377 Perm = BuildIntrinsicOp(IntrPerm, 10378 BaseLoad, ExtraLoad, PermCntl, DAG, dl); 10379 10380 if (VT != PermTy) 10381 Perm = Subtarget.hasAltivec() ? 10382 DAG.getNode(ISD::BITCAST, dl, VT, Perm) : 10383 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX 10384 DAG.getTargetConstant(1, dl, MVT::i64)); 10385 // second argument is 1 because this rounding 10386 // is always exact. 10387 10388 // The output of the permutation is our loaded result, the TokenFactor is 10389 // our new chain. 10390 DCI.CombineTo(N, Perm, TF); 10391 return SDValue(N, 0); 10392 } 10393 } 10394 break; 10395 case ISD::INTRINSIC_WO_CHAIN: { 10396 bool isLittleEndian = Subtarget.isLittleEndian(); 10397 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 10398 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr 10399 : Intrinsic::ppc_altivec_lvsl); 10400 if ((IID == Intr || 10401 IID == Intrinsic::ppc_qpx_qvlpcld || 10402 IID == Intrinsic::ppc_qpx_qvlpcls) && 10403 N->getOperand(1)->getOpcode() == ISD::ADD) { 10404 SDValue Add = N->getOperand(1); 10405 10406 int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ? 10407 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */; 10408 10409 if (DAG.MaskedValueIsZero( 10410 Add->getOperand(1), 10411 APInt::getAllOnesValue(Bits /* alignment */) 10412 .zext( 10413 Add.getValueType().getScalarType().getSizeInBits()))) { 10414 SDNode *BasePtr = Add->getOperand(0).getNode(); 10415 for (SDNode::use_iterator UI = BasePtr->use_begin(), 10416 UE = BasePtr->use_end(); 10417 UI != UE; ++UI) { 10418 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 10419 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) { 10420 // We've found another LVSL/LVSR, and this address is an aligned 10421 // multiple of that one. The results will be the same, so use the 10422 // one we've just found instead. 
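                // (For instance, with an offset that is a multiple of 16
                // bytes, the permute control depends only on the low four
                // address bits that lvsl/lvsr actually use (five bits for the
                // 32-byte QPX variants), so both intrinsic calls yield
                // identical results; MaskedValueIsZero proved above that the
                // added offset leaves those bits untouched.)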
10423 10424 return SDValue(*UI, 0); 10425 } 10426 } 10427 } 10428 10429 if (isa<ConstantSDNode>(Add->getOperand(1))) { 10430 SDNode *BasePtr = Add->getOperand(0).getNode(); 10431 for (SDNode::use_iterator UI = BasePtr->use_begin(), 10432 UE = BasePtr->use_end(); UI != UE; ++UI) { 10433 if (UI->getOpcode() == ISD::ADD && 10434 isa<ConstantSDNode>(UI->getOperand(1)) && 10435 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() - 10436 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) % 10437 (1ULL << Bits) == 0) { 10438 SDNode *OtherAdd = *UI; 10439 for (SDNode::use_iterator VI = OtherAdd->use_begin(), 10440 VE = OtherAdd->use_end(); VI != VE; ++VI) { 10441 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 10442 cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) { 10443 return SDValue(*VI, 0); 10444 } 10445 } 10446 } 10447 } 10448 } 10449 } 10450 } 10451 10452 break; 10453 case ISD::INTRINSIC_W_CHAIN: { 10454 // For little endian, VSX loads require generating lxvd2x/xxswapd. 10455 if (Subtarget.hasVSX() && Subtarget.isLittleEndian()) { 10456 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10457 default: 10458 break; 10459 case Intrinsic::ppc_vsx_lxvw4x: 10460 case Intrinsic::ppc_vsx_lxvd2x: 10461 return expandVSXLoadForLE(N, DCI); 10462 } 10463 } 10464 break; 10465 } 10466 case ISD::INTRINSIC_VOID: { 10467 // For little endian, VSX stores require generating xxswapd/stxvd2x. 10468 if (Subtarget.hasVSX() && Subtarget.isLittleEndian()) { 10469 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10470 default: 10471 break; 10472 case Intrinsic::ppc_vsx_stxvw4x: 10473 case Intrinsic::ppc_vsx_stxvd2x: 10474 return expandVSXStoreForLE(N, DCI); 10475 } 10476 } 10477 break; 10478 } 10479 case ISD::BSWAP: 10480 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 10481 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && 10482 N->getOperand(0).hasOneUse() && 10483 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 || 10484 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && 10485 N->getValueType(0) == MVT::i64))) { 10486 SDValue Load = N->getOperand(0); 10487 LoadSDNode *LD = cast<LoadSDNode>(Load); 10488 // Create the byte-swapping load. 10489 SDValue Ops[] = { 10490 LD->getChain(), // Chain 10491 LD->getBasePtr(), // Ptr 10492 DAG.getValueType(N->getValueType(0)) // VT 10493 }; 10494 SDValue BSLoad = 10495 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl, 10496 DAG.getVTList(N->getValueType(0) == MVT::i64 ? 10497 MVT::i64 : MVT::i32, MVT::Other), 10498 Ops, LD->getMemoryVT(), LD->getMemOperand()); 10499 10500 // If this is an i16 load, insert the truncate. 10501 SDValue ResVal = BSLoad; 10502 if (N->getValueType(0) == MVT::i16) 10503 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad); 10504 10505 // First, combine the bswap away. This makes the value produced by the 10506 // load dead. 10507 DCI.CombineTo(N, ResVal); 10508 10509 // Next, combine the load away, we give it a bogus result value but a real 10510 // chain result. The result value is dead because the bswap is dead. 10511 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); 10512 10513 // Return N so it doesn't get rechecked! 10514 return SDValue(N, 0); 10515 } 10516 10517 break; 10518 case PPCISD::VCMP: { 10519 // If a VCMPo node already exists with exactly the same operands as this 10520 // node, use its result instead of this node (VCMPo computes both a CR6 and 10521 // a normal output). 
10522 // 10523 if (!N->getOperand(0).hasOneUse() && 10524 !N->getOperand(1).hasOneUse() && 10525 !N->getOperand(2).hasOneUse()) { 10526 10527 // Scan all of the users of the LHS, looking for VCMPo's that match. 10528 SDNode *VCMPoNode = nullptr; 10529 10530 SDNode *LHSN = N->getOperand(0).getNode(); 10531 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 10532 UI != E; ++UI) 10533 if (UI->getOpcode() == PPCISD::VCMPo && 10534 UI->getOperand(1) == N->getOperand(1) && 10535 UI->getOperand(2) == N->getOperand(2) && 10536 UI->getOperand(0) == N->getOperand(0)) { 10537 VCMPoNode = *UI; 10538 break; 10539 } 10540 10541 // If there is no VCMPo node, or if the flag value has a single use, don't 10542 // transform this. 10543 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 10544 break; 10545 10546 // Look at the (necessarily single) use of the flag value. If it has a 10547 // chain, this transformation is more complex. Note that multiple things 10548 // could use the value result, which we should ignore. 10549 SDNode *FlagUser = nullptr; 10550 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 10551 FlagUser == nullptr; ++UI) { 10552 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 10553 SDNode *User = *UI; 10554 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 10555 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) { 10556 FlagUser = User; 10557 break; 10558 } 10559 } 10560 } 10561 10562 // If the user is a MFOCRF instruction, we know this is safe. 10563 // Otherwise we give up for right now. 10564 if (FlagUser->getOpcode() == PPCISD::MFOCRF) 10565 return SDValue(VCMPoNode, 0); 10566 } 10567 break; 10568 } 10569 case ISD::BRCOND: { 10570 SDValue Cond = N->getOperand(1); 10571 SDValue Target = N->getOperand(2); 10572 10573 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN && 10574 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() == 10575 Intrinsic::ppc_is_decremented_ctr_nonzero) { 10576 10577 // We now need to make the intrinsic dead (it cannot be instruction 10578 // selected). 10579 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0)); 10580 assert(Cond.getNode()->hasOneUse() && 10581 "Counter decrement has more than one use"); 10582 10583 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other, 10584 N->getOperand(0), Target); 10585 } 10586 } 10587 break; 10588 case ISD::BR_CC: { 10589 // If this is a branch on an altivec predicate comparison, lower this so 10590 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This 10591 // lowering is done pre-legalize, because the legalizer lowers the predicate 10592 // compare down to code that is difficult to reassemble. 10593 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 10594 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 10595 10596 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero 10597 // value. If so, pass-through the AND to get to the intrinsic. 
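    // Illustrative sketch of what this combine targets (value names assumed;
    // the intrinsic is the one emitted by the CTR-loops pass):
    //   %more = call i1 @llvm.ppc.is.decremented.ctr.nonzero()
    //   br i1 %more, label %loop.body, label %loop.exit
    // arrives here as a BR_CC against a constant and is rewritten into a
    // PPCISD::BDNZ/BDZ node, which selects to a single bdnz/bdz instruction
    // rather than an mfctr/compare/branch sequence. (The AND pass-through
    // described above is the first step in recognizing this pattern.)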
10598 if (LHS.getOpcode() == ISD::AND && 10599 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN && 10600 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() == 10601 Intrinsic::ppc_is_decremented_ctr_nonzero && 10602 isa<ConstantSDNode>(LHS.getOperand(1)) && 10603 !cast<ConstantSDNode>(LHS.getOperand(1))->getConstantIntValue()-> 10604 isZero()) 10605 LHS = LHS.getOperand(0); 10606 10607 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN && 10608 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 10609 Intrinsic::ppc_is_decremented_ctr_nonzero && 10610 isa<ConstantSDNode>(RHS)) { 10611 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && 10612 "Counter decrement comparison is not EQ or NE"); 10613 10614 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 10615 bool isBDNZ = (CC == ISD::SETEQ && Val) || 10616 (CC == ISD::SETNE && !Val); 10617 10618 // We now need to make the intrinsic dead (it cannot be instruction 10619 // selected). 10620 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0)); 10621 assert(LHS.getNode()->hasOneUse() && 10622 "Counter decrement has more than one use"); 10623 10624 return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other, 10625 N->getOperand(0), N->getOperand(4)); 10626 } 10627 10628 int CompareOpc; 10629 bool isDot; 10630 10631 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 10632 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 10633 getAltivecCompareInfo(LHS, CompareOpc, isDot, Subtarget)) { 10634 assert(isDot && "Can't compare against a vector result!"); 10635 10636 // If this is a comparison against something other than 0/1, then we know 10637 // that the condition is never/always true. 10638 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 10639 if (Val != 0 && Val != 1) { 10640 if (CC == ISD::SETEQ) // Cond never true, remove branch. 10641 return N->getOperand(0); 10642 // Always !=, turn it into an unconditional branch. 10643 return DAG.getNode(ISD::BR, dl, MVT::Other, 10644 N->getOperand(0), N->getOperand(4)); 10645 } 10646 10647 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 10648 10649 // Create the PPCISD altivec 'dot' comparison node. 10650 SDValue Ops[] = { 10651 LHS.getOperand(2), // LHS of compare 10652 LHS.getOperand(3), // RHS of compare 10653 DAG.getConstant(CompareOpc, dl, MVT::i32) 10654 }; 10655 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue }; 10656 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 10657 10658 // Unpack the result based on how the target uses it. 10659 PPC::Predicate CompOpc; 10660 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 10661 default: // Can't happen, don't crash on invalid number though. 10662 case 0: // Branch on the value of the EQ bit of CR6. 10663 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 10664 break; 10665 case 1: // Branch on the inverted value of the EQ bit of CR6. 10666 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 10667 break; 10668 case 2: // Branch on the value of the LT bit of CR6. 10669 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 10670 break; 10671 case 3: // Branch on the inverted value of the LT bit of CR6. 10672 CompOpc = BranchOnWhenPredTrue ? 
PPC::PRED_GE : PPC::PRED_LT; 10673 break; 10674 } 10675 10676 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 10677 DAG.getConstant(CompOpc, dl, MVT::i32), 10678 DAG.getRegister(PPC::CR6, MVT::i32), 10679 N->getOperand(4), CompNode.getValue(1)); 10680 } 10681 break; 10682 } 10683 } 10684 10685 return SDValue(); 10686 } 10687 10688 SDValue 10689 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 10690 SelectionDAG &DAG, 10691 std::vector<SDNode *> *Created) const { 10692 // fold (sdiv X, pow2) 10693 EVT VT = N->getValueType(0); 10694 if (VT == MVT::i64 && !Subtarget.isPPC64()) 10695 return SDValue(); 10696 if ((VT != MVT::i32 && VT != MVT::i64) || 10697 !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2())) 10698 return SDValue(); 10699 10700 SDLoc DL(N); 10701 SDValue N0 = N->getOperand(0); 10702 10703 bool IsNegPow2 = (-Divisor).isPowerOf2(); 10704 unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros(); 10705 SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT); 10706 10707 SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt); 10708 if (Created) 10709 Created->push_back(Op.getNode()); 10710 10711 if (IsNegPow2) { 10712 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op); 10713 if (Created) 10714 Created->push_back(Op.getNode()); 10715 } 10716 10717 return Op; 10718 } 10719 10720 //===----------------------------------------------------------------------===// 10721 // Inline Assembly Support 10722 //===----------------------------------------------------------------------===// 10723 10724 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 10725 APInt &KnownZero, 10726 APInt &KnownOne, 10727 const SelectionDAG &DAG, 10728 unsigned Depth) const { 10729 KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0); 10730 switch (Op.getOpcode()) { 10731 default: break; 10732 case PPCISD::LBRX: { 10733 // lhbrx is known to have the top bits cleared out. 10734 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16) 10735 KnownZero = 0xFFFF0000; 10736 break; 10737 } 10738 case ISD::INTRINSIC_WO_CHAIN: { 10739 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) { 10740 default: break; 10741 case Intrinsic::ppc_altivec_vcmpbfp_p: 10742 case Intrinsic::ppc_altivec_vcmpeqfp_p: 10743 case Intrinsic::ppc_altivec_vcmpequb_p: 10744 case Intrinsic::ppc_altivec_vcmpequh_p: 10745 case Intrinsic::ppc_altivec_vcmpequw_p: 10746 case Intrinsic::ppc_altivec_vcmpequd_p: 10747 case Intrinsic::ppc_altivec_vcmpgefp_p: 10748 case Intrinsic::ppc_altivec_vcmpgtfp_p: 10749 case Intrinsic::ppc_altivec_vcmpgtsb_p: 10750 case Intrinsic::ppc_altivec_vcmpgtsh_p: 10751 case Intrinsic::ppc_altivec_vcmpgtsw_p: 10752 case Intrinsic::ppc_altivec_vcmpgtsd_p: 10753 case Intrinsic::ppc_altivec_vcmpgtub_p: 10754 case Intrinsic::ppc_altivec_vcmpgtuh_p: 10755 case Intrinsic::ppc_altivec_vcmpgtuw_p: 10756 case Intrinsic::ppc_altivec_vcmpgtud_p: 10757 KnownZero = ~1U; // All bits but the low one are known to be zero. 
10758 break; 10759 } 10760 } 10761 } 10762 } 10763 10764 unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { 10765 switch (Subtarget.getDarwinDirective()) { 10766 default: break; 10767 case PPC::DIR_970: 10768 case PPC::DIR_PWR4: 10769 case PPC::DIR_PWR5: 10770 case PPC::DIR_PWR5X: 10771 case PPC::DIR_PWR6: 10772 case PPC::DIR_PWR6X: 10773 case PPC::DIR_PWR7: 10774 case PPC::DIR_PWR8: { 10775 if (!ML) 10776 break; 10777 10778 const PPCInstrInfo *TII = Subtarget.getInstrInfo(); 10779 10780 // For small loops (between 5 and 8 instructions), align to a 32-byte 10781 // boundary so that the entire loop fits in one instruction-cache line. 10782 uint64_t LoopSize = 0; 10783 for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I) 10784 for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) 10785 LoopSize += TII->GetInstSizeInBytes(J); 10786 10787 if (LoopSize > 16 && LoopSize <= 32) 10788 return 5; 10789 10790 break; 10791 } 10792 } 10793 10794 return TargetLowering::getPrefLoopAlignment(ML); 10795 } 10796 10797 /// getConstraintType - Given a constraint, return the type of 10798 /// constraint it is for this target. 10799 PPCTargetLowering::ConstraintType 10800 PPCTargetLowering::getConstraintType(StringRef Constraint) const { 10801 if (Constraint.size() == 1) { 10802 switch (Constraint[0]) { 10803 default: break; 10804 case 'b': 10805 case 'r': 10806 case 'f': 10807 case 'v': 10808 case 'y': 10809 return C_RegisterClass; 10810 case 'Z': 10811 // FIXME: While Z does indicate a memory constraint, it specifically 10812 // indicates an r+r address (used in conjunction with the 'y' modifier 10813 // in the replacement string). Currently, we're forcing the base 10814 // register to be r0 in the asm printer (which is interpreted as zero) 10815 // and forming the complete address in the second register. This is 10816 // suboptimal. 10817 return C_Memory; 10818 } 10819 } else if (Constraint == "wc") { // individual CR bits. 10820 return C_RegisterClass; 10821 } else if (Constraint == "wa" || Constraint == "wd" || 10822 Constraint == "wf" || Constraint == "ws") { 10823 return C_RegisterClass; // VSX registers. 10824 } 10825 return TargetLowering::getConstraintType(Constraint); 10826 } 10827 10828 /// Examine constraint type and operand type and determine a weight value. 10829 /// This object must already have been set up with the operand type 10830 /// and the current alternative constraint selected. 10831 TargetLowering::ConstraintWeight 10832 PPCTargetLowering::getSingleConstraintMatchWeight( 10833 AsmOperandInfo &info, const char *constraint) const { 10834 ConstraintWeight weight = CW_Invalid; 10835 Value *CallOperandVal = info.CallOperandVal; 10836 // If we don't have a value, we can't do a match, 10837 // but allow it at the lowest weight. 10838 if (!CallOperandVal) 10839 return CW_Default; 10840 Type *type = CallOperandVal->getType(); 10841 10842 // Look at the constraint type. 10843 if (StringRef(constraint) == "wc" && type->isIntegerTy(1)) 10844 return CW_Register; // an individual CR bit. 
10845 else if ((StringRef(constraint) == "wa" || 10846 StringRef(constraint) == "wd" || 10847 StringRef(constraint) == "wf") && 10848 type->isVectorTy()) 10849 return CW_Register; 10850 else if (StringRef(constraint) == "ws" && type->isDoubleTy()) 10851 return CW_Register; 10852 10853 switch (*constraint) { 10854 default: 10855 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 10856 break; 10857 case 'b': 10858 if (type->isIntegerTy()) 10859 weight = CW_Register; 10860 break; 10861 case 'f': 10862 if (type->isFloatTy()) 10863 weight = CW_Register; 10864 break; 10865 case 'd': 10866 if (type->isDoubleTy()) 10867 weight = CW_Register; 10868 break; 10869 case 'v': 10870 if (type->isVectorTy()) 10871 weight = CW_Register; 10872 break; 10873 case 'y': 10874 weight = CW_Register; 10875 break; 10876 case 'Z': 10877 weight = CW_Memory; 10878 break; 10879 } 10880 return weight; 10881 } 10882 10883 std::pair<unsigned, const TargetRegisterClass *> 10884 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 10885 StringRef Constraint, 10886 MVT VT) const { 10887 if (Constraint.size() == 1) { 10888 // GCC RS6000 Constraint Letters 10889 switch (Constraint[0]) { 10890 case 'b': // R1-R31 10891 if (VT == MVT::i64 && Subtarget.isPPC64()) 10892 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass); 10893 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass); 10894 case 'r': // R0-R31 10895 if (VT == MVT::i64 && Subtarget.isPPC64()) 10896 return std::make_pair(0U, &PPC::G8RCRegClass); 10897 return std::make_pair(0U, &PPC::GPRCRegClass); 10898 case 'f': 10899 if (VT == MVT::f32 || VT == MVT::i32) 10900 return std::make_pair(0U, &PPC::F4RCRegClass); 10901 if (VT == MVT::f64 || VT == MVT::i64) 10902 return std::make_pair(0U, &PPC::F8RCRegClass); 10903 if (VT == MVT::v4f64 && Subtarget.hasQPX()) 10904 return std::make_pair(0U, &PPC::QFRCRegClass); 10905 if (VT == MVT::v4f32 && Subtarget.hasQPX()) 10906 return std::make_pair(0U, &PPC::QSRCRegClass); 10907 break; 10908 case 'v': 10909 if (VT == MVT::v4f64 && Subtarget.hasQPX()) 10910 return std::make_pair(0U, &PPC::QFRCRegClass); 10911 if (VT == MVT::v4f32 && Subtarget.hasQPX()) 10912 return std::make_pair(0U, &PPC::QSRCRegClass); 10913 return std::make_pair(0U, &PPC::VRRCRegClass); 10914 case 'y': // crrc 10915 return std::make_pair(0U, &PPC::CRRCRegClass); 10916 } 10917 } else if (Constraint == "wc") { // an individual CR bit. 10918 return std::make_pair(0U, &PPC::CRBITRCRegClass); 10919 } else if (Constraint == "wa" || Constraint == "wd" || 10920 Constraint == "wf") { 10921 return std::make_pair(0U, &PPC::VSRCRegClass); 10922 } else if (Constraint == "ws") { 10923 if (VT == MVT::f32) 10924 return std::make_pair(0U, &PPC::VSSRCRegClass); 10925 else 10926 return std::make_pair(0U, &PPC::VSFRCRegClass); 10927 } 10928 10929 std::pair<unsigned, const TargetRegisterClass *> R = 10930 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 10931 10932 // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers 10933 // (which we call X[0-9]+). If a 64-bit value has been requested, and a 10934 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent 10935 // register. 10936 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use 10937 // the AsmName field from *RegisterInfo.td, then this would not be necessary. 
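  // For example (illustrative): an inline-asm operand constrained as "{r3}"
  // with an i64 value on PPC64 comes back from the generic code as the 32-bit
  // R3; the rewrite below upgrades it to the 64-bit super-register X3 so the
  // operand is not silently treated as only 32 bits wide.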
10938 if (R.first && VT == MVT::i64 && Subtarget.isPPC64() && 10939 PPC::GPRCRegClass.contains(R.first)) 10940 return std::make_pair(TRI->getMatchingSuperReg(R.first, 10941 PPC::sub_32, &PPC::G8RCRegClass), 10942 &PPC::G8RCRegClass); 10943 10944 // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same. 10945 if (!R.second && StringRef("{cc}").equals_lower(Constraint)) { 10946 R.first = PPC::CR0; 10947 R.second = &PPC::CRRCRegClass; 10948 } 10949 10950 return R; 10951 } 10952 10953 10954 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 10955 /// vector. If it is invalid, don't add anything to Ops. 10956 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 10957 std::string &Constraint, 10958 std::vector<SDValue>&Ops, 10959 SelectionDAG &DAG) const { 10960 SDValue Result; 10961 10962 // Only support length 1 constraints. 10963 if (Constraint.length() > 1) return; 10964 10965 char Letter = Constraint[0]; 10966 switch (Letter) { 10967 default: break; 10968 case 'I': 10969 case 'J': 10970 case 'K': 10971 case 'L': 10972 case 'M': 10973 case 'N': 10974 case 'O': 10975 case 'P': { 10976 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op); 10977 if (!CST) return; // Must be an immediate to match. 10978 SDLoc dl(Op); 10979 int64_t Value = CST->getSExtValue(); 10980 EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative 10981 // numbers are printed as such. 10982 switch (Letter) { 10983 default: llvm_unreachable("Unknown constraint letter!"); 10984 case 'I': // "I" is a signed 16-bit constant. 10985 if (isInt<16>(Value)) 10986 Result = DAG.getTargetConstant(Value, dl, TCVT); 10987 break; 10988 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 10989 if (isShiftedUInt<16, 16>(Value)) 10990 Result = DAG.getTargetConstant(Value, dl, TCVT); 10991 break; 10992 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. 10993 if (isShiftedInt<16, 16>(Value)) 10994 Result = DAG.getTargetConstant(Value, dl, TCVT); 10995 break; 10996 case 'K': // "K" is a constant with only the low-order 16 bits nonzero. 10997 if (isUInt<16>(Value)) 10998 Result = DAG.getTargetConstant(Value, dl, TCVT); 10999 break; 11000 case 'M': // "M" is a constant that is greater than 31. 11001 if (Value > 31) 11002 Result = DAG.getTargetConstant(Value, dl, TCVT); 11003 break; 11004 case 'N': // "N" is a positive constant that is an exact power of two. 11005 if (Value > 0 && isPowerOf2_64(Value)) 11006 Result = DAG.getTargetConstant(Value, dl, TCVT); 11007 break; 11008 case 'O': // "O" is the constant zero. 11009 if (Value == 0) 11010 Result = DAG.getTargetConstant(Value, dl, TCVT); 11011 break; 11012 case 'P': // "P" is a constant whose negation is a signed 16-bit constant. 11013 if (isInt<16>(-Value)) 11014 Result = DAG.getTargetConstant(Value, dl, TCVT); 11015 break; 11016 } 11017 break; 11018 } 11019 } 11020 11021 if (Result.getNode()) { 11022 Ops.push_back(Result); 11023 return; 11024 } 11025 11026 // Handle standard constraint letters. 11027 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 11028 } 11029 11030 // isLegalAddressingMode - Return true if the addressing mode represented 11031 // by AM is legal for this target, for a load/store of the specified type. 11032 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL, 11033 const AddrMode &AM, Type *Ty, 11034 unsigned AS) const { 11035 // PPC does not allow r+i addressing modes for vectors! 
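  // Rough summary of what is accepted below (illustrative, not exhaustive):
  // for scalar types, "r+i" with a signed 16-bit displacement and plain
  // "r+r" (Scale == 1 with no displacement) are legal, and "2*r" is treated
  // as "r+r"; scaled-index forms such as "r + 4*r" are rejected, and any
  // nonzero displacement is rejected for vector types by the check that
  // follows.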
  if (Ty->isVectorTy() && AM.BaseOffs != 0)
    return false;

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r.
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = Subtarget.isPPC64();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
        DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(),
                        dl, isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

  // Just load the return address off the stack.
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
                     MachinePointerInfo(), false, false, false, 0);
}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setFrameAddressIsTaken(true);

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;

  // Naked functions never have a frame pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned FrameReg;
  if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
    FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
  else
    FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;

  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
                                         PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo(), false, false,
                            false, 0);
  return FrameAddr;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
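// Illustrative use of the mapping below: a named-register global such as
//   register unsigned long current_sp asm("r1");
// is lowered through the llvm.read_register / llvm.write_register intrinsics,
// and this hook translates the "r1"/"r2"/"r13" strings into physical
// registers; any other name (or an unsupported ABI/type combination) is
// rejected with a fatal error.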
11135 unsigned PPCTargetLowering::getRegisterByName(const char* RegName, EVT VT, 11136 SelectionDAG &DAG) const { 11137 bool isPPC64 = Subtarget.isPPC64(); 11138 bool isDarwinABI = Subtarget.isDarwinABI(); 11139 11140 if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) || 11141 (!isPPC64 && VT != MVT::i32)) 11142 report_fatal_error("Invalid register global variable type"); 11143 11144 bool is64Bit = isPPC64 && VT == MVT::i64; 11145 unsigned Reg = StringSwitch<unsigned>(RegName) 11146 .Case("r1", is64Bit ? PPC::X1 : PPC::R1) 11147 .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2) 11148 .Case("r13", (!isPPC64 && isDarwinABI) ? 0 : 11149 (is64Bit ? PPC::X13 : PPC::R13)) 11150 .Default(0); 11151 11152 if (Reg) 11153 return Reg; 11154 report_fatal_error("Invalid register name global variable"); 11155 } 11156 11157 bool 11158 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 11159 // The PowerPC target isn't yet aware of offsets. 11160 return false; 11161 } 11162 11163 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 11164 const CallInst &I, 11165 unsigned Intrinsic) const { 11166 11167 switch (Intrinsic) { 11168 case Intrinsic::ppc_qpx_qvlfd: 11169 case Intrinsic::ppc_qpx_qvlfs: 11170 case Intrinsic::ppc_qpx_qvlfcd: 11171 case Intrinsic::ppc_qpx_qvlfcs: 11172 case Intrinsic::ppc_qpx_qvlfiwa: 11173 case Intrinsic::ppc_qpx_qvlfiwz: 11174 case Intrinsic::ppc_altivec_lvx: 11175 case Intrinsic::ppc_altivec_lvxl: 11176 case Intrinsic::ppc_altivec_lvebx: 11177 case Intrinsic::ppc_altivec_lvehx: 11178 case Intrinsic::ppc_altivec_lvewx: 11179 case Intrinsic::ppc_vsx_lxvd2x: 11180 case Intrinsic::ppc_vsx_lxvw4x: { 11181 EVT VT; 11182 switch (Intrinsic) { 11183 case Intrinsic::ppc_altivec_lvebx: 11184 VT = MVT::i8; 11185 break; 11186 case Intrinsic::ppc_altivec_lvehx: 11187 VT = MVT::i16; 11188 break; 11189 case Intrinsic::ppc_altivec_lvewx: 11190 VT = MVT::i32; 11191 break; 11192 case Intrinsic::ppc_vsx_lxvd2x: 11193 VT = MVT::v2f64; 11194 break; 11195 case Intrinsic::ppc_qpx_qvlfd: 11196 VT = MVT::v4f64; 11197 break; 11198 case Intrinsic::ppc_qpx_qvlfs: 11199 VT = MVT::v4f32; 11200 break; 11201 case Intrinsic::ppc_qpx_qvlfcd: 11202 VT = MVT::v2f64; 11203 break; 11204 case Intrinsic::ppc_qpx_qvlfcs: 11205 VT = MVT::v2f32; 11206 break; 11207 default: 11208 VT = MVT::v4i32; 11209 break; 11210 } 11211 11212 Info.opc = ISD::INTRINSIC_W_CHAIN; 11213 Info.memVT = VT; 11214 Info.ptrVal = I.getArgOperand(0); 11215 Info.offset = -VT.getStoreSize()+1; 11216 Info.size = 2*VT.getStoreSize()-1; 11217 Info.align = 1; 11218 Info.vol = false; 11219 Info.readMem = true; 11220 Info.writeMem = false; 11221 return true; 11222 } 11223 case Intrinsic::ppc_qpx_qvlfda: 11224 case Intrinsic::ppc_qpx_qvlfsa: 11225 case Intrinsic::ppc_qpx_qvlfcda: 11226 case Intrinsic::ppc_qpx_qvlfcsa: 11227 case Intrinsic::ppc_qpx_qvlfiwaa: 11228 case Intrinsic::ppc_qpx_qvlfiwza: { 11229 EVT VT; 11230 switch (Intrinsic) { 11231 case Intrinsic::ppc_qpx_qvlfda: 11232 VT = MVT::v4f64; 11233 break; 11234 case Intrinsic::ppc_qpx_qvlfsa: 11235 VT = MVT::v4f32; 11236 break; 11237 case Intrinsic::ppc_qpx_qvlfcda: 11238 VT = MVT::v2f64; 11239 break; 11240 case Intrinsic::ppc_qpx_qvlfcsa: 11241 VT = MVT::v2f32; 11242 break; 11243 default: 11244 VT = MVT::v4i32; 11245 break; 11246 } 11247 11248 Info.opc = ISD::INTRINSIC_W_CHAIN; 11249 Info.memVT = VT; 11250 Info.ptrVal = I.getArgOperand(0); 11251 Info.offset = 0; 11252 Info.size = VT.getStoreSize(); 11253 Info.align = 1; 11254 Info.vol = false; 11255 Info.readMem 
= true; 11256 Info.writeMem = false; 11257 return true; 11258 } 11259 case Intrinsic::ppc_qpx_qvstfd: 11260 case Intrinsic::ppc_qpx_qvstfs: 11261 case Intrinsic::ppc_qpx_qvstfcd: 11262 case Intrinsic::ppc_qpx_qvstfcs: 11263 case Intrinsic::ppc_qpx_qvstfiw: 11264 case Intrinsic::ppc_altivec_stvx: 11265 case Intrinsic::ppc_altivec_stvxl: 11266 case Intrinsic::ppc_altivec_stvebx: 11267 case Intrinsic::ppc_altivec_stvehx: 11268 case Intrinsic::ppc_altivec_stvewx: 11269 case Intrinsic::ppc_vsx_stxvd2x: 11270 case Intrinsic::ppc_vsx_stxvw4x: { 11271 EVT VT; 11272 switch (Intrinsic) { 11273 case Intrinsic::ppc_altivec_stvebx: 11274 VT = MVT::i8; 11275 break; 11276 case Intrinsic::ppc_altivec_stvehx: 11277 VT = MVT::i16; 11278 break; 11279 case Intrinsic::ppc_altivec_stvewx: 11280 VT = MVT::i32; 11281 break; 11282 case Intrinsic::ppc_vsx_stxvd2x: 11283 VT = MVT::v2f64; 11284 break; 11285 case Intrinsic::ppc_qpx_qvstfd: 11286 VT = MVT::v4f64; 11287 break; 11288 case Intrinsic::ppc_qpx_qvstfs: 11289 VT = MVT::v4f32; 11290 break; 11291 case Intrinsic::ppc_qpx_qvstfcd: 11292 VT = MVT::v2f64; 11293 break; 11294 case Intrinsic::ppc_qpx_qvstfcs: 11295 VT = MVT::v2f32; 11296 break; 11297 default: 11298 VT = MVT::v4i32; 11299 break; 11300 } 11301 11302 Info.opc = ISD::INTRINSIC_VOID; 11303 Info.memVT = VT; 11304 Info.ptrVal = I.getArgOperand(1); 11305 Info.offset = -VT.getStoreSize()+1; 11306 Info.size = 2*VT.getStoreSize()-1; 11307 Info.align = 1; 11308 Info.vol = false; 11309 Info.readMem = false; 11310 Info.writeMem = true; 11311 return true; 11312 } 11313 case Intrinsic::ppc_qpx_qvstfda: 11314 case Intrinsic::ppc_qpx_qvstfsa: 11315 case Intrinsic::ppc_qpx_qvstfcda: 11316 case Intrinsic::ppc_qpx_qvstfcsa: 11317 case Intrinsic::ppc_qpx_qvstfiwa: { 11318 EVT VT; 11319 switch (Intrinsic) { 11320 case Intrinsic::ppc_qpx_qvstfda: 11321 VT = MVT::v4f64; 11322 break; 11323 case Intrinsic::ppc_qpx_qvstfsa: 11324 VT = MVT::v4f32; 11325 break; 11326 case Intrinsic::ppc_qpx_qvstfcda: 11327 VT = MVT::v2f64; 11328 break; 11329 case Intrinsic::ppc_qpx_qvstfcsa: 11330 VT = MVT::v2f32; 11331 break; 11332 default: 11333 VT = MVT::v4i32; 11334 break; 11335 } 11336 11337 Info.opc = ISD::INTRINSIC_VOID; 11338 Info.memVT = VT; 11339 Info.ptrVal = I.getArgOperand(1); 11340 Info.offset = 0; 11341 Info.size = VT.getStoreSize(); 11342 Info.align = 1; 11343 Info.vol = false; 11344 Info.readMem = false; 11345 Info.writeMem = true; 11346 return true; 11347 } 11348 default: 11349 break; 11350 } 11351 11352 return false; 11353 } 11354 11355 /// getOptimalMemOpType - Returns the target specific optimal type for load 11356 /// and store operations as a result of memset, memcpy, and memmove 11357 /// lowering. If DstAlign is zero that means it's safe to destination 11358 /// alignment can satisfy any constraint. Similarly if SrcAlign is zero it 11359 /// means there isn't a need to check it against alignment requirement, 11360 /// probably because the source does not need to be loaded. If 'IsMemset' is 11361 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that 11362 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy 11363 /// source is constant so it does not need to be loaded. 11364 /// It returns EVT::Other if the type should be determined using generic 11365 /// target-independent logic. 
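/// For instance (illustrative): when optimizing, a 64-byte memcpy whose source
/// and destination are both known to be 16-byte aligned is given MVT::v4i32 on
/// an Altivec-capable subtarget, while the same copy with operands known to be
/// only 4-byte aligned falls back to MVT::i64 on 64-bit subtargets unless P8
/// vector support makes the unaligned VSX form worthwhile.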
11366 EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size, 11367 unsigned DstAlign, unsigned SrcAlign, 11368 bool IsMemset, bool ZeroMemset, 11369 bool MemcpyStrSrc, 11370 MachineFunction &MF) const { 11371 if (getTargetMachine().getOptLevel() != CodeGenOpt::None) { 11372 const Function *F = MF.getFunction(); 11373 // When expanding a memset, require at least two QPX instructions to cover 11374 // the cost of loading the value to be stored from the constant pool. 11375 if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) && 11376 (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) && 11377 !F->hasFnAttribute(Attribute::NoImplicitFloat)) { 11378 return MVT::v4f64; 11379 } 11380 11381 // We should use Altivec/VSX loads and stores when available. For unaligned 11382 // addresses, unaligned VSX loads are only fast starting with the P8. 11383 if (Subtarget.hasAltivec() && Size >= 16 && 11384 (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) || 11385 ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector()))) 11386 return MVT::v4i32; 11387 } 11388 11389 if (Subtarget.isPPC64()) { 11390 return MVT::i64; 11391 } 11392 11393 return MVT::i32; 11394 } 11395 11396 /// \brief Returns true if it is beneficial to convert a load of a constant 11397 /// to just the constant itself. 11398 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 11399 Type *Ty) const { 11400 assert(Ty->isIntegerTy()); 11401 11402 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 11403 if (BitSize == 0 || BitSize > 64) 11404 return false; 11405 return true; 11406 } 11407 11408 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { 11409 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 11410 return false; 11411 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 11412 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 11413 return NumBits1 == 64 && NumBits2 == 32; 11414 } 11415 11416 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 11417 if (!VT1.isInteger() || !VT2.isInteger()) 11418 return false; 11419 unsigned NumBits1 = VT1.getSizeInBits(); 11420 unsigned NumBits2 = VT2.getSizeInBits(); 11421 return NumBits1 == 64 && NumBits2 == 32; 11422 } 11423 11424 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { 11425 // Generally speaking, zexts are not free, but they are free when they can be 11426 // folded with other operations. 11427 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) { 11428 EVT MemVT = LD->getMemoryVT(); 11429 if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 || 11430 (Subtarget.isPPC64() && MemVT == MVT::i32)) && 11431 (LD->getExtensionType() == ISD::NON_EXTLOAD || 11432 LD->getExtensionType() == ISD::ZEXTLOAD)) 11433 return true; 11434 } 11435 11436 // FIXME: Add other cases... 11437 // - 32-bit shifts with a zext to i64 11438 // - zext after ctlz, bswap, etc. 
11439 // - zext after and by a constant mask 11440 11441 return TargetLowering::isZExtFree(Val, VT2); 11442 } 11443 11444 bool PPCTargetLowering::isFPExtFree(EVT VT) const { 11445 assert(VT.isFloatingPoint()); 11446 return true; 11447 } 11448 11449 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 11450 return isInt<16>(Imm) || isUInt<16>(Imm); 11451 } 11452 11453 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const { 11454 return isInt<16>(Imm) || isUInt<16>(Imm); 11455 } 11456 11457 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, 11458 unsigned, 11459 unsigned, 11460 bool *Fast) const { 11461 if (DisablePPCUnaligned) 11462 return false; 11463 11464 // PowerPC supports unaligned memory access for simple non-vector types. 11465 // Although accessing unaligned addresses is not as efficient as accessing 11466 // aligned addresses, it is generally more efficient than manual expansion, 11467 // and generally only traps for software emulation when crossing page 11468 // boundaries. 11469 11470 if (!VT.isSimple()) 11471 return false; 11472 11473 if (VT.getSimpleVT().isVector()) { 11474 if (Subtarget.hasVSX()) { 11475 if (VT != MVT::v2f64 && VT != MVT::v2i64 && 11476 VT != MVT::v4f32 && VT != MVT::v4i32) 11477 return false; 11478 } else { 11479 return false; 11480 } 11481 } 11482 11483 if (VT == MVT::ppcf128) 11484 return false; 11485 11486 if (Fast) 11487 *Fast = true; 11488 11489 return true; 11490 } 11491 11492 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { 11493 VT = VT.getScalarType(); 11494 11495 if (!VT.isSimple()) 11496 return false; 11497 11498 switch (VT.getSimpleVT().SimpleTy) { 11499 case MVT::f32: 11500 case MVT::f64: 11501 return true; 11502 default: 11503 break; 11504 } 11505 11506 return false; 11507 } 11508 11509 const MCPhysReg * 11510 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const { 11511 // LR is a callee-save register, but we must treat it as clobbered by any call 11512 // site. Hence we include LR in the scratch registers, which are in turn added 11513 // as implicit-defs for stackmaps and patchpoints. The same reasoning applies 11514 // to CTR, which is used by any indirect call. 11515 static const MCPhysReg ScratchRegs[] = { 11516 PPC::X12, PPC::LR8, PPC::CTR8, 0 11517 }; 11518 11519 return ScratchRegs; 11520 } 11521 11522 bool 11523 PPCTargetLowering::shouldExpandBuildVectorWithShuffles( 11524 EVT VT , unsigned DefinedValues) const { 11525 if (VT == MVT::v2i64) 11526 return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves 11527 11528 if (Subtarget.hasQPX()) { 11529 if (VT == MVT::v4f32 || VT == MVT::v4f64 || VT == MVT::v4i1) 11530 return true; 11531 } 11532 11533 return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues); 11534 } 11535 11536 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const { 11537 if (DisableILPPref || Subtarget.enableMachineScheduler()) 11538 return TargetLowering::getSchedulingPreference(N); 11539 11540 return Sched::ILP; 11541 } 11542 11543 // Create a fast isel object. 11544 FastISel * 11545 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo, 11546 const TargetLibraryInfo *LibInfo) const { 11547 return PPC::createFastISel(FuncInfo, LibInfo); 11548 } 11549