//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPCCallingConv.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCTargetMachine.h"
#include "PPCTargetObjectFile.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

// FIXME: Remove this once soft-float is supported.
static cl::opt<bool> DisablePPCFloatInVariadic("disable-ppc-float-in-variadic",
cl::desc("disable saving float registers for va_start on PPC"), cl::Hidden);

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
cl::desc("disable setting the node scheduling preference to ILP on PPC"),
cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
  addRegisterClass(MVT::f64, &PPC::F8RCRegClass);

  // PowerPC has an i16 SEXTLOAD but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
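  // These select to the update-form instructions (e.g. lbzu/stwu), which
  // write the computed effective address back into the base register.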
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
                         isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType (ISD::UINT_TO_FP, MVT::i1,
                         isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // This is used in the ppcf128->int sequence. Note it has different semantics
  // from FP_ROUND: that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
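  // Marking these Expand keeps the legalizer from forming them while it
  // expands the remainder operations above.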
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FMA  , MVT::f64, Legal);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);
  setOperationAction(ISD::FMA  , MVT::f32, Legal);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Expand FSQRT unless we have a hardware square root or, under unsafe FP
  // math, the reciprocal-estimate instructions needed to approximate it.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP or CTTZ; CTPOP is legal only with POPCNTD.
  setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
  setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);

  if (Subtarget.hasPOPCNTD()) {
    setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
    setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
    setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32   , Expand);
  setOperationAction(ISD::ROTR, MVT::i64   , Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have SELECT.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires a SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::f64, Expand);

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuations, user-level threading, and so on. As a result, no
  // other SjLj exception interfaces are implemented; please don't build your
  // own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART , MVT::Other, Custom);

  if (Subtarget.isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  if (Subtarget.isSVR4ABI() && !isPPC64)
    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VACOPY , MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY , MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  // Comparisons that require checking two conditions.
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // 64-bit implementations also have instructions for converting between
    // i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);

    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
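  // (FPCVT covers the fcfid*/fctiw* conversion variants introduced in
  // ISA 2.06.)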
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // Vector instructions introduced in P8.
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND   , VT, Promote);
      AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
      setOperationAction(ISD::OR    , VT, Promote);
      AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , VT, Promote);
      AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , VT, Promote);
      AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL , VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FPOWI, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

      for (MVT InnerVT : MVT::vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
      setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Vector operation legalization checks the result type of
      // SIGN_EXTEND_INREG, overall legalization checks the inner type.
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD , MVT::v4f64, Custom);
    setOperationAction(ISD::STORE , MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT , MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT , MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND , MVT::v4f32, Legal);
    setOperationAction(ISD::FP_ROUND_INREG , MVT::v4f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG , MVT::v4f64, Legal);
    setOperationAction(ISD::FABS , MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN , MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS , MVT::v4f64, Expand);
    setOperationAction(ISD::FPOWI , MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2 , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10 , MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP , MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2 , MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD , MVT::v4f32, Custom);
    setOperationAction(ISD::STORE , MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT , MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT , MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG , MVT::v4f32, Legal);
    setOperationAction(ISD::FABS , MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN , MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS , MVT::v4f32, Expand);
    setOperationAction(ISD::FPOWI , MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2 , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10 , MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP , MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2 , MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND , MVT::v4i1, Legal);
    setOperationAction(ISD::OR , MVT::v4i1, Legal);
    setOperationAction(ISD::XOR , MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD , MVT::v4i1, Custom);
    setOperationAction(ISD::STORE , MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
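    // (FRINT must raise FE_INEXACT when the result differs from its input,
    // which the QPX vector rounding instructions do not do.)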
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  if (isPPC64) {
    setStackPointerRegisterToSaveRestore(PPC::X1);
    setExceptionPointerRegister(PPC::X3);
    setExceptionSelectorRegister(PPC::X4);
  } else {
    setStackPointerRegisterToSaveRestore(PPC::R1);
    setExceptionPointerRegister(PPC::R3);
    setExceptionSelectorRegister(PPC::R4);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(2);
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(4);

  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
    setPrefFunctionAlignment(4);
    setPrefLoopAlignment(4);
    break;
  }

  setInsertFencesForAtomic(true);

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary. Everything
  // else is passed on an 8-byte boundary on PPC64 and a 4-byte boundary on
  // PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
  return Align;
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER: break;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
  case PPCISD::FRE: return "PPCISD::FRE";
  case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::CMPB: return "PPCISD::CMPB";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
  case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL: return "PPCISD::SRL";
  case PPCISD::SRA: return "PPCISD::SRA";
  case PPCISD::SHL: return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL: return "PPCISD::CALL";
  case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR: return "PPCISD::MTCTR";
  case PPCISD::BCTRL: return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
  case PPCISD::MFVSR: return "PPCISD::MFVSR";
  case PPCISD::MTVSRA: return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ";
  case PPCISD::ANDIo_1_EQ_BIT: return "PPCISD::ANDIo_1_EQ_BIT";
  case PPCISD::ANDIo_1_GT_BIT: return "PPCISD::ANDIo_1_GT_BIT";
  case PPCISD::VCMP: return "PPCISD::VCMP";
  case PPCISD::VCMPo: return "PPCISD::VCMPo";
  case PPCISD::LBRX: return "PPCISD::LBRX";
  case PPCISD::STBRX: return "PPCISD::STBRX";
  case PPCISD::LFIWAX: return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX: return "PPCISD::LFIWZX";
  case PPCISD::LXVD2X: return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X: return "PPCISD::STXVD2X";
  case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ: return "PPCISD::BDNZ";
  case PPCISD::BDZ: return "PPCISD::BDZ";
  case PPCISD::MFFS: return "PPCISD::MFFS";
  case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET: return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
  case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT";
  case PPCISD::SC: return "PPCISD::SC";
  case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB";
  case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE";
  case PPCISD::RFEBB: return "PPCISD::RFEBB";
  case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD";
  case PPCISD::QVFPERM: return "PPCISD::QVFPERM";
  case PPCISD::QVGPCI: return "PPCISD::QVGPCI";
  case PPCISD::QVALIGNI: return "PPCISD::QVALIGNI";
  case PPCISD::QVESPLATI: return "PPCISD::QVESPLATI";
  case PPCISD::QBFLT: return "PPCISD::QBFLT";
  case PPCISD::QVLFSb: return "PPCISD::QVLFSb";
  }
  return nullptr;
}

EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
                                          EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

  if (Subtarget.hasQPX())
    return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());

  return VT.changeVectorElementTypeToInteger();
}

bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
        return false;
  }
  return true;
}

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
/// current subtarget.
///
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  const PPCSubtarget& Subtarget =
      static_cast<const PPCSubtarget&>(DAG.getSubtarget());
  if (!Subtarget.hasP8Vector())
    return false;

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}

/**
 * \brief Common function used to match vmrgew and vmrgow shuffles
 *
 * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target machine.
 *   - Little Endian:
 *     - Use offset of 0 to check for odd elements
 *     - Use offset of 4 to check for even elements
 *   - Big Endian:
 *     - Use offset of 0 to check for even elements
 *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 * ("Targeting your applications - what little endian and big endian IBM XL
 * C/C++ compiler differences mean to you").
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements
 * are numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, thus each vector will contain 16
 * byte-sized elements. More info on the shuffle vector can be found in the
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
 * Language Reference.
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 *   - If the instruction uses the same vector for both inputs, the range of
 *     the indices will be 0 to 15. In this case, the RHSStart value passed
 *     should be 0.
 *   - If the instruction has two different vectors then the range of the
 *     indices will be 0 to 31.
 *     In this case, the RHSStart value passed should be 16 (indices 0-15
 *     specify elements in the first vector while indices 16 to 31 specify
 *     elements in the second vector).
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] IndexOffset Specifies whether to look for even or odd elements
 * \param[in] RHSStartValue Specifies the starting index for the righthand
 *            input vector to the shuffle_vector instruction
 * \return true iff this shuffle vector represents an even or odd word merge
 */
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
      if (!isConstantOrUndef(N->getMaskElt(i*4+j),
                             i*RHSStartValue+j+IndexOffset) ||
          !isConstantOrUndef(N->getMaskElt(i*4+j+8),
                             i*RHSStartValue+j+IndexOffset+8))
        return false;
  return true;
}

/**
 * \brief Determine if the specified shuffle mask is suitable for the vmrgew
 * or vmrgow instructions.
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
 * \param[in] ShuffleKind Identify the type of merge:
 *   - 0 = big-endian merge with two different inputs;
 *   - 1 = either-endian merge with two identical inputs;
 *   - 2 = little-endian merge with two different inputs (inputs are swapped
 *     for little-endian merges).
 * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask is suitable for a vmrgew or vmrgow
 *         instruction
 */
bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                              unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    unsigned indexOffset = CheckEven ? 4 : 0;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  } else {
    unsigned indexOffset = CheckEven ? 0 : 4;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 0) // Normal
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  }
  return false;
}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
/// The ShuffleKind distinguishes between big-endian operations with two
/// different inputs (0), either-endian operations with two identical inputs
/// (1), and little-endian operations with two different inputs (2). For the
/// latter, the input operands are swapped (see PPCInstrAltivec.td).
int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                             SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::v16i8)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1; // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;

  ShiftAmt -= i;
  bool isLE = DAG.getDataLayout().isLittleEndian();

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else if (ShuffleKind == 1) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  } else
    return -1;

  if (isLE)
    ShiftAmt = 16 - ShiftAmt;

  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // The consecutive indices need to specify an element, not part of two
  // different elements. So abandon ship early if this isn't the case.
  if (N->getMaskElt(0) % EltSize != 0)
    return false;

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte
  // element splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
                                SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  if (DAG.getDataLayout().isLittleEndian())
    return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  else
    return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted. The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(nullptr, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval.
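    // Worked example (illustrative, not from the original source): matching
    // "vspltish 1" against a v16i8 build_vector gives EltSize == 1 and
    // ByteSize == 2, so Multiple == 2. All even-position bytes must then
    // agree (here 0) and all odd-position bytes must agree (here 1) for the
    // checks below to accept the pattern as a halfword splat.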
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue(); // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk. See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1. If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue; // Must have been undefs.

      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16) // 0,0,0,4 -> vspltisw(4)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }
    if (LeadingOnes) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its
  // elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (!OpVal.getNode())
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat. The only value whose replicated bits could fit into our
  // immediate field is zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, check if it consists
  // of a repeated bit pattern of size ByteSize.
  if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
    return SDValue();

  // Properly sign extend the value.
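  // Illustrative example (not from the original source): a v16i8 splat of the
  // byte 0xFF with ByteSize == 1 yields MaskVal == -1 below, and -1 passes
  // the 5-bit check (SignExtend32<5>(-1) == -1), so it matches "vspltisb -1".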
  int MaskVal = SignExtend32(Value, ByteSize * 8);

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5 bit sext field, return it.
  if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
  return SDValue();
}

/// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the
/// shift amount, otherwise return -1.
int PPC::isQVALIGNIShuffleMask(SDNode *N) {
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 4) return -1; // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  // Check the rest of the elements to see if they are consecutive.
  for (++i; i != 4; ++i)
    if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
      return -1;

  return ShiftAmt;
}

//===----------------------------------------------------------------------===//
// Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value. If so, this returns true and the
/// immediate.
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
static bool isIntS16Immediate(SDValue Op, short &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}

/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation. Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index,
                                            SelectionDAG &DAG) const {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false; // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false; // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false; // r+i; prefer the [r+imm] form if we can fold it.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
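    // Illustrative example (not from the original source): if the LHS is
    // known to have zeros in its low 4 bits (say, a 16-byte-aligned pointer)
    // and the RHS is a value in [0, 15], then "or LHS, RHS" never produces a
    // carry and is equivalent to "add LHS, RHS", so it can be selected as an
    // indexed [r+r] access.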
    APInt LHSKnownZero, LHSKnownOne;
    APInt RHSKnownZero, RHSKnownOne;
    DAG.computeKnownBits(N.getOperand(0),
                         LHSKnownZero, LHSKnownOne);

    if (LHSKnownZero.getBoolValue()) {
      DAG.computeKnownBits(N.getOperand(1),
                           RHSKnownZero, RHSKnownOne);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnownZero | RHSKnownZero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

// If we happen to be doing an i64 load or store into a stack slot that has
// less than a 4-byte alignment, then the frame-index elimination may need to
// use an indexed load or store instruction (because the offset may not be a
// multiple of 4). The extra register needed to hold the offset comes from the
// register scavenger, and it is possible that the scavenger will need to use
// an emergency spill slot. As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
  // FIXME: This does not handle the LWA case.
  if (VT != MVT::i64)
    return;

  // NOTE: We'll exclude negative FIs here, which come from argument
  // lowering, because there are no known test cases triggering this problem
  // using packed structures (or similar). We can remove this exclusion if
  // we find such a test case. The reason why this is so test-case driven is
  // because this entire 'fixup' is only to prevent crashes (from the
  // register scavenger) on not-really-valid inputs. For example, if we have:
  //   %a = alloca i1
  //   %b = bitcast i1* %a to i64*
  //   store i64 0, i64* %b
  // then the store should really be marked as 'align 1', but is not. If it
  // were marked as 'align 1' then the indexed form would have been
  // instruction-selected initially, and the problem this 'fixup' is preventing
  // won't happen regardless.
  if (FrameIdx < 0)
    return;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  unsigned Align = MFI->getObjectAlignment(FrameIdx);
  if (Align >= 4)
    return;

  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setHasNonRISpills();
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg. If Aligned is true, only accept displacements
/// suitable for STD and friends, i.e. multiples of 4.
bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
                                            SDValue &Base,
                                            SelectionDAG &DAG,
                                            bool Aligned) const {
  // FIXME dl should come from parent load or store, not from address
  SDLoc dl(N);
  // If this can be more profitably realized as r+r, fail.
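  // (Background, illustrative and not from the original source: D-form
  // accesses such as "lwz r3, 8(r31)" encode a signed 16-bit displacement,
  // while DS-form instructions such as STD reserve the low two displacement
  // bits and so only accept offsets that are multiples of 4, which is what
  // the Aligned flag enforces below.)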
1726 if (SelectAddressRegReg(N, Disp, Base, DAG)) 1727 return false; 1728 1729 if (N.getOpcode() == ISD::ADD) { 1730 short imm = 0; 1731 if (isIntS16Immediate(N.getOperand(1), imm) && 1732 (!Aligned || (imm & 3) == 0)) { 1733 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 1734 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 1735 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1736 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 1737 } else { 1738 Base = N.getOperand(0); 1739 } 1740 return true; // [r+i] 1741 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 1742 // Match LOAD (ADD (X, Lo(G))). 1743 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() 1744 && "Cannot handle constant offsets yet!"); 1745 Disp = N.getOperand(1).getOperand(0); // The global address. 1746 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 1747 Disp.getOpcode() == ISD::TargetGlobalTLSAddress || 1748 Disp.getOpcode() == ISD::TargetConstantPool || 1749 Disp.getOpcode() == ISD::TargetJumpTable); 1750 Base = N.getOperand(0); 1751 return true; // [&g+r] 1752 } 1753 } else if (N.getOpcode() == ISD::OR) { 1754 short imm = 0; 1755 if (isIntS16Immediate(N.getOperand(1), imm) && 1756 (!Aligned || (imm & 3) == 0)) { 1757 // If this is an or of disjoint bitfields, we can codegen this as an add 1758 // (for better address arithmetic) if the LHS and RHS of the OR are 1759 // provably disjoint. 1760 APInt LHSKnownZero, LHSKnownOne; 1761 DAG.computeKnownBits(N.getOperand(0), LHSKnownZero, LHSKnownOne); 1762 1763 if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 1764 // If all of the bits are known zero on the LHS or RHS, the add won't 1765 // carry. 1766 if (FrameIndexSDNode *FI = 1767 dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 1768 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1769 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 1770 } else { 1771 Base = N.getOperand(0); 1772 } 1773 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 1774 return true; 1775 } 1776 } 1777 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 1778 // Loading from a constant address. 1779 1780 // If this address fits entirely in a 16-bit sext immediate field, codegen 1781 // this as "d, 0" 1782 short Imm; 1783 if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) { 1784 Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0)); 1785 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, 1786 CN->getValueType(0)); 1787 return true; 1788 } 1789 1790 // Handle 32-bit sext immediates with LIS + addr mode. 1791 if ((CN->getValueType(0) == MVT::i32 || 1792 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) && 1793 (!Aligned || (CN->getZExtValue() & 3) == 0)) { 1794 int Addr = (int)CN->getZExtValue(); 1795 1796 // Otherwise, break this down into an LIS + disp. 1797 Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32); 1798 1799 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl, 1800 MVT::i32); 1801 unsigned Opc = CN->getValueType(0) == MVT::i32 ? 
                                                     PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
    fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
  } else
    Base = N;
  return true; // [r+0]
}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address. This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the operand is an addition, always emit this as [r+r], since this is
  // better (for code size, and execution, as the memop does the add for free)
  // than emitting an explicit add.
  if (N.getOpcode() == ISD::ADD) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Index = N;
  return true;
}

/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  unsigned Alignment;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlignment();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlignment();
    isLoad = false;
  } else
    return false;

  // PowerPC doesn't have preinc load/store instructions for vectors (except
  // for QPX, which does have preinc r+r forms).
  if (VT.isVector()) {
    if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
      return false;
    } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
      AM = ISD::PRE_INC;
      return true;
    }
  }

  if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {

    // Common code will reject creating a pre-inc form if the base pointer
    // is a frame index, or if N is a store and the base pointer is either
    // the same as or a predecessor of the value being stored. Check for
    // those situations here, and try with swapped Base/Offset instead.
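    // Illustrative example (not from the original source): for a store whose
    // address is "add %fi, %r" with %fi a frame index, common code would
    // reject PRE_INC with %fi as the base, but treating %r as the base and
    // %fi as the offset yields an equivalent update form, since [r+r]
    // addressing is symmetric in its two operands.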
1885 bool Swap = false; 1886 1887 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base)) 1888 Swap = true; 1889 else if (!isLoad) { 1890 SDValue Val = cast<StoreSDNode>(N)->getValue(); 1891 if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode())) 1892 Swap = true; 1893 } 1894 1895 if (Swap) 1896 std::swap(Base, Offset); 1897 1898 AM = ISD::PRE_INC; 1899 return true; 1900 } 1901 1902 // LDU/STU can only handle immediates that are a multiple of 4. 1903 if (VT != MVT::i64) { 1904 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, false)) 1905 return false; 1906 } else { 1907 // LDU/STU need an address with at least 4-byte alignment. 1908 if (Alignment < 4) 1909 return false; 1910 1911 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, true)) 1912 return false; 1913 } 1914 1915 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1916 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 1917 // sext i32 to i64 when addr mode is r+i. 1918 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 && 1919 LD->getExtensionType() == ISD::SEXTLOAD && 1920 isa<ConstantSDNode>(Offset)) 1921 return false; 1922 } 1923 1924 AM = ISD::PRE_INC; 1925 return true; 1926 } 1927 1928 //===----------------------------------------------------------------------===// 1929 // LowerOperation implementation 1930 //===----------------------------------------------------------------------===// 1931 1932 /// GetLabelAccessInfo - Return true if we should reference labels using a 1933 /// PICBase, set the HiOpFlags and LoOpFlags to the target MO flags. 1934 static bool GetLabelAccessInfo(const TargetMachine &TM, 1935 const PPCSubtarget &Subtarget, 1936 unsigned &HiOpFlags, unsigned &LoOpFlags, 1937 const GlobalValue *GV = nullptr) { 1938 HiOpFlags = PPCII::MO_HA; 1939 LoOpFlags = PPCII::MO_LO; 1940 1941 // Don't use the pic base if not in PIC relocation model. 1942 bool isPIC = TM.getRelocationModel() == Reloc::PIC_; 1943 1944 if (isPIC) { 1945 HiOpFlags |= PPCII::MO_PIC_FLAG; 1946 LoOpFlags |= PPCII::MO_PIC_FLAG; 1947 } 1948 1949 // If this is a reference to a global value that requires a non-lazy-ptr, make 1950 // sure that instruction lowering adds it. 1951 if (GV && Subtarget.hasLazyResolverStub(GV)) { 1952 HiOpFlags |= PPCII::MO_NLP_FLAG; 1953 LoOpFlags |= PPCII::MO_NLP_FLAG; 1954 1955 if (GV->hasHiddenVisibility()) { 1956 HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 1957 LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 1958 } 1959 } 1960 1961 return isPIC; 1962 } 1963 1964 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, 1965 SelectionDAG &DAG) { 1966 SDLoc DL(HiPart); 1967 EVT PtrVT = HiPart.getValueType(); 1968 SDValue Zero = DAG.getConstant(0, DL, PtrVT); 1969 1970 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); 1971 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); 1972 1973 // With PIC, the first instruction is actually "GR+hi(&G)". 1974 if (isPIC) 1975 Hi = DAG.getNode(ISD::ADD, DL, PtrVT, 1976 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); 1977 1978 // Generate non-pic code that has direct accesses to the constant pool. 1979 // The address of the global is just (hi(&g)+lo(&g)). 
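  // Illustrative example (not from the original source): for a global @g this
  // produces the classic two-instruction materialization, e.g.
  //   lis r3, g@ha
  //   addi r3, r3, g@l
  // (registers shown only for illustration; allocation happens later).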
1980 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); 1981 } 1982 1983 static void setUsesTOCBasePtr(MachineFunction &MF) { 1984 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 1985 FuncInfo->setUsesTOCBasePtr(); 1986 } 1987 1988 static void setUsesTOCBasePtr(SelectionDAG &DAG) { 1989 setUsesTOCBasePtr(DAG.getMachineFunction()); 1990 } 1991 1992 static SDValue getTOCEntry(SelectionDAG &DAG, SDLoc dl, bool Is64Bit, 1993 SDValue GA) { 1994 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 1995 SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) : 1996 DAG.getNode(PPCISD::GlobalBaseReg, dl, VT); 1997 1998 SDValue Ops[] = { GA, Reg }; 1999 return DAG.getMemIntrinsicNode( 2000 PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT, 2001 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0, false, true, 2002 false, 0); 2003 } 2004 2005 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 2006 SelectionDAG &DAG) const { 2007 EVT PtrVT = Op.getValueType(); 2008 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 2009 const Constant *C = CP->getConstVal(); 2010 2011 // 64-bit SVR4 ABI code is always position-independent. 2012 // The actual address of the GlobalValue is stored in the TOC. 2013 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2014 setUsesTOCBasePtr(DAG); 2015 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0); 2016 return getTOCEntry(DAG, SDLoc(CP), true, GA); 2017 } 2018 2019 unsigned MOHiFlag, MOLoFlag; 2020 bool isPIC = 2021 GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag); 2022 2023 if (isPIC && Subtarget.isSVR4ABI()) { 2024 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 2025 PPCII::MO_PIC_FLAG); 2026 return getTOCEntry(DAG, SDLoc(CP), false, GA); 2027 } 2028 2029 SDValue CPIHi = 2030 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag); 2031 SDValue CPILo = 2032 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag); 2033 return LowerLabelRef(CPIHi, CPILo, isPIC, DAG); 2034 } 2035 2036 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 2037 EVT PtrVT = Op.getValueType(); 2038 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 2039 2040 // 64-bit SVR4 ABI code is always position-independent. 2041 // The actual address of the GlobalValue is stored in the TOC. 2042 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2043 setUsesTOCBasePtr(DAG); 2044 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 2045 return getTOCEntry(DAG, SDLoc(JT), true, GA); 2046 } 2047 2048 unsigned MOHiFlag, MOLoFlag; 2049 bool isPIC = 2050 GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag); 2051 2052 if (isPIC && Subtarget.isSVR4ABI()) { 2053 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 2054 PPCII::MO_PIC_FLAG); 2055 return getTOCEntry(DAG, SDLoc(GA), false, GA); 2056 } 2057 2058 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 2059 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 2060 return LowerLabelRef(JTIHi, JTILo, isPIC, DAG); 2061 } 2062 2063 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 2064 SelectionDAG &DAG) const { 2065 EVT PtrVT = Op.getValueType(); 2066 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); 2067 const BlockAddress *BA = BASDN->getBlockAddress(); 2068 2069 // 64-bit SVR4 ABI code is always position-independent. 2070 // The actual BlockAddress is stored in the TOC. 
2071 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2072 setUsesTOCBasePtr(DAG); 2073 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); 2074 return getTOCEntry(DAG, SDLoc(BASDN), true, GA); 2075 } 2076 2077 unsigned MOHiFlag, MOLoFlag; 2078 bool isPIC = 2079 GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag); 2080 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 2081 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 2082 return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG); 2083 } 2084 2085 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 2086 SelectionDAG &DAG) const { 2087 2088 // FIXME: TLS addresses currently use medium model code sequences, 2089 // which is the most useful form. Eventually support for small and 2090 // large models could be added if users need it, at the cost of 2091 // additional complexity. 2092 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2093 if (DAG.getTarget().Options.EmulatedTLS) 2094 return LowerToTLSEmulatedModel(GA, DAG); 2095 2096 SDLoc dl(GA); 2097 const GlobalValue *GV = GA->getGlobal(); 2098 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2099 bool is64bit = Subtarget.isPPC64(); 2100 const Module *M = DAG.getMachineFunction().getFunction()->getParent(); 2101 PICLevel::Level picLevel = M->getPICLevel(); 2102 2103 TLSModel::Model Model = getTargetMachine().getTLSModel(GV); 2104 2105 if (Model == TLSModel::LocalExec) { 2106 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2107 PPCII::MO_TPREL_HA); 2108 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2109 PPCII::MO_TPREL_LO); 2110 SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2, 2111 is64bit ? MVT::i64 : MVT::i32); 2112 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 2113 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 2114 } 2115 2116 if (Model == TLSModel::InitialExec) { 2117 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2118 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2119 PPCII::MO_TLS); 2120 SDValue GOTPtr; 2121 if (is64bit) { 2122 setUsesTOCBasePtr(DAG); 2123 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2124 GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, 2125 PtrVT, GOTReg, TGA); 2126 } else 2127 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); 2128 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, 2129 PtrVT, TGA, GOTPtr); 2130 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 2131 } 2132 2133 if (Model == TLSModel::GeneralDynamic) { 2134 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2135 SDValue GOTPtr; 2136 if (is64bit) { 2137 setUsesTOCBasePtr(DAG); 2138 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2139 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 2140 GOTReg, TGA); 2141 } else { 2142 if (picLevel == PICLevel::Small) 2143 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2144 else 2145 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2146 } 2147 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT, 2148 GOTPtr, TGA, TGA); 2149 } 2150 2151 if (Model == TLSModel::LocalDynamic) { 2152 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2153 SDValue GOTPtr; 2154 if (is64bit) { 2155 setUsesTOCBasePtr(DAG); 2156 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2157 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 2158 GOTReg, TGA); 2159 } else { 2160 if (picLevel == PICLevel::Small) 2161 GOTPtr = 
          DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
      else
        GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
    }
    SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
                                  PtrVT, GOTPtr, TGA, TGA);
    SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
                                      PtrVT, TLSAddr, TGA);
    return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
  }

  llvm_unreachable("Unknown TLS model!");
}

SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  SDLoc DL(GSDN);
  const GlobalValue *GV = GSDN->getGlobal();

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
    return getTOCEntry(DAG, DL, true, GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC =
      GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag, GV);

  if (isPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
                                            GSDN->getOffset(),
                                            PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, DL, false, GA);
  }

  SDValue GAHi =
    DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
  SDValue GALo =
    DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);

  SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG);

  // If the global reference is actually to a non-lazy-pointer, we have to do
  // an extra load to get the address of the global.
  if (MOHiFlag & PPCII::MO_NLP_FLAG)
    Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(),
                      false, false, false, 0);
  return Ptr;
}

SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc dl(Op);

  if (Op.getValueType() == MVT::v2i64) {
    // When the operands themselves are v2i64 values, we need to do something
    // special because VSX has no underlying comparison operations for these.
    if (Op.getOperand(0).getValueType() == MVT::v2i64) {
      // Equality can be handled by casting to the legal type for Altivec
      // comparisons, everything else needs to be expanded.
      if (CC == ISD::SETEQ || CC == ISD::SETNE) {
        return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
                 DAG.getSetCC(dl, MVT::v4i32,
                   DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
                   DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
                   CC));
      }

      return SDValue();
    }

    // We handle most of these in the usual way.
    return Op;
  }

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
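  // Worked example (illustrative, not from the original source): for a 32-bit
  // %x, "seteq %x, 0" becomes "srl (ctlz %x), 5". ctlz yields 32 (1 << 5)
  // only when %x is zero, so the shift produces exactly the i1 result.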
2244 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2245 if (C->isNullValue() && CC == ISD::SETEQ) { 2246 EVT VT = Op.getOperand(0).getValueType(); 2247 SDValue Zext = Op.getOperand(0); 2248 if (VT.bitsLT(MVT::i32)) { 2249 VT = MVT::i32; 2250 Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0)); 2251 } 2252 unsigned Log2b = Log2_32(VT.getSizeInBits()); 2253 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext); 2254 SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz, 2255 DAG.getConstant(Log2b, dl, MVT::i32)); 2256 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc); 2257 } 2258 // Leave comparisons against 0 and -1 alone for now, since they're usually 2259 // optimized. FIXME: revisit this when we can custom lower all setcc 2260 // optimizations. 2261 if (C->isAllOnesValue() || C->isNullValue()) 2262 return SDValue(); 2263 } 2264 2265 // If we have an integer seteq/setne, turn it into a compare against zero 2266 // by xor'ing the rhs with the lhs, which is faster than setting a 2267 // condition register, reading it back out, and masking the correct bit. The 2268 // normal approach here uses sub to do this instead of xor. Using xor exposes 2269 // the result to other bit-twiddling opportunities. 2270 EVT LHSVT = Op.getOperand(0).getValueType(); 2271 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 2272 EVT VT = Op.getValueType(); 2273 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0), 2274 Op.getOperand(1)); 2275 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC); 2276 } 2277 return SDValue(); 2278 } 2279 2280 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG, 2281 const PPCSubtarget &Subtarget) const { 2282 SDNode *Node = Op.getNode(); 2283 EVT VT = Node->getValueType(0); 2284 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 2285 SDValue InChain = Node->getOperand(0); 2286 SDValue VAListPtr = Node->getOperand(1); 2287 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 2288 SDLoc dl(Node); 2289 2290 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only"); 2291 2292 // gpr_index 2293 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 2294 VAListPtr, MachinePointerInfo(SV), MVT::i8, 2295 false, false, false, 0); 2296 InChain = GprIndex.getValue(1); 2297 2298 if (VT == MVT::i64) { 2299 // Check if GprIndex is even 2300 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex, 2301 DAG.getConstant(1, dl, MVT::i32)); 2302 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd, 2303 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE); 2304 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex, 2305 DAG.getConstant(1, dl, MVT::i32)); 2306 // Align GprIndex to be even if it isn't 2307 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne, 2308 GprIndex); 2309 } 2310 2311 // fpr index is 1 byte after gpr 2312 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2313 DAG.getConstant(1, dl, MVT::i32)); 2314 2315 // fpr 2316 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 2317 FprPtr, MachinePointerInfo(SV), MVT::i8, 2318 false, false, false, 0); 2319 InChain = FprIndex.getValue(1); 2320 2321 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2322 DAG.getConstant(8, dl, MVT::i32)); 2323 2324 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2325 DAG.getConstant(4, dl, MVT::i32)); 2326 2327 // areas 2328 SDValue OverflowArea = 
DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, 2329 MachinePointerInfo(), false, false, 2330 false, 0); 2331 InChain = OverflowArea.getValue(1); 2332 2333 SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, 2334 MachinePointerInfo(), false, false, 2335 false, 0); 2336 InChain = RegSaveArea.getValue(1); 2337 2338 // select overflow_area if index > 8 2339 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex, 2340 DAG.getConstant(8, dl, MVT::i32), ISD::SETLT); 2341 2342 // adjustment constant gpr_index * 4/8 2343 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32, 2344 VT.isInteger() ? GprIndex : FprIndex, 2345 DAG.getConstant(VT.isInteger() ? 4 : 8, dl, 2346 MVT::i32)); 2347 2348 // OurReg = RegSaveArea + RegConstant 2349 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea, 2350 RegConstant); 2351 2352 // Floating types are 32 bytes into RegSaveArea 2353 if (VT.isFloatingPoint()) 2354 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg, 2355 DAG.getConstant(32, dl, MVT::i32)); 2356 2357 // increase {f,g}pr_index by 1 (or 2 if VT is i64) 2358 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32, 2359 VT.isInteger() ? GprIndex : FprIndex, 2360 DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl, 2361 MVT::i32)); 2362 2363 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1, 2364 VT.isInteger() ? VAListPtr : FprPtr, 2365 MachinePointerInfo(SV), 2366 MVT::i8, false, false, 0); 2367 2368 // determine if we should load from reg_save_area or overflow_area 2369 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea); 2370 2371 // increase overflow_area by 4/8 if gpr/fpr > 8 2372 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea, 2373 DAG.getConstant(VT.isInteger() ? 
4 : 8, 2374 dl, MVT::i32)); 2375 2376 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea, 2377 OverflowAreaPlusN); 2378 2379 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, 2380 OverflowAreaPtr, 2381 MachinePointerInfo(), 2382 MVT::i32, false, false, 0); 2383 2384 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(), 2385 false, false, false, 0); 2386 } 2387 2388 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG, 2389 const PPCSubtarget &Subtarget) const { 2390 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only"); 2391 2392 // We have to copy the entire va_list struct: 2393 // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte 2394 return DAG.getMemcpy(Op.getOperand(0), Op, 2395 Op.getOperand(1), Op.getOperand(2), 2396 DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true, 2397 false, MachinePointerInfo(), MachinePointerInfo()); 2398 } 2399 2400 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 2401 SelectionDAG &DAG) const { 2402 return Op.getOperand(0); 2403 } 2404 2405 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 2406 SelectionDAG &DAG) const { 2407 SDValue Chain = Op.getOperand(0); 2408 SDValue Trmp = Op.getOperand(1); // trampoline 2409 SDValue FPtr = Op.getOperand(2); // nested function 2410 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 2411 SDLoc dl(Op); 2412 2413 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 2414 bool isPPC64 = (PtrVT == MVT::i64); 2415 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext()); 2416 2417 TargetLowering::ArgListTy Args; 2418 TargetLowering::ArgListEntry Entry; 2419 2420 Entry.Ty = IntPtrTy; 2421 Entry.Node = Trmp; Args.push_back(Entry); 2422 2423 // TrampSize == (isPPC64 ? 48 : 40); 2424 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl, 2425 isPPC64 ? MVT::i64 : MVT::i32); 2426 Args.push_back(Entry); 2427 2428 Entry.Node = FPtr; Args.push_back(Entry); 2429 Entry.Node = Nest; Args.push_back(Entry); 2430 2431 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 2432 TargetLowering::CallLoweringInfo CLI(DAG); 2433 CLI.setDebugLoc(dl).setChain(Chain) 2434 .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()), 2435 DAG.getExternalSymbol("__trampoline_setup", PtrVT), 2436 std::move(Args), 0); 2437 2438 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 2439 return CallResult.second; 2440 } 2441 2442 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG, 2443 const PPCSubtarget &Subtarget) const { 2444 MachineFunction &MF = DAG.getMachineFunction(); 2445 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2446 2447 SDLoc dl(Op); 2448 2449 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) { 2450 // vastart just stores the address of the VarArgsFrameIndex slot into the 2451 // memory location argument. 2452 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 2453 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2454 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2455 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2456 MachinePointerInfo(SV), 2457 false, false, 0); 2458 } 2459 2460 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 2461 // We suppose the given va_list is already allocated. 
2462 // 2463 // typedef struct { 2464 // char gpr; /* index into the array of 8 GPRs 2465 // * stored in the register save area 2466 // * gpr=0 corresponds to r3, 2467 // * gpr=1 to r4, etc. 2468 // */ 2469 // char fpr; /* index into the array of 8 FPRs 2470 // * stored in the register save area 2471 // * fpr=0 corresponds to f1, 2472 // * fpr=1 to f2, etc. 2473 // */ 2474 // char *overflow_arg_area; 2475 // /* location on stack that holds 2476 // * the next overflow argument 2477 // */ 2478 // char *reg_save_area; 2479 // /* where r3:r10 and f1:f8 (if saved) 2480 // * are stored 2481 // */ 2482 // } va_list[1]; 2483 2484 2485 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32); 2486 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32); 2487 2488 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 2489 2490 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 2491 PtrVT); 2492 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 2493 PtrVT); 2494 2495 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 2496 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT); 2497 2498 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 2499 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT); 2500 2501 uint64_t FPROffset = 1; 2502 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT); 2503 2504 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2505 2506 // Store first byte : number of int regs 2507 SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, 2508 Op.getOperand(1), 2509 MachinePointerInfo(SV), 2510 MVT::i8, false, false, 0); 2511 uint64_t nextOffset = FPROffset; 2512 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 2513 ConstFPROffset); 2514 2515 // Store second byte : number of float regs 2516 SDValue secondStore = 2517 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 2518 MachinePointerInfo(SV, nextOffset), MVT::i8, 2519 false, false, 0); 2520 nextOffset += StackOffset; 2521 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 2522 2523 // Store second word : arguments given on stack 2524 SDValue thirdStore = 2525 DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 2526 MachinePointerInfo(SV, nextOffset), 2527 false, false, 0); 2528 nextOffset += FrameOffset; 2529 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 2530 2531 // Store third word : arguments given in registers 2532 return DAG.getStore(thirdStore, dl, FR, nextPtr, 2533 MachinePointerInfo(SV, nextOffset), 2534 false, false, 0); 2535 2536 } 2537 2538 #include "PPCGenCallingConv.inc" 2539 2540 // Function whose sole purpose is to kill compiler warnings 2541 // stemming from unused functions included from PPCGenCallingConv.inc. 2542 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const { 2543 return Flag ? 
CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS; 2544 } 2545 2546 bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 2547 CCValAssign::LocInfo &LocInfo, 2548 ISD::ArgFlagsTy &ArgFlags, 2549 CCState &State) { 2550 return true; 2551 } 2552 2553 bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, 2554 MVT &LocVT, 2555 CCValAssign::LocInfo &LocInfo, 2556 ISD::ArgFlagsTy &ArgFlags, 2557 CCState &State) { 2558 static const MCPhysReg ArgRegs[] = { 2559 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2560 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2561 }; 2562 const unsigned NumArgRegs = array_lengthof(ArgRegs); 2563 2564 unsigned RegNum = State.getFirstUnallocated(ArgRegs); 2565 2566 // Skip one register if the first unallocated register has an even register 2567 // number and there are still argument registers available which have not been 2568 // allocated yet. RegNum is actually an index into ArgRegs, which means we 2569 // need to skip a register if RegNum is odd. 2570 if (RegNum != NumArgRegs && RegNum % 2 == 1) { 2571 State.AllocateReg(ArgRegs[RegNum]); 2572 } 2573 2574 // Always return false here, as this function only makes sure that the first 2575 // unallocated register has an odd register number and does not actually 2576 // allocate a register for the current argument. 2577 return false; 2578 } 2579 2580 bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT, 2581 MVT &LocVT, 2582 CCValAssign::LocInfo &LocInfo, 2583 ISD::ArgFlagsTy &ArgFlags, 2584 CCState &State) { 2585 static const MCPhysReg ArgRegs[] = { 2586 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 2587 PPC::F8 2588 }; 2589 2590 const unsigned NumArgRegs = array_lengthof(ArgRegs); 2591 2592 unsigned RegNum = State.getFirstUnallocated(ArgRegs); 2593 2594 // If there is only one Floating-point register left we need to put both f64 2595 // values of a split ppc_fp128 value on the stack. 2596 if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) { 2597 State.AllocateReg(ArgRegs[RegNum]); 2598 } 2599 2600 // Always return false here, as this function only makes sure that the two f64 2601 // values a ppc_fp128 value is split into are both passed in registers or both 2602 // passed on the stack and does not actually allocate a register for the 2603 // current argument. 2604 return false; 2605 } 2606 2607 /// FPR - The set of FP registers that should be allocated for arguments, 2608 /// on Darwin. 2609 static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, 2610 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10, 2611 PPC::F11, PPC::F12, PPC::F13}; 2612 2613 /// QFPR - The set of QPX registers that should be allocated for arguments. 2614 static const MCPhysReg QFPR[] = { 2615 PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7, 2616 PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13}; 2617 2618 /// CalculateStackSlotSize - Calculates the size reserved for this argument on 2619 /// the stack. 2620 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 2621 unsigned PtrByteSize) { 2622 unsigned ArgSize = ArgVT.getStoreSize(); 2623 if (Flags.isByVal()) 2624 ArgSize = Flags.getByValSize(); 2625 2626 // Round up to multiples of the pointer size, except for array members, 2627 // which are always packed. 
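  // Illustrative example (not from the original source): a 5-byte by-value
  // argument on a 64-bit target rounds up to ((5 + 7) / 8) * 8 == 8 bytes.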
2628 if (!Flags.isInConsecutiveRegs()) 2629 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2630 2631 return ArgSize; 2632 } 2633 2634 /// CalculateStackSlotAlignment - Calculates the alignment of this argument 2635 /// on the stack. 2636 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, 2637 ISD::ArgFlagsTy Flags, 2638 unsigned PtrByteSize) { 2639 unsigned Align = PtrByteSize; 2640 2641 // Altivec parameters are padded to a 16 byte boundary. 2642 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 2643 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 2644 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 2645 ArgVT == MVT::v1i128) 2646 Align = 16; 2647 // QPX vector types stored in double-precision are padded to a 32 byte 2648 // boundary. 2649 else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1) 2650 Align = 32; 2651 2652 // ByVal parameters are aligned as requested. 2653 if (Flags.isByVal()) { 2654 unsigned BVAlign = Flags.getByValAlign(); 2655 if (BVAlign > PtrByteSize) { 2656 if (BVAlign % PtrByteSize != 0) 2657 llvm_unreachable( 2658 "ByVal alignment is not a multiple of the pointer size"); 2659 2660 Align = BVAlign; 2661 } 2662 } 2663 2664 // Array members are always packed to their original alignment. 2665 if (Flags.isInConsecutiveRegs()) { 2666 // If the array member was split into multiple registers, the first 2667 // needs to be aligned to the size of the full type. (Except for 2668 // ppcf128, which is only aligned as its f64 components.) 2669 if (Flags.isSplit() && OrigVT != MVT::ppcf128) 2670 Align = OrigVT.getStoreSize(); 2671 else 2672 Align = ArgVT.getStoreSize(); 2673 } 2674 2675 return Align; 2676 } 2677 2678 /// CalculateStackSlotUsed - Return whether this argument will use its 2679 /// stack slot (instead of being passed in registers). ArgOffset, 2680 /// AvailableFPRs, and AvailableVRs must hold the current argument 2681 /// position, and will be updated to account for this argument. 2682 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, 2683 ISD::ArgFlagsTy Flags, 2684 unsigned PtrByteSize, 2685 unsigned LinkageSize, 2686 unsigned ParamAreaSize, 2687 unsigned &ArgOffset, 2688 unsigned &AvailableFPRs, 2689 unsigned &AvailableVRs, bool HasQPX) { 2690 bool UseMemory = false; 2691 2692 // Respect alignment of argument on the stack. 2693 unsigned Align = 2694 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 2695 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 2696 // If there's no space left in the argument save area, we must 2697 // use memory (this check also catches zero-sized arguments). 2698 if (ArgOffset >= LinkageSize + ParamAreaSize) 2699 UseMemory = true; 2700 2701 // Allocate argument on the stack. 2702 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 2703 if (Flags.isInConsecutiveRegsLast()) 2704 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2705 // If we overran the argument save area, we must use memory 2706 // (this check catches arguments passed partially in memory) 2707 if (ArgOffset > LinkageSize + ParamAreaSize) 2708 UseMemory = true; 2709 2710 // However, if the argument is actually passed in an FPR or a VR, 2711 // we don't use memory after all. 2712 if (!Flags.isByVal()) { 2713 if (ArgVT == MVT::f32 || ArgVT == MVT::f64 || 2714 // QPX registers overlap with the scalar FP registers. 
2715 (HasQPX && (ArgVT == MVT::v4f32 || 2716 ArgVT == MVT::v4f64 || 2717 ArgVT == MVT::v4i1))) 2718 if (AvailableFPRs > 0) { 2719 --AvailableFPRs; 2720 return false; 2721 } 2722 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 2723 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 2724 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 2725 ArgVT == MVT::v1i128) 2726 if (AvailableVRs > 0) { 2727 --AvailableVRs; 2728 return false; 2729 } 2730 } 2731 2732 return UseMemory; 2733 } 2734 2735 /// EnsureStackAlignment - Round stack frame size up from NumBytes to 2736 /// ensure minimum alignment required for target. 2737 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, 2738 unsigned NumBytes) { 2739 unsigned TargetAlign = Lowering->getStackAlignment(); 2740 unsigned AlignMask = TargetAlign - 1; 2741 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 2742 return NumBytes; 2743 } 2744 2745 SDValue 2746 PPCTargetLowering::LowerFormalArguments(SDValue Chain, 2747 CallingConv::ID CallConv, bool isVarArg, 2748 const SmallVectorImpl<ISD::InputArg> 2749 &Ins, 2750 SDLoc dl, SelectionDAG &DAG, 2751 SmallVectorImpl<SDValue> &InVals) 2752 const { 2753 if (Subtarget.isSVR4ABI()) { 2754 if (Subtarget.isPPC64()) 2755 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, 2756 dl, DAG, InVals); 2757 else 2758 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, 2759 dl, DAG, InVals); 2760 } else { 2761 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, 2762 dl, DAG, InVals); 2763 } 2764 } 2765 2766 SDValue 2767 PPCTargetLowering::LowerFormalArguments_32SVR4( 2768 SDValue Chain, 2769 CallingConv::ID CallConv, bool isVarArg, 2770 const SmallVectorImpl<ISD::InputArg> 2771 &Ins, 2772 SDLoc dl, SelectionDAG &DAG, 2773 SmallVectorImpl<SDValue> &InVals) const { 2774 2775 // 32-bit SVR4 ABI Stack Frame Layout: 2776 // +-----------------------------------+ 2777 // +--> | Back chain | 2778 // | +-----------------------------------+ 2779 // | | Floating-point register save area | 2780 // | +-----------------------------------+ 2781 // | | General register save area | 2782 // | +-----------------------------------+ 2783 // | | CR save word | 2784 // | +-----------------------------------+ 2785 // | | VRSAVE save word | 2786 // | +-----------------------------------+ 2787 // | | Alignment padding | 2788 // | +-----------------------------------+ 2789 // | | Vector register save area | 2790 // | +-----------------------------------+ 2791 // | | Local variable space | 2792 // | +-----------------------------------+ 2793 // | | Parameter list area | 2794 // | +-----------------------------------+ 2795 // | | LR save word | 2796 // | +-----------------------------------+ 2797 // SP--> +--- | Back chain | 2798 // +-----------------------------------+ 2799 // 2800 // Specifications: 2801 // System V Application Binary Interface PowerPC Processor Supplement 2802 // AltiVec Technology Programming Interface Manual 2803 2804 MachineFunction &MF = DAG.getMachineFunction(); 2805 MachineFrameInfo *MFI = MF.getFrameInfo(); 2806 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2807 2808 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 2809 // Potential tail calls could cause overwriting of argument stack slots. 2810 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 2811 (CallConv == CallingConv::Fast)); 2812 unsigned PtrByteSize = 4; 2813 2814 // Assign locations to all of the incoming arguments. 
2815 SmallVector<CCValAssign, 16> ArgLocs; 2816 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 2817 *DAG.getContext()); 2818 2819 // Reserve space for the linkage area on the stack. 2820 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 2821 CCInfo.AllocateStack(LinkageSize, PtrByteSize); 2822 2823 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 2824 2825 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2826 CCValAssign &VA = ArgLocs[i]; 2827 2828 // Arguments stored in registers. 2829 if (VA.isRegLoc()) { 2830 const TargetRegisterClass *RC; 2831 EVT ValVT = VA.getValVT(); 2832 2833 switch (ValVT.getSimpleVT().SimpleTy) { 2834 default: 2835 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 2836 case MVT::i1: 2837 case MVT::i32: 2838 RC = &PPC::GPRCRegClass; 2839 break; 2840 case MVT::f32: 2841 if (Subtarget.hasP8Vector()) 2842 RC = &PPC::VSSRCRegClass; 2843 else 2844 RC = &PPC::F4RCRegClass; 2845 break; 2846 case MVT::f64: 2847 if (Subtarget.hasVSX()) 2848 RC = &PPC::VSFRCRegClass; 2849 else 2850 RC = &PPC::F8RCRegClass; 2851 break; 2852 case MVT::v16i8: 2853 case MVT::v8i16: 2854 case MVT::v4i32: 2855 RC = &PPC::VRRCRegClass; 2856 break; 2857 case MVT::v4f32: 2858 RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass; 2859 break; 2860 case MVT::v2f64: 2861 case MVT::v2i64: 2862 RC = &PPC::VSHRCRegClass; 2863 break; 2864 case MVT::v4f64: 2865 RC = &PPC::QFRCRegClass; 2866 break; 2867 case MVT::v4i1: 2868 RC = &PPC::QBRCRegClass; 2869 break; 2870 } 2871 2872 // Transform the arguments stored in physical registers into virtual ones. 2873 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2874 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, 2875 ValVT == MVT::i1 ? MVT::i32 : ValVT); 2876 2877 if (ValVT == MVT::i1) 2878 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); 2879 2880 InVals.push_back(ArgValue); 2881 } else { 2882 // Argument stored in memory. 2883 assert(VA.isMemLoc()); 2884 2885 unsigned ArgSize = VA.getLocVT().getStoreSize(); 2886 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), 2887 isImmutable); 2888 2889 // Create load nodes to retrieve arguments from the stack. 2890 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2891 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2892 MachinePointerInfo(), 2893 false, false, false, 0)); 2894 } 2895 } 2896 2897 // Assign locations to all of the incoming aggregate by value arguments. 2898 // Aggregates passed by value are stored in the local variable space of the 2899 // caller's stack frame, right above the parameter list area. 2900 SmallVector<CCValAssign, 16> ByValArgLocs; 2901 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 2902 ByValArgLocs, *DAG.getContext()); 2903 2904 // Reserve stack space for the allocations in CCInfo. 2905 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 2906 2907 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 2908 2909 // Area that is at least reserved in the caller of this function. 2910 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 2911 MinReservedArea = std::max(MinReservedArea, LinkageSize); 2912 2913 // Set the size that is at least reserved in caller of this function. Tail 2914 // call optimized function's reserved stack space needs to be aligned so that 2915 // taking the difference between two stack areas will result in an aligned 2916 // stack. 
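  // Illustrative example (not from the original source): EnsureStackAlignment
  // rounds with a mask, e.g. for a 16-byte target alignment it computes
  // (40 + 15) & ~15 == 48.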
  // Set the size that is at least reserved in the caller of this function.
  // Tail call optimized functions' reserved stack space needs to be aligned
  // so that taking the difference between two stack areas will result in an
  // aligned stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  SmallVector<SDValue, 8> MemOps;

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    static const MCPhysReg GPArgRegs[] = {
      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
      PPC::R7, PPC::R8, PPC::R9, PPC::R10,
    };
    const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);

    static const MCPhysReg FPArgRegs[] = {
      PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
      PPC::F8
    };
    unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
    if (DisablePPCFloatInVariadic)
      NumFPArgRegs = 0;

    FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
    FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));

    // Make room for NumGPArgRegs and NumFPArgRegs.
    int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
                NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;

    FuncInfo->setVarArgsStackOffset(
        MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
                               CCInfo.getNextStackOffset(), true));

    FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // The fixed integer arguments of a variadic function are stored to the
    // VarArgsFrameIndex on the stack so that they may be loaded by
    // dereferencing the result of va_next.
    for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
      // Get an existing live-in vreg, or add a new one.
      unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
      if (!VReg)
        VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                   MachinePointerInfo(), false, false, 0);
      MemOps.push_back(Store);
      // Increment the address by four for the next argument to store.
      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }

    // FIXME 32-bit SVR4: We only need to save FP argument registers if CR
    // bit 6 is set.
    // The double arguments are stored to the VarArgsFrameIndex on the stack.
    for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
      // Get an existing live-in vreg, or add a new one.
      unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
      if (!VReg)
        VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
      SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                   MachinePointerInfo(), false, false, 0);
      MemOps.push_back(Store);
      // Increment the address by eight for the next argument to store.
      SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
                                       PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}
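// A rough worked example of the varargs save area built above (sizes assumed
// for a typical 32-bit SVR4 target): eight GPRs at 4 bytes plus eight FPRs at
// 8 bytes gives Depth = 8*4 + 8*8 = 96 bytes, which va_arg then walks before
// falling back to the caller-provided overflow area on the stack.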
// PPC64 passes i8, i16, and i32 values in i64 registers. Promote
// value to MVT::i64 and then truncate to the correct register size.
SDValue
PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
                                     SelectionDAG &DAG, SDValue ArgVal,
                                     SDLoc dl) const {
  if (Flags.isSExt())
    ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
                         DAG.getValueType(ObjectVT));
  else if (Flags.isZExt())
    ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
                         DAG.getValueType(ObjectVT));

  return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
}

SDValue
PPCTargetLowering::LowerFormalArguments_64SVR4(
                                      SDValue Chain,
                                      CallingConv::ID CallConv, bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg>
                                        &Ins,
                                      SDLoc dl, SelectionDAG &DAG,
                                      SmallVectorImpl<SDValue> &InVals) const {
  // TODO: add description of PPC stack frame format, or at least some docs.
  //
  bool isELFv2ABI = Subtarget.isELFv2ABI();
  bool isLittleEndian = Subtarget.isLittleEndian();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  assert(!(CallConv == CallingConv::Fast && isVarArg) &&
         "fastcc not supported on varargs functions");

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = 8;
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };
  static const MCPhysReg VSRH[] = {
    PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7,
    PPC::VSH8, PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13
  };

  const unsigned Num_GPR_Regs = array_lengthof(GPR);
  const unsigned Num_FPR_Regs = 13;
  const unsigned Num_VR_Regs = array_lengthof(VR);
  const unsigned Num_QFPR_Regs = Num_FPR_Regs;

  // Do a first pass over the arguments to determine whether the ABI
  // guarantees that our caller has allocated the parameter save area
  // on its stack frame. In the ELFv1 ABI, this is always the case;
  // in the ELFv2 ABI, it is true if this is a vararg function or if
  // any parameter is located in a stack slot.

  bool HasParameterArea = !isELFv2ABI || isVarArg;
  unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = Num_FPR_Regs;
  unsigned AvailableVRs = Num_VR_Regs;
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    if (Ins[i].Flags.isNest())
      continue;

    if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
                               PtrByteSize, LinkageSize, ParamAreaSize,
                               NumBytes, AvailableFPRs, AvailableVRs,
                               Subtarget.hasQPX()))
      HasParameterArea = true;
  }

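  // Illustrative example (not exhaustive): an ELFv2 function taking nine i64
  // arguments exhausts the eight GPRs above, so CalculateStackSlotUsed()
  // reports a stack slot and HasParameterArea becomes true; with only two i64
  // arguments (and no varargs) it would remain false.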
  // Add DAG nodes to load the arguments or copy them out of registers.  On
  // entry to a function on PPC, the arguments start after the linkage area,
  // although the first ones are often in registers.

  unsigned ArgOffset = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  unsigned &QFPR_idx = FPR_idx;
  SmallVector<SDValue, 8> MemOps;
  Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    SDValue ArgVal;
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    EVT OrigVT = Ins[ArgNo].ArgVT;
    unsigned ObjSize = ObjectVT.getStoreSize();
    unsigned ArgSize = ObjSize;
    ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
    if (Ins[ArgNo].isOrigArg()) {
      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
    }
    // We re-align the argument offset for each argument, except when using
    // the fast calling convention, when we need to make sure we do that only
    // when we'll actually use a stack slot.
    unsigned CurArgOffset, Align;
    auto ComputeArgOffset = [&]() {
      /* Respect alignment of argument on the stack. */
      Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
      ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
      CurArgOffset = ArgOffset;
    };

    if (CallConv != CallingConv::Fast) {
      ComputeArgOffset();

      /* Compute GPR index associated with argument offset. */
      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
    }

    // FIXME: the codegen can be much improved in some cases.
    // We do not have to keep everything in memory.
    if (Flags.isByVal()) {
      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");

      if (CallConv == CallingConv::Fast)
        ComputeArgOffset();

      // ObjSize is the true size; ArgSize is rounded up to a multiple of
      // registers.
      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      // Empty aggregate parameters do not take up registers.  Examples:
      //   struct { } a;
      //   union  { } b;
      //   int c[0];
      // etc.  However, we have to provide a place-holder in InVals, so
      // pretend we have an 8-byte item at the current address for that
      // purpose.
      if (!ObjSize) {
        int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
        SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
        InVals.push_back(FIN);
        continue;
      }

      // Create a stack object covering all stack doublewords occupied
      // by the argument.  If the argument is (fully or partially) on
      // the stack, or if the argument is fully in registers but the
      // caller has allocated the parameter save area anyway, we can refer
      // directly to the caller's stack frame.  Otherwise, create a
      // local copy in our own frame.
      int FI;
      if (HasParameterArea ||
          ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
        FI = MFI->CreateFixedObject(ArgSize, ArgOffset, false, true);
      else
        FI = MFI->CreateStackObject(ArgSize, Align, false);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);

      // Handle aggregates smaller than 8 bytes.
      if (ObjSize < PtrByteSize) {
        // The value of the object is its address, which differs from the
        // address of the enclosing doubleword on big-endian systems.
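        // Hypothetical layout: a 3-byte aggregate whose doubleword starts at
        // offset 48 lives at bytes 48..50 on little-endian, but at bytes
        // 53..55 on big-endian, hence the PtrByteSize - ObjSize bump below.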
        SDValue Arg = FIN;
        if (!isLittleEndian) {
          SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
          Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
        }
        InVals.push_back(Arg);

        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          SDValue Store;

          if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
            EVT ObjType = (ObjSize == 1 ? MVT::i8 :
                           (ObjSize == 2 ? MVT::i16 : MVT::i32));
            Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
                                      MachinePointerInfo(FuncArg),
                                      ObjType, false, false, 0);
          } else {
            // For sizes that don't fit a truncating store (3, 5, 6, 7),
            // store the whole register as-is to the parameter save area
            // slot.
            Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                 MachinePointerInfo(FuncArg),
                                 false, false, 0);
          }

          MemOps.push_back(Store);
        }
        // Whether we copied from a register or not, advance the offset
        // into the parameter save area by a full doubleword.
        ArgOffset += PtrByteSize;
        continue;
      }

      // The value of the object is its address, which is the address of
      // its first stack doubleword.
      InVals.push_back(FIN);

      // Store whatever pieces of the object are in registers to memory.
      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        if (GPR_idx == Num_GPR_Regs)
          break;

        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
        SDValue Addr = FIN;
        if (j) {
          SDValue Off = DAG.getConstant(j, dl, PtrVT);
          Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
        }
        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
                                     MachinePointerInfo(FuncArg, j),
                                     false, false, 0);
        MemOps.push_back(Store);
        ++GPR_idx;
      }
      ArgOffset += ArgSize;
      continue;
    }

    switch (ObjectVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled argument type!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (Flags.isNest()) {
        // The 'nest' parameter, if any, is passed in R11.
        unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);

        break;
      }

      // These can be scalar arguments or elements of an integer array type
      // passed directly.  Clang may use those instead of "byval" aggregate
      // types to avoid forcing arguments to memory unnecessarily.
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
          // value to MVT::i64 and then truncate to the correct register size.
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();

        needsLoad = true;
        ArgSize = PtrByteSize;
      }
      if (CallConv != CallingConv::Fast || needsLoad)
        ArgOffset += 8;
      break;

    case MVT::f32:
    case MVT::f64:
      // These can be scalar arguments or elements of a float array type
      // passed directly.  The latter are used to implement ELFv2 homogeneous
      // float aggregates.
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;

        if (ObjectVT == MVT::f32)
          VReg = MF.addLiveIn(FPR[FPR_idx],
                              Subtarget.hasP8Vector()
                                  ? &PPC::VSSRCRegClass
                                  : &PPC::F4RCRegClass);
        else
          VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
                                                ? &PPC::VSFRCRegClass
                                                : &PPC::F8RCRegClass);

        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++FPR_idx;
      } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
        // FIXME: We may want to re-enable this for CallingConv::Fast on the
        // P8 once we support fp <-> gpr moves.

        // This can only ever happen in the presence of f32 array types,
        // since otherwise we never run out of FPRs before running out
        // of GPRs.
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::f32) {
          if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
            ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
                                 DAG.getConstant(32, dl, MVT::i32));
          ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
        }

        ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();

        needsLoad = true;
      }

      // When passing an array of floats, the array occupies consecutive
      // space in the argument area; only round up to the next doubleword
      // at the end of the array.  Otherwise, each float takes 8 bytes.
      if (CallConv != CallingConv::Fast || needsLoad) {
        ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
        ArgOffset += ArgSize;
        if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      }
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
    case MVT::v2f64:
    case MVT::v2i64:
    case MVT::v1i128:
      if (!Subtarget.hasQPX()) {
        // These can be scalar arguments or elements of a vector array type
        // passed directly.  The latter are used to implement ELFv2
        // homogeneous vector aggregates.
        if (VR_idx != Num_VR_Regs) {
          unsigned VReg = (ObjectVT == MVT::v2f64 || ObjectVT == MVT::v2i64) ?
                          MF.addLiveIn(VSRH[VR_idx], &PPC::VSHRCRegClass) :
                          MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
          ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
          ++VR_idx;
        } else {
          if (CallConv == CallingConv::Fast)
            ComputeArgOffset();

          needsLoad = true;
        }
        if (CallConv != CallingConv::Fast || needsLoad)
          ArgOffset += 16;
        break;
      } // not QPX

      assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 &&
             "Invalid QPX parameter type");
      /* fall through */

    case MVT::v4f64:
    case MVT::v4i1:
      // QPX vectors are treated like their scalar floating-point subregisters
      // (except that they're larger).
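      // The slot sizes assumed here mirror the computation below: a v4f32
      // occupies a 16-byte slot, while v4f64 and v4i1 each take 32 bytes.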
      unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32;
      if (QFPR_idx != Num_QFPR_Regs) {
        const TargetRegisterClass *RC;
        switch (ObjectVT.getSimpleVT().SimpleTy) {
        case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
        case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
        default:         RC = &PPC::QBRCRegClass; break;
        }

        unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++QFPR_idx;
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();
        needsLoad = true;
      }
      if (CallConv != CallingConv::Fast || needsLoad)
        ArgOffset += Sz;
      break;
    }

    // We need to load the argument to a virtual register if we determined
    // above that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      if (ObjSize < ArgSize && !isLittleEndian)
        CurArgOffset += ArgSize - ObjSize;
      int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
                           false, false, false, 0);
    }

    InVals.push_back(ArgVal);
  }

  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea;
  if (HasParameterArea)
    MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
  else
    MinReservedArea = LinkageSize;

  // Set the size that is at least reserved in the caller of this function.
  // Tail call optimized functions' reserved stack space needs to be aligned
  // so that taking the difference between two stack areas will result in an
  // aligned stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    int Depth = ArgOffset;

    FuncInfo->setVarArgsFrameIndex(
        MFI->CreateFixedObject(PtrByteSize, Depth, true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
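    // E.g. (illustrative): if the fixed arguments consumed five GPRs,
    // ArgOffset sits 40 bytes past the linkage area, so the loop below starts
    // at GPR_idx 5 and spills X8..X10 into the first vararg slots.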
    for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
         GPR_idx < Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                   MachinePointerInfo(), false, false, 0);
      MemOps.push_back(Store);
      // Increment the address by eight for the next argument to store.
      SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

SDValue
PPCTargetLowering::LowerFormalArguments_Darwin(
                                      SDValue Chain,
                                      CallingConv::ID CallConv, bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg>
                                        &Ins,
                                      SDLoc dl, SelectionDAG &DAG,
                                      SmallVectorImpl<SDValue> &InVals) const {
  // TODO: add description of PPC stack frame format, or at least some docs.
  //
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = isPPC64 ? 8 : 4;
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned ArgOffset = LinkageSize;
  // Area that is at least reserved in caller of this function.
  unsigned MinReservedArea = ArgOffset;

  static const MCPhysReg GPR_32[] = {           // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const MCPhysReg GPR_64[] = {           // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
  const unsigned Num_FPR_Regs = 13;
  const unsigned Num_VR_Regs = array_lengthof(VR);

  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;

  // In 32-bit non-varargs functions, the stack space for vectors is after the
  // stack space for non-vectors.  We do not use this space unless we have
  // too many vectors to fit in registers, something that only occurs in
  // constructed examples, but we have to walk the arglist to figure that
  // out... for the pathological case, compute VecArgOffset as the start of
  // the vector parameter area.  Computing VecArgOffset is the entire point
  // of the following loop.
  unsigned VecArgOffset = ArgOffset;
  if (!isVarArg && !isPPC64) {
    for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
      EVT ObjectVT = Ins[ArgNo].VT;
      ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;

      if (Flags.isByVal()) {
        // ObjSize is the true size; ArgSize is rounded up to a multiple of
        // registers.
        unsigned ObjSize = Flags.getByValSize();
        unsigned ArgSize =
          ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
        VecArgOffset += ArgSize;
        continue;
      }

      switch(ObjectVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unhandled argument type!");
      case MVT::i1:
      case MVT::i32:
      case MVT::f32:
        VecArgOffset += 4;
        break;
      case MVT::i64:  // PPC64
      case MVT::f64:
        // FIXME: We are guaranteed to be !isPPC64 at this point.
        // Does MVT::i64 apply?
        VecArgOffset += 8;
        break;
      case MVT::v4f32:
      case MVT::v4i32:
      case MVT::v8i16:
      case MVT::v16i8:
        // Nothing to do, we're only looking at non-vector args here.
        break;
      }
    }
  }
  // We've found where the vector parameter area in memory is.  Skip the
  // first 12 parameters; these don't use that memory.
  VecArgOffset = ((VecArgOffset+15)/16)*16;
  VecArgOffset += 12*16;

  // Add DAG nodes to load the arguments or copy them out of registers.  On
  // entry to a function on PPC, the arguments start after the linkage area,
  // although the first ones are often in registers.

  SmallVector<SDValue, 8> MemOps;
  unsigned nAltivecParamsAtEnd = 0;
  Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    SDValue ArgVal;
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    unsigned ObjSize = ObjectVT.getSizeInBits()/8;
    unsigned ArgSize = ObjSize;
    ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
    if (Ins[ArgNo].isOrigArg()) {
      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
    }
    unsigned CurArgOffset = ArgOffset;

    // Varargs or 64-bit Altivec parameters are padded to a 16-byte boundary.
    if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
        ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
      if (isVarArg || isPPC64) {
        MinReservedArea = ((MinReservedArea+15)/16)*16;
        MinReservedArea += CalculateStackSlotSize(ObjectVT,
                                                  Flags,
                                                  PtrByteSize);
      } else  nAltivecParamsAtEnd++;
    } else
      // Calculate min reserved area.
      MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
                                                Flags,
                                                PtrByteSize);

    // FIXME: the codegen can be much improved in some cases.
    // We do not have to keep everything in memory.
    if (Flags.isByVal()) {
      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");

      // ObjSize is the true size; ArgSize is rounded up to a multiple of
      // registers.
      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      // Objects of size 1 and 2 are right-justified, everything else is
      // left-justified.  This means the memory address is adjusted forwards.
      if (ObjSize==1 || ObjSize==2) {
        CurArgOffset = CurArgOffset + (4 - ObjSize);
      }
      // The value of the object is its address.
      int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, false, true);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(FIN);
      if (ObjSize==1 || ObjSize==2) {
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg;
          if (isPPC64)
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          else
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
          SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
                                            MachinePointerInfo(FuncArg),
                                            ObjType, false, false, 0);
          MemOps.push_back(Store);
          ++GPR_idx;
        }

        ArgOffset += PtrByteSize;

        continue;
      }
      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        // Store whatever pieces of the object are in registers
        // to memory.  ArgOffset will be the address of the beginning
        // of the object.
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg;
          if (isPPC64)
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          else
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
          SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                       MachinePointerInfo(FuncArg, j),
                                       false, false, 0);
          MemOps.push_back(Store);
          ++GPR_idx;
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
          break;
        }
      }
      continue;
    }

    switch (ObjectVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled argument type!");
    case MVT::i1:
    case MVT::i32:
      if (!isPPC64) {
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);

          if (ObjectVT == MVT::i1)
            ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);

          ++GPR_idx;
        } else {
          needsLoad = true;
          ArgSize = PtrByteSize;
        }
        // All int arguments reserve stack space in the Darwin ABI.
        ArgOffset += PtrByteSize;
        break;
      }
      // FALLTHROUGH
    case MVT::i64:  // PPC64
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
          // value to MVT::i64 and then truncate to the correct register size.
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);

        ++GPR_idx;
      } else {
        needsLoad = true;
        ArgSize = PtrByteSize;
      }
      // All int arguments reserve stack space in the Darwin ABI.
      ArgOffset += 8;
      break;

    case MVT::f32:
    case MVT::f64:
      // Every 4 bytes of argument space consumes one of the GPRs available
      // for argument passing.
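      // So, illustratively, an f64 argument in a 32-bit Darwin function burns
      // two GPRs even though the value itself arrives in an FPR; the double
      // increment below implements exactly that.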
      if (GPR_idx != Num_GPR_Regs) {
        ++GPR_idx;
        if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
          ++GPR_idx;
      }
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;

        if (ObjectVT == MVT::f32)
          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
        else
          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);

        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++FPR_idx;
      } else {
        needsLoad = true;
      }

      // All FP arguments reserve stack space in the Darwin ABI.
      ArgOffset += isPPC64 ? 8 : ObjSize;
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      // Note that vector arguments in registers don't reserve stack space,
      // except in varargs functions.
      if (VR_idx != Num_VR_Regs) {
        unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        if (isVarArg) {
          while ((ArgOffset % 16) != 0) {
            ArgOffset += PtrByteSize;
            if (GPR_idx != Num_GPR_Regs)
              GPR_idx++;
          }
          ArgOffset += 16;
          GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
        }
        ++VR_idx;
      } else {
        if (!isVarArg && !isPPC64) {
          // Vectors go after all the non-vectors.
          CurArgOffset = VecArgOffset;
          VecArgOffset += 16;
        } else {
          // Vectors are aligned.
          ArgOffset = ((ArgOffset+15)/16)*16;
          CurArgOffset = ArgOffset;
          ArgOffset += 16;
        }
        needsLoad = true;
      }
      break;
    }

    // We need to load the argument to a virtual register if we determined
    // above that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      int FI = MFI->CreateFixedObject(ObjSize,
                                      CurArgOffset + (ArgSize - ObjSize),
                                      isImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
                           false, false, false, 0);
    }

    InVals.push_back(ArgVal);
  }

  // Allow for Altivec parameters at the end, if needed.
  if (nAltivecParamsAtEnd) {
    MinReservedArea = ((MinReservedArea+15)/16)*16;
    MinReservedArea += 16*nAltivecParamsAtEnd;
  }

  // Area that is at least reserved in the caller of this function.
  MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);

  // Set the size that is at least reserved in the caller of this function.
  // Tail call optimized functions' reserved stack space needs to be aligned
  // so that taking the difference between two stack areas will result in an
  // aligned stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    int Depth = ArgOffset;

    FuncInfo->setVarArgsFrameIndex(
        MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
                               Depth, true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
    for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg;

      if (isPPC64)
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      else
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                   MachinePointerInfo(), false, false, 0);
      MemOps.push_back(Store);
      // Increment the address by PtrByteSize for the next argument to store.
      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
/// adjusted to accommodate the arguments for the tail call.
static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
                                   unsigned ParamSize) {

  if (!isTailCall) return 0;

  PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
  unsigned CallerMinReservedArea = FI->getMinReservedArea();
  int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
  // Remember only if the new adjustment is bigger.
  if (SPDiff < FI->getTailCallSPDelta())
    FI->setTailCallSPDelta(SPDiff);

  return SPDiff;
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization.  Targets which want to do tail call
/// optimization should implement this function.
bool
PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                                     CallingConv::ID CalleeCC,
                                                     bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                                     SelectionDAG& DAG) const {
  if (!getTargetMachine().Options.GuaranteedTailCallOpt)
    return false;

  // Variable argument functions are not supported.
  if (isVarArg)
    return false;

  MachineFunction &MF = DAG.getMachineFunction();
  CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
  if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
    // Functions containing by-val parameters are not supported.
    for (unsigned i = 0; i != Ins.size(); i++) {
      ISD::ArgFlagsTy Flags = Ins[i].Flags;
      if (Flags.isByVal()) return false;
    }

    // Non-PIC/GOT tail calls are supported.
    if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
      return true;

    // At the moment we can only do local tail calls (in same module, hidden
    // or protected) if we are generating PIC.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
      return G->getGlobal()->hasHiddenVisibility()
          || G->getGlobal()->hasProtectedVisibility();
  }

  return false;
}

/// isBLACompatibleAddress - Return the immediate to use if the specified
/// 32-bit value is representable in the immediate field of a BxA instruction.
static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
  if (!C) return nullptr;

  int Addr = C->getZExtValue();
  if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
      SignExtend32<26>(Addr) != Addr)
    return nullptr;  // Top 6 bits have to be sext of immediate.
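  // Example with assumed addresses: 0x01FFFFFC is word-aligned and survives
  // SignExtend32<26>, so it is encodable and the node built below carries
  // 0x01FFFFFC >> 2; 0x02000000 fails the sign-extension test and is not.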
  return DAG.getConstant((int)C->getZExtValue() >> 2, SDLoc(Op),
                         DAG.getTargetLoweringInfo().getPointerTy(
                             DAG.getDataLayout())).getNode();
}

namespace {

struct TailCallArgumentInfo {
  SDValue Arg;
  SDValue FrameIdxOp;
  int FrameIdx;

  TailCallArgumentInfo() : FrameIdx(0) {}
};

}

/// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
static void
StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG,
                                  SDValue Chain,
                                  const SmallVectorImpl<TailCallArgumentInfo>
                                      &TailCallArgs,
                                  SmallVectorImpl<SDValue> &MemOpChains,
                                  SDLoc dl) {
  for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
    SDValue Arg = TailCallArgs[i].Arg;
    SDValue FIN = TailCallArgs[i].FrameIdxOp;
    int FI = TailCallArgs[i].FrameIdx;
    // Store relative to the frame pointer.
    MemOpChains.push_back(DAG.getStore(
        Chain, dl, Arg, FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), false,
        false, 0));
  }
}

/// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address
/// to the appropriate stack slot for the tail call optimized function call.
static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
                                             MachineFunction &MF,
                                             SDValue Chain,
                                             SDValue OldRetAddr,
                                             SDValue OldFP,
                                             int SPDiff,
                                             bool isPPC64,
                                             bool isDarwinABI,
                                             SDLoc dl) {
  if (SPDiff) {
    // Calculate the new stack slot for the return address.
    int SlotSize = isPPC64 ? 8 : 4;
    const PPCFrameLowering *FL =
        MF.getSubtarget<PPCSubtarget>().getFrameLowering();
    int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
    int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize,
                                                          NewRetAddrLoc, true);
    EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
    SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
    Chain = DAG.getStore(
        Chain, dl, OldRetAddr, NewRetAddrFrIdx,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), NewRetAddr),
        false, false, 0);

    // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack
    // slot as the FP is never overwritten.
    if (isDarwinABI) {
      int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset();
      int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc,
                                                          true);
      SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
      Chain = DAG.getStore(
          Chain, dl, OldFP, NewFramePtrIdx,
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), NewFPIdx),
          false, false, 0);
    }
  }
  return Chain;
}

/// CalculateTailCallArgDest - Remember the argument for later processing.
/// Calculate the position of the argument.
static void
CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
                         SDValue Arg, int SPDiff, unsigned ArgOffset,
                         SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
  int Offset = ArgOffset + SPDiff;
  uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8;
  int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
  EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
  SDValue FIN = DAG.getFrameIndex(FI, VT);
  TailCallArgumentInfo Info;
  Info.Arg = Arg;
  Info.FrameIdxOp = FIN;
  Info.FrameIdx = FI;
  TailCallArguments.push_back(Info);
}

/// EmitTailCallLoadFPAndRetAddr - Emit loads of the frame pointer and return
/// address stack slots.  Returns the chain as result and the loaded values in
/// LROpOut/FPOpOut.  Used when tail calling.
SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
                                                        int SPDiff,
                                                        SDValue Chain,
                                                        SDValue &LROpOut,
                                                        SDValue &FPOpOut,
                                                        bool isDarwinABI,
                                                        SDLoc dl) const {
  if (SPDiff) {
    // Load the LR and FP stack slot for later adjusting.
    EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
    LROpOut = getReturnAddrFrameIndex(DAG);
    LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(),
                          false, false, false, 0);
    Chain = SDValue(LROpOut.getNode(), 1);

    // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
    // slot as the FP is never overwritten.
    if (isDarwinABI) {
      FPOpOut = getFramePointerFrameIndex(DAG);
      FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(),
                            false, false, false, 0);
      Chain = SDValue(FPOpOut.getNode(), 1);
    }
  }
  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
/// specified by "Src" to address "Dst" of size "Size".  Alignment information
/// is specified by the specific parameter attribute.  The copy will be passed
/// as a byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part that
/// does not fit in registers.
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                          SDLoc dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       false, false, false, MachinePointerInfo(),
                       MachinePointerInfo());
}

/// LowerMemOpCallTo - Store the argument to the stack or remember it in case
/// of tail calls.
static void
LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain,
                 SDValue Arg, SDValue PtrOff, int SPDiff,
                 unsigned ArgOffset, bool isPPC64, bool isTailCall,
                 bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
                 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments,
                 SDLoc dl) {
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  if (!isTailCall) {
    if (isVector) {
      SDValue StackPtr;
      if (isPPC64)
        StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
      else
        StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
                           DAG.getConstant(ArgOffset, dl, PtrVT));
    }
    MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
                                       MachinePointerInfo(), false, false, 0));
  // Calculate and remember argument location.
  } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
                                  TailCallArguments);
}

static
void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
                     SDLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes,
                     SDValue LROp, SDValue FPOp, bool isDarwinABI,
                     SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
  MachineFunction &MF = DAG.getMachineFunction();

  // Emit a sequence of copyto/copyfrom virtual registers for arguments that
  // might overwrite each other in case of tail call optimization.
  SmallVector<SDValue, 8> MemOpChains2;
  // Do not flag preceding copytoreg stuff together with the following stuff.
  InFlag = SDValue();
  StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
                                    MemOpChains2, dl);
  if (!MemOpChains2.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);

  // Store the return address to the appropriate stack slot.
  Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff,
                                        isPPC64, isDarwinABI, dl);

  // Emit callseq_end just before tailcall node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
  InFlag = Chain.getValue(1);
}

// Is this global address that of a function that can be called by name (as
// opposed to something that must hold a descriptor for an indirect call)?
static bool isFunctionGlobalAddress(SDValue Callee) {
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
        Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
      return false;

    return G->getGlobal()->getType()->getElementType()->isFunctionTy();
  }

  return false;
}

static
unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
                     SDValue &Chain, SDValue CallSeqStart, SDLoc dl, int SPDiff,
                     bool isTailCall, bool IsPatchPoint, bool hasNest,
                     SmallVectorImpl<std::pair<unsigned, SDValue> > &RegsToPass,
                     SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys,
                     ImmutableCallSite *CS, const PPCSubtarget &Subtarget) {

  bool isPPC64 = Subtarget.isPPC64();
  bool isSVR4ABI = Subtarget.isSVR4ABI();
  bool isELFv2ABI = Subtarget.isELFv2ABI();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  NodeTys.push_back(MVT::Other);   // Returns a chain
  NodeTys.push_back(MVT::Glue);    // Returns a flag for retval copy to use.

  unsigned CallOpc = PPCISD::CALL;

  bool needIndirectCall = true;
  if (!isSVR4ABI || !isPPC64)
    if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) {
      // If this is an absolute destination address, use the munged value.
      Callee = SDValue(Dest, 0);
      needIndirectCall = false;
    }

  if (isFunctionGlobalAddress(Callee)) {
    GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
    // A call to a TLS address is actually an indirect call to a
    // thread-specific pointer.
    unsigned OpFlags = 0;
    if ((DAG.getTarget().getRelocationModel() != Reloc::Static &&
         (Subtarget.getTargetTriple().isMacOSX() &&
          Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5)) &&
         !G->getGlobal()->isStrongDefinitionForLinker()) ||
        (Subtarget.isTargetELF() && !isPPC64 &&
         !G->getGlobal()->hasLocalLinkage() &&
         DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
      // PC-relative references to external symbols should go through $stub,
      // unless we're building with the Leopard linker or later, which
      // automatically synthesizes these stubs.
      OpFlags = PPCII::MO_PLT_OR_STUB;
    }

    // If the callee is a GlobalAddress/ExternalSymbol node (quite common,
    // every direct call is) turn it into a TargetGlobalAddress /
    // TargetExternalSymbol node so that legalize doesn't hack it.
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
                                        Callee.getValueType(), 0, OpFlags);
    needIndirectCall = false;
  }

  if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    unsigned char OpFlags = 0;

    if ((DAG.getTarget().getRelocationModel() != Reloc::Static &&
         (Subtarget.getTargetTriple().isMacOSX() &&
          Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5))) ||
        (Subtarget.isTargetELF() && !isPPC64 &&
         DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
      // PC-relative references to external symbols should go through $stub,
      // unless we're building with the Leopard linker or later, which
      // automatically synthesizes these stubs.
      OpFlags = PPCII::MO_PLT_OR_STUB;
    }

    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(),
                                         OpFlags);
    needIndirectCall = false;
  }

  if (IsPatchPoint) {
    // We'll form an invalid direct call when lowering a patchpoint; the full
    // sequence for an indirect call is complicated, and many of the
    // instructions introduced might have side effects (and, thus, can't be
    // removed later).  The call itself will be removed as soon as the
    // argument/return lowering is complete, so the fact that it has the wrong
    // kind of operands should not really matter.
    needIndirectCall = false;
  }

  if (needIndirectCall) {
    // Otherwise, this is an indirect call.  We have to use a MTCTR/BCTRL pair
    // to do the call, we can't use PPCISD::CALL.
    SDValue MTCTROps[] = {Chain, Callee, InFlag};

    if (isSVR4ABI && isPPC64 && !isELFv2ABI) {
      // Function pointers in the 64-bit SVR4 ABI do not point to the function
      // entry point, but to the function descriptor (the function entry point
      // address is part of the function descriptor though).
      // The function descriptor is a three doubleword structure with the
      // following fields: function entry point, TOC base address and
      // environment pointer.
      // Thus for a call through a function pointer, the following actions need
      // to be performed:
      //   1. Save the TOC of the caller in the TOC save area of its stack
      //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
      //   2. Load the address of the function entry point from the function
      //      descriptor.
      //   3. Load the TOC of the callee from the function descriptor into r2.
      //   4. Load the environment pointer from the function descriptor into
      //      r11.
      //   5. Branch to the function entry point address.
      //   6. On return of the callee, the TOC of the caller needs to be
      //      restored (this is done in FinishCall()).
      //
      // The loads are scheduled at the beginning of the call sequence, and the
      // register copies are flagged together to ensure that no other
      // operations can be scheduled in between.  E.g. without flagging the
      // copies together, a TOC access in the caller could be scheduled between
      // the assignment of the callee TOC and the branch to the callee, which
      // results in the TOC access going through the TOC of the callee instead
      // of going through the TOC of the caller, which leads to incorrect code.

      // Load the address of the function entry point from the function
      // descriptor.
      SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1);
      if (LDChain.getValueType() == MVT::Glue)
        LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2);

      bool LoadsInv = Subtarget.hasInvariantFunctionDescriptors();

      MachinePointerInfo MPI(CS ? CS->getCalledValue() : nullptr);
      SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI,
                                        false, false, LoadsInv, 8);

      // Load environment pointer into r11.
      SDValue PtrOff = DAG.getIntPtrConstant(16, dl);
      SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff);
      SDValue LoadEnvPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddPtr,
                                       MPI.getWithOffset(16), false, false,
                                       LoadsInv, 8);

      SDValue TOCOff = DAG.getIntPtrConstant(8, dl);
      SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff);
      SDValue TOCPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddTOC,
                                   MPI.getWithOffset(8), false, false,
                                   LoadsInv, 8);

      setUsesTOCBasePtr(DAG);
      SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr,
                                        InFlag);
      Chain = TOCVal.getValue(0);
      InFlag = TOCVal.getValue(1);

      // If the function call has an explicit 'nest' parameter, it takes the
      // place of the environment pointer.
      if (!hasNest) {
        SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr,
                                          InFlag);

        Chain = EnvVal.getValue(0);
        InFlag = EnvVal.getValue(1);
      }

      MTCTROps[0] = Chain;
      MTCTROps[1] = LoadFuncPtr;
      MTCTROps[2] = InFlag;
    }

    Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys,
                        makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2));
    InFlag = Chain.getValue(1);

    NodeTys.clear();
    NodeTys.push_back(MVT::Other);
    NodeTys.push_back(MVT::Glue);
    Ops.push_back(Chain);
    CallOpc = PPCISD::BCTRL;
    Callee.setNode(nullptr);
    // Add use of X11 (holding environment pointer).
    if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest)
      Ops.push_back(DAG.getRegister(PPC::X11, PtrVT));
    // Add CTR register as callee so a bctr can be emitted later.
    if (isTailCall)
      Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT));
  }

  // If this is a direct call, pass the chain and the callee.
  if (Callee.getNode()) {
    Ops.push_back(Chain);
    Ops.push_back(Callee);
  }
  // If this is a tail call, add the stack pointer delta.
  if (isTailCall)
    Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));

  // Add argument registers to the end of the list so that they are known
  // live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live
  // into the call.
  if (isSVR4ABI && isPPC64 && !IsPatchPoint) {
    setUsesTOCBasePtr(DAG);
    Ops.push_back(DAG.getRegister(PPC::X2, PtrVT));
  }

  return CallOpc;
}

static
bool isLocalCall(const SDValue &Callee)
{
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    return G->getGlobal()->isStrongDefinitionForLinker();
  return false;
}

SDValue
PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   SDLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());
  CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Val = DAG.getCopyFromReg(Chain, dl,
                                     VA.getLocReg(), VA.getLocVT(), InFlag);
    Chain = Val.getValue(1);
    InFlag = Val.getValue(2);

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::AExt:
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::ZExt:
      Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::SExt:
      Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

SDValue
PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl,
                              bool isTailCall, bool isVarArg, bool IsPatchPoint,
                              bool hasNest, SelectionDAG &DAG,
                              SmallVector<std::pair<unsigned, SDValue>, 8>
                                  &RegsToPass,
                              SDValue InFlag, SDValue Chain,
                              SDValue CallSeqStart, SDValue &Callee,
                              int SPDiff, unsigned NumBytes,
                              const SmallVectorImpl<ISD::InputArg> &Ins,
                              SmallVectorImpl<SDValue> &InVals,
                              ImmutableCallSite *CS) const {

  std::vector<EVT> NodeTys;
  SmallVector<SDValue, 8> Ops;
  unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl,
                                 SPDiff, isTailCall, IsPatchPoint, hasNest,
                                 RegsToPass, Ops, NodeTys, CS, Subtarget);

  // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls.
  if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64())
    Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));

  // When performing tail call optimization the callee pops its arguments off
  // the stack.  Account for this here so these bytes can be pushed back on in
  // PPCFrameLowering::eliminateCallFramePseudoInstr.
  int BytesCalleePops =
      (CallConv == CallingConv::Fast &&
       getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0;

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  // Emit tail call.
  if (isTailCall) {
    assert(((Callee.getOpcode() == ISD::Register &&
             cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
            Callee.getOpcode() == ISD::TargetExternalSymbol ||
            Callee.getOpcode() == ISD::TargetGlobalAddress ||
            isa<ConstantSDNode>(Callee)) &&
           "Expecting a global address, external symbol, absolute value or "
           "register");

    DAG.getMachineFunction().getFrameInfo()->setHasTailCall();
    return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
  }

  // Add a NOP immediately after the branch instruction when using the 64-bit
  // SVR4 ABI.  At link time, if caller and callee are in a different module
  // and thus have a different TOC, the call will be replaced with a call to
  // a stub function which saves the current TOC, loads the TOC of the callee
  // and branches to the callee.  The NOP will be replaced with a load
  // instruction which restores the TOC of the caller from the TOC save slot
  // of the current stack frame.  If caller and callee belong to the same
  // module (and have the same TOC), the NOP will remain unchanged.

  if (!isTailCall && Subtarget.isSVR4ABI() && Subtarget.isPPC64() &&
      !IsPatchPoint) {
    if (CallOpc == PPCISD::BCTRL) {
      // This is a call through a function pointer.
      // Restore the caller TOC from the save area into R2.
      // See PrepareCall() for more information about calls through function
      // pointers in the 64-bit SVR4 ABI.
      // We are using a target-specific load with r2 hard coded, because the
      // result of a target-independent load would never go directly into r2,
      // since r2 is a reserved register (which prevents the register allocator
      // from allocating it), resulting in an additional register being
      // allocated and an unnecessary move instruction being generated.
      CallOpc = PPCISD::BCTRL_LOAD_TOC;

      EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
      SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
      unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
      SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
      SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);

      // The address needs to go after the chain input but before the flag (or
      // any other variadic arguments).
      Ops.insert(std::next(Ops.begin()), AddTOC);
    } else if ((CallOpc == PPCISD::CALL) &&
               (!isLocalCall(Callee) ||
                DAG.getTarget().getRelocationModel() == Reloc::PIC_))
      // Otherwise insert NOP for non-local calls.
4426 CallOpc = PPCISD::CALL_NOP;
4427 }
4428
4429 Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
4430 InFlag = Chain.getValue(1);
4431
4432 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
4433 DAG.getIntPtrConstant(BytesCalleePops, dl, true),
4434 InFlag, dl);
4435 if (!Ins.empty())
4436 InFlag = Chain.getValue(1);
4437
4438 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
4439 Ins, dl, DAG, InVals);
4440 }
4441
4442 SDValue
4443 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
4444 SmallVectorImpl<SDValue> &InVals) const {
4445 SelectionDAG &DAG = CLI.DAG;
4446 SDLoc &dl = CLI.DL;
4447 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
4448 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
4449 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
4450 SDValue Chain = CLI.Chain;
4451 SDValue Callee = CLI.Callee;
4452 bool &isTailCall = CLI.IsTailCall;
4453 CallingConv::ID CallConv = CLI.CallConv;
4454 bool isVarArg = CLI.IsVarArg;
4455 bool IsPatchPoint = CLI.IsPatchPoint;
4456 ImmutableCallSite *CS = CLI.CS;
4457
4458 if (isTailCall)
4459 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
4460 Ins, DAG);
4461
4462 if (!isTailCall && CS && CS->isMustTailCall())
4463 report_fatal_error("failed to perform tail call elimination on a call "
4464 "site marked musttail");
4465
4466 if (Subtarget.isSVR4ABI()) {
4467 if (Subtarget.isPPC64())
4468 return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
4469 isTailCall, IsPatchPoint, Outs, OutVals, Ins,
4470 dl, DAG, InVals, CS);
4471 else
4472 return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
4473 isTailCall, IsPatchPoint, Outs, OutVals, Ins,
4474 dl, DAG, InVals, CS);
4475 }
4476
4477 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
4478 isTailCall, IsPatchPoint, Outs, OutVals, Ins,
4479 dl, DAG, InVals, CS);
4480 }
4481
4482 SDValue
4483 PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
4484 CallingConv::ID CallConv, bool isVarArg,
4485 bool isTailCall, bool IsPatchPoint,
4486 const SmallVectorImpl<ISD::OutputArg> &Outs,
4487 const SmallVectorImpl<SDValue> &OutVals,
4488 const SmallVectorImpl<ISD::InputArg> &Ins,
4489 SDLoc dl, SelectionDAG &DAG,
4490 SmallVectorImpl<SDValue> &InVals,
4491 ImmutableCallSite *CS) const {
4492 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
4493 // of the 32-bit SVR4 ABI stack frame layout.
4494
4495 assert((CallConv == CallingConv::C ||
4496 CallConv == CallingConv::Fast) && "Unknown calling convention!");
4497
4498 unsigned PtrByteSize = 4;
4499
4500 MachineFunction &MF = DAG.getMachineFunction();
4501
4502 // Mark this function as potentially containing a function that contains a
4503 // tail call. As a consequence, the frame pointer will be used for dynamic
4504 // stack allocation and for restoring the caller's stack pointer in this
4505 // function's epilogue. This is done because the tail-called function might
4506 // overwrite the value in this function's (MF) stack pointer slot at 0(SP).
4507 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
4508 CallConv == CallingConv::Fast)
4509 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
4510
4511 // Count how many bytes are to be pushed on the stack, including the linkage
4512 // area, parameter list area and the part of the local variable space which
4513 // contains copies of aggregates which are passed by value.
4514
4515 // Assign locations to all of the outgoing arguments.
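// As an illustrative (hypothetical) example: for a prototyped call such as
//   void f(int a, double b);
// CC_PPC32_SVR4 assigns 'a' to R3 and 'b' to F1, and falls back to the
// parameter list area once the corresponding argument registers run out.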
4516 SmallVector<CCValAssign, 16> ArgLocs;
4517 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
4518 *DAG.getContext());
4519
4520 // Reserve space for the linkage area on the stack.
4521 CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
4522 PtrByteSize);
4523
4524 if (isVarArg) {
4525 // Handle fixed and variable vector arguments differently.
4526 // Fixed vector arguments go into registers as long as registers are
4527 // available. Variable vector arguments always go into memory.
4528 unsigned NumArgs = Outs.size();
4529
4530 for (unsigned i = 0; i != NumArgs; ++i) {
4531 MVT ArgVT = Outs[i].VT;
4532 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
4533 bool Result;
4534
4535 if (Outs[i].IsFixed) {
4536 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
4537 CCInfo);
4538 } else {
4539 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
4540 ArgFlags, CCInfo);
4541 }
4542
4543 if (Result) {
4544 #ifndef NDEBUG
4545 errs() << "Call operand #" << i << " has unhandled type "
4546 << EVT(ArgVT).getEVTString() << "\n";
4547 #endif
4548 llvm_unreachable(nullptr);
4549 }
4550 }
4551 } else {
4552 // All arguments are treated the same.
4553 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
4554 }
4555
4556 // Assign locations to all of the outgoing aggregate by value arguments.
4557 SmallVector<CCValAssign, 16> ByValArgLocs;
4558 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
4559 ByValArgLocs, *DAG.getContext());
4560
4561 // Reserve stack space for the allocations in CCInfo.
4562 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
4563
4564 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
4565
4566 // Size of the linkage area, parameter list area and the part of the local
4567 // variable space where copies of aggregates which are passed by value are
4568 // stored.
4569 unsigned NumBytes = CCByValInfo.getNextStackOffset();
4570
4571 // Calculate by how many bytes the stack has to be adjusted in case of tail
4572 // call optimization.
4573 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
4574
4575 // Adjust the stack pointer for the new arguments...
4576 // These operations are automatically eliminated by the prolog/epilog pass
4577 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
4578 dl);
4579 SDValue CallSeqStart = Chain;
4580
4581 // Load the return address and frame pointer so they can be moved somewhere
4582 // else later.
4583 SDValue LROp, FPOp;
4584 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false,
4585 dl);
4586
4587 // Set up a copy of the stack pointer for use loading and storing any
4588 // arguments that may not fit in the registers available for argument
4589 // passing.
4590 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
4591
4592 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
4593 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
4594 SmallVector<SDValue, 8> MemOpChains;
4595
4596 bool seenFloatArg = false;
4597 // Walk the register/memloc assignments, inserting copies/loads.
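// Note that for byval aggregates the loop below emits a memcpy into the
// caller's local variable space and then passes the address of that copy;
// the memcpy is chained in before CALLSEQ_START so that it is not treated
// as part of the call sequence itself.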
4598 for (unsigned i = 0, j = 0, e = ArgLocs.size();
4599 i != e;
4600 ++i) {
4601 CCValAssign &VA = ArgLocs[i];
4602 SDValue Arg = OutVals[i];
4603 ISD::ArgFlagsTy Flags = Outs[i].Flags;
4604
4605 if (Flags.isByVal()) {
4606 // Argument is an aggregate which is passed by value, thus we need to
4607 // create a copy of it in the local variable space of the current stack
4608 // frame (which is the stack frame of the caller) and pass the address of
4609 // this copy to the callee.
4610 assert((j < ByValArgLocs.size()) && "Index out of bounds!");
4611 CCValAssign &ByValVA = ByValArgLocs[j++];
4612 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
4613
4614 // Memory reserved in the local variable space of the caller's stack frame.
4615 unsigned LocMemOffset = ByValVA.getLocMemOffset();
4616
4617 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
4618 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
4619 StackPtr, PtrOff);
4620
4621 // Create a copy of the argument in the local area of the current
4622 // stack frame.
4623 SDValue MemcpyCall =
4624 CreateCopyOfByValArgument(Arg, PtrOff,
4625 CallSeqStart.getNode()->getOperand(0),
4626 Flags, DAG, dl);
4627
4628 // This must go outside the CALLSEQ_START..END.
4629 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
4630 CallSeqStart.getNode()->getOperand(1),
4631 SDLoc(MemcpyCall));
4632 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
4633 NewCallSeqStart.getNode());
4634 Chain = CallSeqStart = NewCallSeqStart;
4635
4636 // Pass the address of the aggregate copy on the stack either in a
4637 // physical register or in the parameter list area of the current stack
4638 // frame to the callee.
4639 Arg = PtrOff;
4640 }
4641
4642 if (VA.isRegLoc()) {
4643 if (Arg.getValueType() == MVT::i1)
4644 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg);
4645
4646 seenFloatArg |= VA.getLocVT().isFloatingPoint();
4647 // Put argument in a physical register.
4648 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
4649 } else {
4650 // Put argument in the parameter list area of the current stack frame.
4651 assert(VA.isMemLoc());
4652 unsigned LocMemOffset = VA.getLocMemOffset();
4653
4654 if (!isTailCall) {
4655 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
4656 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
4657 StackPtr, PtrOff);
4658
4659 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
4660 MachinePointerInfo(),
4661 false, false, 0));
4662 } else {
4663 // Calculate and remember argument location.
4664 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
4665 TailCallArguments);
4666 }
4667 }
4668 }
4669
4670 if (!MemOpChains.empty())
4671 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
4672
4673 // Build a sequence of copy-to-reg nodes chained together with token chain
4674 // and flag operands which copy the outgoing args into the appropriate regs.
4675 SDValue InFlag;
4676 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
4677 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
4678 RegsToPass[i].second, InFlag);
4679 InFlag = Chain.getValue(1);
4680 }
4681
4682 // Set CR bit 6 to true if this is a vararg call with floating args passed in
4683 // registers.
4684 if (isVarArg) {
4685 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
4686 SDValue Ops[] = { Chain, InFlag };
4687
4688 Chain = DAG.getNode(seenFloatArg ?
PPCISD::CR6SET : PPCISD::CR6UNSET,
4689 dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
4690
4691 InFlag = Chain.getValue(1);
4692 }
4693
4694 if (isTailCall)
4695 PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp,
4696 false, TailCallArguments);
4697
4698 return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint,
4699 /* unused except on PPC64 ELFv1 */ false, DAG,
4700 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
4701 NumBytes, Ins, InVals, CS);
4702 }
4703
4704 // Copy an argument into memory, being careful to do this outside the
4705 // call sequence for the call to which the argument belongs.
4706 SDValue
4707 PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
4708 SDValue CallSeqStart,
4709 ISD::ArgFlagsTy Flags,
4710 SelectionDAG &DAG,
4711 SDLoc dl) const {
4712 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
4713 CallSeqStart.getNode()->getOperand(0),
4714 Flags, DAG, dl);
4715 // The MEMCPY must go outside the CALLSEQ_START..END.
4716 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
4717 CallSeqStart.getNode()->getOperand(1),
4718 SDLoc(MemcpyCall));
4719 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
4720 NewCallSeqStart.getNode());
4721 return NewCallSeqStart;
4722 }
4723
4724 SDValue
4725 PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
4726 CallingConv::ID CallConv, bool isVarArg,
4727 bool isTailCall, bool IsPatchPoint,
4728 const SmallVectorImpl<ISD::OutputArg> &Outs,
4729 const SmallVectorImpl<SDValue> &OutVals,
4730 const SmallVectorImpl<ISD::InputArg> &Ins,
4731 SDLoc dl, SelectionDAG &DAG,
4732 SmallVectorImpl<SDValue> &InVals,
4733 ImmutableCallSite *CS) const {
4734
4735 bool isELFv2ABI = Subtarget.isELFv2ABI();
4736 bool isLittleEndian = Subtarget.isLittleEndian();
4737 unsigned NumOps = Outs.size();
4738 bool hasNest = false;
4739
4740 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4741 unsigned PtrByteSize = 8;
4742
4743 MachineFunction &MF = DAG.getMachineFunction();
4744
4745 // Mark this function as potentially containing a function that contains a
4746 // tail call. As a consequence, the frame pointer will be used for dynamic
4747 // stack allocation and for restoring the caller's stack pointer in this
4748 // function's epilogue. This is done because the tail-called function might
4749 // overwrite the value in this function's (MF) stack pointer slot at 0(SP).
4750 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
4751 CallConv == CallingConv::Fast)
4752 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
4753
4754 assert(!(CallConv == CallingConv::Fast && isVarArg) &&
4755 "fastcc not supported on varargs functions");
4756
4757 // Count how many bytes are to be pushed on the stack, including the linkage
4758 // area and the parameter passing area. On ELFv1, the linkage area is 48
4759 // bytes of reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the
4760 // linkage area is 32 bytes of reserved space for [SP][CR][LR][TOC].
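// For reference, the ELFv1 linkage area is laid out as one doubleword each:
// back chain at 0(SP), CR save at 8(SP), LR save at 16(SP), two reserved
// doublewords, and the TOC save at 40(SP). ELFv2 drops the two reserved
// doublewords, so its TOC save lands at 24(SP).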
4761 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4762 unsigned NumBytes = LinkageSize;
4763 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4764 unsigned &QFPR_idx = FPR_idx;
4765
4766 static const MCPhysReg GPR[] = {
4767 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4768 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4769 };
4770 static const MCPhysReg VR[] = {
4771 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4772 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4773 };
4774 static const MCPhysReg VSRH[] = {
4775 PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8,
4776 PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13
4777 };
4778
4779 const unsigned NumGPRs = array_lengthof(GPR);
4780 const unsigned NumFPRs = 13;
4781 const unsigned NumVRs = array_lengthof(VR);
4782 const unsigned NumQFPRs = NumFPRs;
4783
4784 // When using the fast calling convention, we don't provide backing for
4785 // arguments that will be in registers.
4786 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
4787
4788 // Add up all the space actually used.
4789 for (unsigned i = 0; i != NumOps; ++i) {
4790 ISD::ArgFlagsTy Flags = Outs[i].Flags;
4791 EVT ArgVT = Outs[i].VT;
4792 EVT OrigVT = Outs[i].ArgVT;
4793
4794 if (Flags.isNest())
4795 continue;
4796
4797 if (CallConv == CallingConv::Fast) {
4798 if (Flags.isByVal())
4799 NumGPRsUsed += (Flags.getByValSize()+7)/8;
4800 else
4801 switch (ArgVT.getSimpleVT().SimpleTy) {
4802 default: llvm_unreachable("Unexpected ValueType for argument!");
4803 case MVT::i1:
4804 case MVT::i32:
4805 case MVT::i64:
4806 if (++NumGPRsUsed <= NumGPRs)
4807 continue;
4808 break;
4809 case MVT::v4i32:
4810 case MVT::v8i16:
4811 case MVT::v16i8:
4812 case MVT::v2f64:
4813 case MVT::v2i64:
4814 case MVT::v1i128:
4815 if (++NumVRsUsed <= NumVRs)
4816 continue;
4817 break;
4818 case MVT::v4f32:
4819 // When using QPX, this is handled like an FP register; otherwise,
4820 // it is an Altivec register.
4821 if (Subtarget.hasQPX()) {
4822 if (++NumFPRsUsed <= NumFPRs)
4823 continue;
4824 } else {
4825 if (++NumVRsUsed <= NumVRs)
4826 continue;
4827 }
4828 break;
4829 case MVT::f32:
4830 case MVT::f64:
4831 case MVT::v4f64: // QPX
4832 case MVT::v4i1: // QPX
4833 if (++NumFPRsUsed <= NumFPRs)
4834 continue;
4835 break;
4836 }
4837 }
4838
4839 /* Respect alignment of argument on the stack. */
4840 unsigned Align =
4841 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
4842 NumBytes = ((NumBytes + Align - 1) / Align) * Align;
4843
4844 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
4845 if (Flags.isInConsecutiveRegsLast())
4846 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4847 }
4848
4849 unsigned NumBytesActuallyUsed = NumBytes;
4850
4851 // The prolog code of the callee may store up to 8 GPR argument registers to
4852 // the stack, allowing va_start to index over them in memory if it is varargs.
4853 // Because we cannot tell if this is needed on the caller side, we have to
4854 // conservatively assume that it is needed. As such, make sure we have at
4855 // least enough stack space for the caller to store the 8 GPRs.
4856 // FIXME: On ELFv2, it may be unnecessary to allocate the parameter area.
4857 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
4858
4859 // Tail call needs the stack to be aligned.
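// (EnsureStackAlignment rounds NumBytes up to the target's stack alignment
// so that a tail-called frame remains properly aligned.)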
4860 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
4861 CallConv == CallingConv::Fast)
4862 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
4863
4864 // Calculate by how many bytes the stack has to be adjusted in case of tail
4865 // call optimization.
4866 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
4867
4868 // To protect arguments on the stack from being clobbered in a tail call,
4869 // force all the loads to happen before doing any other lowering.
4870 if (isTailCall)
4871 Chain = DAG.getStackArgumentTokenFactor(Chain);
4872
4873 // Adjust the stack pointer for the new arguments...
4874 // These operations are automatically eliminated by the prolog/epilog pass
4875 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
4876 dl);
4877 SDValue CallSeqStart = Chain;
4878
4879 // Load the return address and frame pointer so they can be moved somewhere
4880 // else later.
4881 SDValue LROp, FPOp;
4882 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
4883 dl);
4884
4885 // Set up a copy of the stack pointer for use loading and storing any
4886 // arguments that may not fit in the registers available for argument
4887 // passing.
4888 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
4889
4890 // Figure out which arguments are going to go in registers, and which in
4891 // memory. Also, if this is a vararg function, floating point values
4892 // must be stored to our stack, and loaded into integer regs as well, if
4893 // any integer regs are available for argument passing.
4894 unsigned ArgOffset = LinkageSize;
4895
4896 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
4897 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
4898
4899 SmallVector<SDValue, 8> MemOpChains;
4900 for (unsigned i = 0; i != NumOps; ++i) {
4901 SDValue Arg = OutVals[i];
4902 ISD::ArgFlagsTy Flags = Outs[i].Flags;
4903 EVT ArgVT = Outs[i].VT;
4904 EVT OrigVT = Outs[i].ArgVT;
4905
4906 // PtrOff will be used to store the current argument to the stack if a
4907 // register cannot be found for it.
4908 SDValue PtrOff;
4909
4910 // We re-align the argument offset for each argument, except when using the
4911 // fast calling convention, in which case we do so only when the argument
4912 // will actually use a stack slot.
4913 auto ComputePtrOff = [&]() {
4914 /* Respect alignment of argument on the stack. */
4915 unsigned Align =
4916 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
4917 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
4918
4919 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
4920
4921 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
4922 };
4923
4924 if (CallConv != CallingConv::Fast) {
4925 ComputePtrOff();
4926
4927 /* Compute GPR index associated with argument offset. */
4928 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4929 GPR_idx = std::min(GPR_idx, NumGPRs);
4930 }
4931
4932 // Promote integers to 64-bit values.
4933 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
4934 // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
4935 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
4936 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
4937 }
4938
4939 // FIXME memcpy is used way more than necessary. Correctness first.
4940 // Note: "by value" is code for passing a structure by value, not
4941 // basic types.
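// On big-endian targets, small aggregates are right-justified within their
// doubleword slot so that a full-slot load leaves the data in the low-order
// bytes of the register; little-endian targets need no such adjustment (see
// the !isLittleEndian offset computations below).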
4942 if (Flags.isByVal()) { 4943 // Note: Size includes alignment padding, so 4944 // struct x { short a; char b; } 4945 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 4946 // These are the proper values we need for right-justifying the 4947 // aggregate in a parameter register. 4948 unsigned Size = Flags.getByValSize(); 4949 4950 // An empty aggregate parameter takes up no storage and no 4951 // registers. 4952 if (Size == 0) 4953 continue; 4954 4955 if (CallConv == CallingConv::Fast) 4956 ComputePtrOff(); 4957 4958 // All aggregates smaller than 8 bytes must be passed right-justified. 4959 if (Size==1 || Size==2 || Size==4) { 4960 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 4961 if (GPR_idx != NumGPRs) { 4962 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 4963 MachinePointerInfo(), VT, 4964 false, false, false, 0); 4965 MemOpChains.push_back(Load.getValue(1)); 4966 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4967 4968 ArgOffset += PtrByteSize; 4969 continue; 4970 } 4971 } 4972 4973 if (GPR_idx == NumGPRs && Size < 8) { 4974 SDValue AddPtr = PtrOff; 4975 if (!isLittleEndian) { 4976 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 4977 PtrOff.getValueType()); 4978 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 4979 } 4980 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 4981 CallSeqStart, 4982 Flags, DAG, dl); 4983 ArgOffset += PtrByteSize; 4984 continue; 4985 } 4986 // Copy entire object into memory. There are cases where gcc-generated 4987 // code assumes it is there, even if it could be put entirely into 4988 // registers. (This is not what the doc says.) 4989 4990 // FIXME: The above statement is likely due to a misunderstanding of the 4991 // documents. All arguments must be copied into the parameter area BY 4992 // THE CALLEE in the event that the callee takes the address of any 4993 // formal argument. That has not yet been implemented. However, it is 4994 // reasonable to use the stack area as a staging area for the register 4995 // load. 4996 4997 // Skip this for small aggregates, as we will use the same slot for a 4998 // right-justified copy, below. 4999 if (Size >= 8) 5000 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5001 CallSeqStart, 5002 Flags, DAG, dl); 5003 5004 // When a register is available, pass a small aggregate right-justified. 5005 if (Size < 8 && GPR_idx != NumGPRs) { 5006 // The easiest way to get this right-justified in a register 5007 // is to copy the structure into the rightmost portion of a 5008 // local variable slot, then load the whole slot into the 5009 // register. 5010 // FIXME: The memcpy seems to produce pretty awful code for 5011 // small aggregates, particularly for packed ones. 5012 // FIXME: It would be preferable to use the slot in the 5013 // parameter save area instead of a new local variable. 5014 SDValue AddPtr = PtrOff; 5015 if (!isLittleEndian) { 5016 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType()); 5017 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5018 } 5019 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5020 CallSeqStart, 5021 Flags, DAG, dl); 5022 5023 // Load the slot into the register. 5024 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, PtrOff, 5025 MachinePointerInfo(), 5026 false, false, false, 0); 5027 MemOpChains.push_back(Load.getValue(1)); 5028 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5029 5030 // Done with this argument. 
5031 ArgOffset += PtrByteSize;
5032 continue;
5033 }
5034
5035 // For aggregates larger than PtrByteSize, copy the pieces of the
5036 // object that fit into registers from the parameter save area.
5037 for (unsigned j=0; j<Size; j+=PtrByteSize) {
5038 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
5039 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
5040 if (GPR_idx != NumGPRs) {
5041 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
5042 MachinePointerInfo(),
5043 false, false, false, 0);
5044 MemOpChains.push_back(Load.getValue(1));
5045 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5046 ArgOffset += PtrByteSize;
5047 } else {
5048 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
5049 break;
5050 }
5051 }
5052 continue;
5053 }
5054
5055 switch (Arg.getSimpleValueType().SimpleTy) {
5056 default: llvm_unreachable("Unexpected ValueType for argument!");
5057 case MVT::i1:
5058 case MVT::i32:
5059 case MVT::i64:
5060 if (Flags.isNest()) {
5061 // The 'nest' parameter, if any, is passed in R11.
5062 RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
5063 hasNest = true;
5064 break;
5065 }
5066
5067 // These can be scalar arguments or elements of an integer array type
5068 // passed directly. Clang may use those instead of "byval" aggregate
5069 // types to avoid forcing arguments to memory unnecessarily.
5070 if (GPR_idx != NumGPRs) {
5071 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
5072 } else {
5073 if (CallConv == CallingConv::Fast)
5074 ComputePtrOff();
5075
5076 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5077 true, isTailCall, false, MemOpChains,
5078 TailCallArguments, dl);
5079 if (CallConv == CallingConv::Fast)
5080 ArgOffset += PtrByteSize;
5081 }
5082 if (CallConv != CallingConv::Fast)
5083 ArgOffset += PtrByteSize;
5084 break;
5085 case MVT::f32:
5086 case MVT::f64: {
5087 // These can be scalar arguments or elements of a float array type
5088 // passed directly. The latter are used to implement ELFv2 homogeneous
5089 // float aggregates.
5090
5091 // Named arguments go into FPRs first, and once they overflow, the
5092 // remaining arguments go into GPRs and then the parameter save area.
5093 // Unnamed arguments for vararg functions always go to GPRs and
5094 // then the parameter save area. For now, always put arguments to vararg
5095 // routines in both locations (FPR *and* GPR or stack slot).
5096 bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs;
5097 bool NeededLoad = false;
5098
5099 // First load the argument into the next available FPR.
5100 if (FPR_idx != NumFPRs)
5101 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
5102
5103 // Next, load the argument into GPR or stack slot if needed.
5104 if (!NeedGPROrStack)
5105 ;
5106 else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) {
5107 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
5108 // once we support fp <-> gpr moves.
5109
5110 // In the non-vararg case, this can only ever happen in the
5111 // presence of f32 array types, since otherwise we never run
5112 // out of FPRs before running out of GPRs.
5113 SDValue ArgVal;
5114
5115 // Double values are always passed in a single GPR.
5116 if (Arg.getValueType() != MVT::f32) {
5117 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
5118
5119 // Non-array float values are extended and passed in a GPR.
} else if (!Flags.isInConsecutiveRegs()) {
5121 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5122 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
5123
5124 // If we have an array of floats, we collect every odd element
5125 // together with its predecessor into one GPR.
5126 } else if (ArgOffset % PtrByteSize != 0) {
5127 SDValue Lo, Hi;
5128 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
5129 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5130 if (!isLittleEndian)
5131 std::swap(Lo, Hi);
5132 ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
5133
5134 // The final element, if even, goes into the first half of a GPR.
5135 } else if (Flags.isInConsecutiveRegsLast()) {
5136 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5137 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
5138 if (!isLittleEndian)
5139 ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
5140 DAG.getConstant(32, dl, MVT::i32));
5141
5142 // Non-final even elements are skipped; they will be handled
5143 // together with the subsequent argument on the next go-around.
5144 } else
5145 ArgVal = SDValue();
5146
5147 if (ArgVal.getNode())
5148 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
5149 } else {
5150 if (CallConv == CallingConv::Fast)
5151 ComputePtrOff();
5152
5153 // Single-precision floating-point values are mapped to the
5154 // second (rightmost) word of the stack doubleword.
5155 if (Arg.getValueType() == MVT::f32 &&
5156 !isLittleEndian && !Flags.isInConsecutiveRegs()) {
5157 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
5158 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
5159 }
5160
5161 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5162 true, isTailCall, false, MemOpChains,
5163 TailCallArguments, dl);
5164
5165 NeededLoad = true;
5166 }
5167 // When passing an array of floats, the array occupies consecutive
5168 // space in the argument area; only round up to the next doubleword
5169 // at the end of the array. Otherwise, each float takes 8 bytes.
5170 if (CallConv != CallingConv::Fast || NeededLoad) {
5171 ArgOffset += (Arg.getValueType() == MVT::f32 &&
5172 Flags.isInConsecutiveRegs()) ? 4 : 8;
5173 if (Flags.isInConsecutiveRegsLast())
5174 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5175 }
5176 break;
5177 }
5178 case MVT::v4f32:
5179 case MVT::v4i32:
5180 case MVT::v8i16:
5181 case MVT::v16i8:
5182 case MVT::v2f64:
5183 case MVT::v2i64:
5184 case MVT::v1i128:
5185 if (!Subtarget.hasQPX()) {
5186 // These can be scalar arguments or elements of a vector array type
5187 // passed directly. The latter are used to implement ELFv2 homogeneous
5188 // vector aggregates.
5189
5190 // For a varargs call, named arguments go into VRs or on the stack as
5191 // usual; unnamed arguments always go to the stack or the corresponding
5192 // GPRs when within range. For now, we always put the value in both
5193 // locations (or even all three).
5194 if (isVarArg) {
5195 // We could elide this store in the case where the object fits
5196 // entirely in R registers. Maybe later.
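// For now, unconditionally store the vector to its slot in the parameter
// save area, then reload the pieces needed into VRs and/or GPRs below.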
5197 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5198 MachinePointerInfo(), false, false, 0); 5199 MemOpChains.push_back(Store); 5200 if (VR_idx != NumVRs) { 5201 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 5202 MachinePointerInfo(), 5203 false, false, false, 0); 5204 MemOpChains.push_back(Load.getValue(1)); 5205 5206 unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 || 5207 Arg.getSimpleValueType() == MVT::v2i64) ? 5208 VSRH[VR_idx] : VR[VR_idx]; 5209 ++VR_idx; 5210 5211 RegsToPass.push_back(std::make_pair(VReg, Load)); 5212 } 5213 ArgOffset += 16; 5214 for (unsigned i=0; i<16; i+=PtrByteSize) { 5215 if (GPR_idx == NumGPRs) 5216 break; 5217 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5218 DAG.getConstant(i, dl, PtrVT)); 5219 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 5220 false, false, false, 0); 5221 MemOpChains.push_back(Load.getValue(1)); 5222 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5223 } 5224 break; 5225 } 5226 5227 // Non-varargs Altivec params go into VRs or on the stack. 5228 if (VR_idx != NumVRs) { 5229 unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 || 5230 Arg.getSimpleValueType() == MVT::v2i64) ? 5231 VSRH[VR_idx] : VR[VR_idx]; 5232 ++VR_idx; 5233 5234 RegsToPass.push_back(std::make_pair(VReg, Arg)); 5235 } else { 5236 if (CallConv == CallingConv::Fast) 5237 ComputePtrOff(); 5238 5239 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5240 true, isTailCall, true, MemOpChains, 5241 TailCallArguments, dl); 5242 if (CallConv == CallingConv::Fast) 5243 ArgOffset += 16; 5244 } 5245 5246 if (CallConv != CallingConv::Fast) 5247 ArgOffset += 16; 5248 break; 5249 } // not QPX 5250 5251 assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 && 5252 "Invalid QPX parameter type"); 5253 5254 /* fall through */ 5255 case MVT::v4f64: 5256 case MVT::v4i1: { 5257 bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32; 5258 if (isVarArg) { 5259 // We could elide this store in the case where the object fits 5260 // entirely in R registers. Maybe later. 5261 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5262 MachinePointerInfo(), false, false, 0); 5263 MemOpChains.push_back(Store); 5264 if (QFPR_idx != NumQFPRs) { 5265 SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, 5266 Store, PtrOff, MachinePointerInfo(), 5267 false, false, false, 0); 5268 MemOpChains.push_back(Load.getValue(1)); 5269 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load)); 5270 } 5271 ArgOffset += (IsF32 ? 16 : 32); 5272 for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) { 5273 if (GPR_idx == NumGPRs) 5274 break; 5275 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5276 DAG.getConstant(i, dl, PtrVT)); 5277 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 5278 false, false, false, 0); 5279 MemOpChains.push_back(Load.getValue(1)); 5280 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5281 } 5282 break; 5283 } 5284 5285 // Non-varargs QPX params go into registers or on the stack. 5286 if (QFPR_idx != NumQFPRs) { 5287 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg)); 5288 } else { 5289 if (CallConv == CallingConv::Fast) 5290 ComputePtrOff(); 5291 5292 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5293 true, isTailCall, true, MemOpChains, 5294 TailCallArguments, dl); 5295 if (CallConv == CallingConv::Fast) 5296 ArgOffset += (IsF32 ? 
16 : 32);
5297 }
5298
5299 if (CallConv != CallingConv::Fast)
5300 ArgOffset += (IsF32 ? 16 : 32);
5301 break;
5302 }
5303 }
5304 }
5305
5306 assert(NumBytesActuallyUsed == ArgOffset);
5307 (void)NumBytesActuallyUsed;
5308
5309 if (!MemOpChains.empty())
5310 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5311
5312 // Check if this is an indirect call (MTCTR/BCTRL).
5313 // See PrepareCall() for more information about calls through function
5314 // pointers in the 64-bit SVR4 ABI.
5315 if (!isTailCall && !IsPatchPoint &&
5316 !isFunctionGlobalAddress(Callee) &&
5317 !isa<ExternalSymbolSDNode>(Callee)) {
5318 // Load r2 into a virtual register and store it to the TOC save area.
5319 setUsesTOCBasePtr(DAG);
5320 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
5321 // TOC save area offset.
5322 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5323 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5324 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
5325 Chain = DAG.getStore(
5326 Val.getValue(1), dl, Val, AddPtr,
5327 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset),
5328 false, false, 0);
5329 // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
5330 // This does not mean the MTCTR instruction must use R12; it's easier
5331 // to model this as an extra parameter, so do that.
5332 if (isELFv2ABI && !IsPatchPoint)
5333 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
5334 }
5335
5336 // Build a sequence of copy-to-reg nodes chained together with token chain
5337 // and flag operands which copy the outgoing args into the appropriate regs.
5338 SDValue InFlag;
5339 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5340 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5341 RegsToPass[i].second, InFlag);
5342 InFlag = Chain.getValue(1);
5343 }
5344
5345 if (isTailCall)
5346 PrepareTailCall(DAG, InFlag, Chain, dl, true, SPDiff, NumBytes, LROp,
5347 FPOp, true, TailCallArguments);
5348
5349 return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint,
5350 hasNest, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
5351 Callee, SPDiff, NumBytes, Ins, InVals, CS);
5352 }
5353
5354 SDValue
5355 PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
5356 CallingConv::ID CallConv, bool isVarArg,
5357 bool isTailCall, bool IsPatchPoint,
5358 const SmallVectorImpl<ISD::OutputArg> &Outs,
5359 const SmallVectorImpl<SDValue> &OutVals,
5360 const SmallVectorImpl<ISD::InputArg> &Ins,
5361 SDLoc dl, SelectionDAG &DAG,
5362 SmallVectorImpl<SDValue> &InVals,
5363 ImmutableCallSite *CS) const {
5364
5365 unsigned NumOps = Outs.size();
5366
5367 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5368 bool isPPC64 = PtrVT == MVT::i64;
5369 unsigned PtrByteSize = isPPC64 ? 8 : 4;
5370
5371 MachineFunction &MF = DAG.getMachineFunction();
5372
5373 // Mark this function as potentially containing a function that contains a
5374 // tail call. As a consequence, the frame pointer will be used for dynamic
5375 // stack allocation and for restoring the caller's stack pointer in this
5376 // function's epilogue. This is done because the tail-called function might
5377 // overwrite the value in this function's (MF) stack pointer slot at 0(SP).
5378 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5379 CallConv == CallingConv::Fast)
5380 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5381
5382 // Count how many bytes are to be pushed on the stack, including the linkage
5383 // area, and parameter passing area. We start with 24/48 bytes, which is
5384 // prereserved space for [SP][CR][LR][3 x unused].
5385 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5386 unsigned NumBytes = LinkageSize;
5387
5388 // Add up all the space actually used.
5389 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
5390 // they all go in registers, but we must reserve stack space for them for
5391 // possible use by the caller. In varargs or 64-bit calls, parameters are
5392 // assigned stack space in order, with padding so Altivec parameters are
5393 // 16-byte aligned.
5394 unsigned nAltivecParamsAtEnd = 0;
5395 for (unsigned i = 0; i != NumOps; ++i) {
5396 ISD::ArgFlagsTy Flags = Outs[i].Flags;
5397 EVT ArgVT = Outs[i].VT;
5398 // Varargs Altivec parameters are padded to a 16-byte boundary.
5399 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
5400 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
5401 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
5402 if (!isVarArg && !isPPC64) {
5403 // Non-varargs Altivec parameters go after all the non-Altivec
5404 // parameters; handle those later so we know how much padding we need.
5405 nAltivecParamsAtEnd++;
5406 continue;
5407 }
5408 // Varargs and 64-bit Altivec parameters are padded to a 16-byte boundary.
5409 NumBytes = ((NumBytes+15)/16)*16;
5410 }
5411 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5412 }
5413
5414 // Allow for Altivec parameters at the end, if needed.
5415 if (nAltivecParamsAtEnd) {
5416 NumBytes = ((NumBytes+15)/16)*16;
5417 NumBytes += 16*nAltivecParamsAtEnd;
5418 }
5419
5420 // The prolog code of the callee may store up to 8 GPR argument registers to
5421 // the stack, allowing va_start to index over them in memory if it is varargs.
5422 // Because we cannot tell if this is needed on the caller side, we have to
5423 // conservatively assume that it is needed. As such, make sure we have at
5424 // least enough stack space for the caller to store the 8 GPRs.
5425 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5426
5427 // Tail call needs the stack to be aligned.
5428 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5429 CallConv == CallingConv::Fast)
5430 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
5431
5432 // Calculate by how many bytes the stack has to be adjusted in case of tail
5433 // call optimization.
5434 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
5435
5436 // To protect arguments on the stack from being clobbered in a tail call,
5437 // force all the loads to happen before doing any other lowering.
5438 if (isTailCall)
5439 Chain = DAG.getStackArgumentTokenFactor(Chain);
5440
5441 // Adjust the stack pointer for the new arguments...
5442 // These operations are automatically eliminated by the prolog/epilog pass
5443 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5444 dl);
5445 SDValue CallSeqStart = Chain;
5446
5447 // Load the return address and frame pointer so they can be moved somewhere
5448 // else later.
5449 SDValue LROp, FPOp; 5450 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true, 5451 dl); 5452 5453 // Set up a copy of the stack pointer for use loading and storing any 5454 // arguments that may not fit in the registers available for argument 5455 // passing. 5456 SDValue StackPtr; 5457 if (isPPC64) 5458 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 5459 else 5460 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 5461 5462 // Figure out which arguments are going to go in registers, and which in 5463 // memory. Also, if this is a vararg function, floating point operations 5464 // must be stored to our stack, and loaded into integer regs as well, if 5465 // any integer regs are available for argument passing. 5466 unsigned ArgOffset = LinkageSize; 5467 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 5468 5469 static const MCPhysReg GPR_32[] = { // 32-bit registers. 5470 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 5471 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 5472 }; 5473 static const MCPhysReg GPR_64[] = { // 64-bit registers. 5474 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 5475 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 5476 }; 5477 static const MCPhysReg VR[] = { 5478 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 5479 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 5480 }; 5481 const unsigned NumGPRs = array_lengthof(GPR_32); 5482 const unsigned NumFPRs = 13; 5483 const unsigned NumVRs = array_lengthof(VR); 5484 5485 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 5486 5487 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 5488 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 5489 5490 SmallVector<SDValue, 8> MemOpChains; 5491 for (unsigned i = 0; i != NumOps; ++i) { 5492 SDValue Arg = OutVals[i]; 5493 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5494 5495 // PtrOff will be used to store the current argument to the stack if a 5496 // register cannot be found for it. 5497 SDValue PtrOff; 5498 5499 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 5500 5501 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 5502 5503 // On PPC64, promote integers to 64-bit values. 5504 if (isPPC64 && Arg.getValueType() == MVT::i32) { 5505 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 5506 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 5507 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 5508 } 5509 5510 // FIXME memcpy is used way more than necessary. Correctness first. 5511 // Note: "by value" is code for passing a structure by value, not 5512 // basic types. 5513 if (Flags.isByVal()) { 5514 unsigned Size = Flags.getByValSize(); 5515 // Very small objects are passed right-justified. Everything else is 5516 // passed left-justified. 5517 if (Size==1 || Size==2) { 5518 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 5519 if (GPR_idx != NumGPRs) { 5520 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 5521 MachinePointerInfo(), VT, 5522 false, false, false, 0); 5523 MemOpChains.push_back(Load.getValue(1)); 5524 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5525 5526 ArgOffset += PtrByteSize; 5527 } else { 5528 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 5529 PtrOff.getValueType()); 5530 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5531 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5532 CallSeqStart, 5533 Flags, DAG, dl); 5534 ArgOffset += PtrByteSize; 5535 } 5536 continue; 5537 } 5538 // Copy entire object into memory. 
There are cases where gcc-generated 5539 // code assumes it is there, even if it could be put entirely into 5540 // registers. (This is not what the doc says.) 5541 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5542 CallSeqStart, 5543 Flags, DAG, dl); 5544 5545 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 5546 // copy the pieces of the object that fit into registers from the 5547 // parameter save area. 5548 for (unsigned j=0; j<Size; j+=PtrByteSize) { 5549 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 5550 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 5551 if (GPR_idx != NumGPRs) { 5552 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 5553 MachinePointerInfo(), 5554 false, false, false, 0); 5555 MemOpChains.push_back(Load.getValue(1)); 5556 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5557 ArgOffset += PtrByteSize; 5558 } else { 5559 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 5560 break; 5561 } 5562 } 5563 continue; 5564 } 5565 5566 switch (Arg.getSimpleValueType().SimpleTy) { 5567 default: llvm_unreachable("Unexpected ValueType for argument!"); 5568 case MVT::i1: 5569 case MVT::i32: 5570 case MVT::i64: 5571 if (GPR_idx != NumGPRs) { 5572 if (Arg.getValueType() == MVT::i1) 5573 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg); 5574 5575 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 5576 } else { 5577 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5578 isPPC64, isTailCall, false, MemOpChains, 5579 TailCallArguments, dl); 5580 } 5581 ArgOffset += PtrByteSize; 5582 break; 5583 case MVT::f32: 5584 case MVT::f64: 5585 if (FPR_idx != NumFPRs) { 5586 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 5587 5588 if (isVarArg) { 5589 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5590 MachinePointerInfo(), false, false, 0); 5591 MemOpChains.push_back(Store); 5592 5593 // Float varargs are always shadowed in available integer registers 5594 if (GPR_idx != NumGPRs) { 5595 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 5596 MachinePointerInfo(), false, false, 5597 false, 0); 5598 MemOpChains.push_back(Load.getValue(1)); 5599 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5600 } 5601 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 5602 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 5603 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 5604 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 5605 MachinePointerInfo(), 5606 false, false, false, 0); 5607 MemOpChains.push_back(Load.getValue(1)); 5608 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5609 } 5610 } else { 5611 // If we have any FPRs remaining, we may also have GPRs remaining. 5612 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 5613 // GPRs. 5614 if (GPR_idx != NumGPRs) 5615 ++GPR_idx; 5616 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 5617 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 5618 ++GPR_idx; 5619 } 5620 } else 5621 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5622 isPPC64, isTailCall, false, MemOpChains, 5623 TailCallArguments, dl); 5624 if (isPPC64) 5625 ArgOffset += 8; 5626 else 5627 ArgOffset += Arg.getValueType() == MVT::f32 ? 
4 : 8;
5628 break;
5629 case MVT::v4f32:
5630 case MVT::v4i32:
5631 case MVT::v8i16:
5632 case MVT::v16i8:
5633 if (isVarArg) {
5634 // These go aligned on the stack, or in the corresponding R registers
5635 // when within range. The Darwin PPC ABI doc claims they also go in
5636 // V registers; in fact gcc does this only for arguments that are
5637 // prototyped, not for those that match the "...". We do it for all
5638 // arguments, which seems to work.
5639 while (ArgOffset % 16 !=0) {
5640 ArgOffset += PtrByteSize;
5641 if (GPR_idx != NumGPRs)
5642 GPR_idx++;
5643 }
5644 // We could elide this store in the case where the object fits
5645 // entirely in R registers. Maybe later.
5646 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
5647 DAG.getConstant(ArgOffset, dl, PtrVT));
5648 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
5649 MachinePointerInfo(), false, false, 0);
5650 MemOpChains.push_back(Store);
5651 if (VR_idx != NumVRs) {
5652 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff,
5653 MachinePointerInfo(),
5654 false, false, false, 0);
5655 MemOpChains.push_back(Load.getValue(1));
5656 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
5657 }
5658 ArgOffset += 16;
5659 for (unsigned i=0; i<16; i+=PtrByteSize) {
5660 if (GPR_idx == NumGPRs)
5661 break;
5662 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
5663 DAG.getConstant(i, dl, PtrVT));
5664 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(),
5665 false, false, false, 0);
5666 MemOpChains.push_back(Load.getValue(1));
5667 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5668 }
5669 break;
5670 }
5671
5672 // Non-varargs Altivec params generally go in registers, but have
5673 // stack space allocated at the end.
5674 if (VR_idx != NumVRs) {
5675 // Doesn't have GPR space allocated.
5676 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
5677 } else if (nAltivecParamsAtEnd==0) {
5678 // We are emitting Altivec params in order.
5679 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5680 isPPC64, isTailCall, true, MemOpChains,
5681 TailCallArguments, dl);
5682 ArgOffset += 16;
5683 }
5684 break;
5685 }
5686 }
5687 // If all Altivec parameters fit in registers, as they usually do,
5688 // they get stack space following the non-Altivec parameters. We
5689 // don't track this here because nobody below needs it.
5690 // If there are more Altivec parameters than fit in registers, emit
5691 // the stores here.
5692 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
5693 unsigned j = 0;
5694 // Offset is aligned; skip 1st 12 params which go in V registers.
5695 ArgOffset = ((ArgOffset+15)/16)*16;
5696 ArgOffset += 12*16;
5697 for (unsigned i = 0; i != NumOps; ++i) {
5698 SDValue Arg = OutVals[i];
5699 EVT ArgType = Outs[i].VT;
5700 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
5701 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
5702 if (++j > NumVRs) {
5703 SDValue PtrOff;
5704 // We are emitting Altivec params in order.
5705 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5706 isPPC64, isTailCall, true, MemOpChains,
5707 TailCallArguments, dl);
5708 ArgOffset += 16;
5709 }
5710 }
5711 }
5712 }
5713
5714 if (!MemOpChains.empty())
5715 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5716
5717 // On Darwin, R12 must contain the address of an indirect callee. This does
5718 // not mean the MTCTR instruction must use R12; it's easier to model this as
5719 // an extra parameter, so do that.
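// (Callees reachable via an absolute branch, per isBLACompatibleAddress,
// are excluded below because no indirect branch through CTR is involved.)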
5720 if (!isTailCall &&
5721 !isFunctionGlobalAddress(Callee) &&
5722 !isa<ExternalSymbolSDNode>(Callee) &&
5723 !isBLACompatibleAddress(Callee, DAG))
5724 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
5725 PPC::R12), Callee));
5726
5727 // Build a sequence of copy-to-reg nodes chained together with token chain
5728 // and flag operands which copy the outgoing args into the appropriate regs.
5729 SDValue InFlag;
5730 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5731 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5732 RegsToPass[i].second, InFlag);
5733 InFlag = Chain.getValue(1);
5734 }
5735
5736 if (isTailCall)
5737 PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp,
5738 FPOp, true, TailCallArguments);
5739
5740 return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint,
5741 /* unused except on PPC64 ELFv1 */ false, DAG,
5742 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
5743 NumBytes, Ins, InVals, CS);
5744 }
5745
5746 bool
5747 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
5748 MachineFunction &MF, bool isVarArg,
5749 const SmallVectorImpl<ISD::OutputArg> &Outs,
5750 LLVMContext &Context) const {
5751 SmallVector<CCValAssign, 16> RVLocs;
5752 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
5753 return CCInfo.CheckReturn(Outs, RetCC_PPC);
5754 }
5755
5756 SDValue
5757 PPCTargetLowering::LowerReturn(SDValue Chain,
5758 CallingConv::ID CallConv, bool isVarArg,
5759 const SmallVectorImpl<ISD::OutputArg> &Outs,
5760 const SmallVectorImpl<SDValue> &OutVals,
5761 SDLoc dl, SelectionDAG &DAG) const {
5762
5763 SmallVector<CCValAssign, 16> RVLocs;
5764 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
5765 *DAG.getContext());
5766 CCInfo.AnalyzeReturn(Outs, RetCC_PPC);
5767
5768 SDValue Flag;
5769 SmallVector<SDValue, 4> RetOps(1, Chain);
5770
5771 // Copy the result values into the output registers.
5772 for (unsigned i = 0; i != RVLocs.size(); ++i) {
5773 CCValAssign &VA = RVLocs[i];
5774 assert(VA.isRegLoc() && "Can only return in registers!");
5775
5776 SDValue Arg = OutVals[i];
5777
5778 switch (VA.getLocInfo()) {
5779 default: llvm_unreachable("Unknown loc info!");
5780 case CCValAssign::Full: break;
5781 case CCValAssign::AExt:
5782 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
5783 break;
5784 case CCValAssign::ZExt:
5785 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
5786 break;
5787 case CCValAssign::SExt:
5788 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
5789 break;
5790 }
5791
5792 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
5793 Flag = Chain.getValue(1);
5794 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
5795 }
5796
5797 RetOps[0] = Chain; // Update chain.
5798
5799 // Add the flag if we have it.
5800 if (Flag.getNode())
5801 RetOps.push_back(Flag);
5802
5803 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
5804 }
5805
5806 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
5807 const PPCSubtarget &Subtarget) const {
5808 // When we pop the dynamic allocation we need to restore the SP link.
5809 SDLoc dl(Op);
5810
5811 // Get the correct type for pointers.
5812 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5813
5814 // Construct the stack pointer operand.
5815 bool isPPC64 = Subtarget.isPPC64();
5816 unsigned SP = isPPC64 ?
PPC::X1 : PPC::R1;
5817 SDValue StackPtr = DAG.getRegister(SP, PtrVT);
5818
5819 // Get the operands for the STACKRESTORE.
5820 SDValue Chain = Op.getOperand(0);
5821 SDValue SaveSP = Op.getOperand(1);
5822
5823 // Load the old link SP.
5824 SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr,
5825 MachinePointerInfo(),
5826 false, false, false, 0);
5827
5828 // Restore the stack pointer.
5829 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
5830
5831 // Store the old link SP.
5832 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo(),
5833 false, false, 0);
5834 }
5835
5836
5837
5838 SDValue
5839 PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
5840 MachineFunction &MF = DAG.getMachineFunction();
5841 bool isPPC64 = Subtarget.isPPC64();
5842 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
5843
5844 // Get the current return address save index. The users of this index are
5845 // primarily the RETURNADDR and tail-call lowering.
5846 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
5847 int RASI = FI->getReturnAddrSaveIndex();
5848
5849 // If the return address save index hasn't been defined yet.
5850 if (!RASI) {
5851 // Find out the fixed offset of the return address save area.
5852 int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
5853 // Allocate the frame index for the return address save area.
5854 RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
5855 // Save the result.
5856 FI->setReturnAddrSaveIndex(RASI);
5857 }
5858 return DAG.getFrameIndex(RASI, PtrVT);
5859 }
5860
5861 SDValue
5862 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
5863 MachineFunction &MF = DAG.getMachineFunction();
5864 bool isPPC64 = Subtarget.isPPC64();
5865 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
5866
5867 // Get the current frame pointer save index. The users of this index will be
5868 // primarily DYNALLOC instructions.
5869 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
5870 int FPSI = FI->getFramePointerSaveIndex();
5871
5872 // If the frame pointer save index hasn't been defined yet.
5873 if (!FPSI) {
5874 // Find out the fixed offset of the frame pointer save area.
5875 int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
5876 // Allocate the frame index for the frame pointer save area.
5877 FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
5878 // Save the result.
5879 FI->setFramePointerSaveIndex(FPSI);
5880 }
5881 return DAG.getFrameIndex(FPSI, PtrVT);
5882 }
5883
5884 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
5885 SelectionDAG &DAG,
5886 const PPCSubtarget &Subtarget) const {
5887 // Get the inputs.
5888 SDValue Chain = Op.getOperand(0);
5889 SDValue Size = Op.getOperand(1);
5890 SDLoc dl(Op);
5891
5892 // Get the correct type for pointers.
5893 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5894 // Negate the size.
5895 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
5896 DAG.getConstant(0, dl, PtrVT), Size);
5897 // Construct a node for the frame pointer save index.
5898 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
5899 // Build a DYNALLOC node.
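// DYNALLOC consumes the chain, the negated allocation size, and the frame
// pointer save index, and produces the allocated address in PtrVT together
// with an output chain.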
5900 SDValue Ops[3] = { Chain, NegSize, FPSIdx };
5901 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
5902 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
5903 }
5904
5905 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
5906 SelectionDAG &DAG) const {
5907 SDLoc DL(Op);
5908 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
5909 DAG.getVTList(MVT::i32, MVT::Other),
5910 Op.getOperand(0), Op.getOperand(1));
5911 }
5912
5913 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
5914 SelectionDAG &DAG) const {
5915 SDLoc DL(Op);
5916 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
5917 Op.getOperand(0), Op.getOperand(1));
5918 }
5919
5920 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
5921 if (Op.getValueType().isVector())
5922 return LowerVectorLoad(Op, DAG);
5923
5924 assert(Op.getValueType() == MVT::i1 &&
5925 "Custom lowering only for i1 loads");
5926
5927 // First, load 8 bits into 32 bits, then truncate to 1 bit.
5928
5929 SDLoc dl(Op);
5930 LoadSDNode *LD = cast<LoadSDNode>(Op);
5931
5932 SDValue Chain = LD->getChain();
5933 SDValue BasePtr = LD->getBasePtr();
5934 MachineMemOperand *MMO = LD->getMemOperand();
5935
5936 SDValue NewLD =
5937 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
5938 BasePtr, MVT::i8, MMO);
5939 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
5940
5941 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
5942 return DAG.getMergeValues(Ops, dl);
5943 }
5944
5945 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
5946 if (Op.getOperand(1).getValueType().isVector())
5947 return LowerVectorStore(Op, DAG);
5948
5949 assert(Op.getOperand(1).getValueType() == MVT::i1 &&
5950 "Custom lowering only for i1 stores");
5951
5952 // First, zero extend to 32 bits, then use a truncating store to 8 bits.
5953
5954 SDLoc dl(Op);
5955 StoreSDNode *ST = cast<StoreSDNode>(Op);
5956
5957 SDValue Chain = ST->getChain();
5958 SDValue BasePtr = ST->getBasePtr();
5959 SDValue Value = ST->getValue();
5960 MachineMemOperand *MMO = ST->getMemOperand();
5961
5962 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
5963 Value);
5964 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
5965 }
5966
5967 // FIXME: Remove this once the ANDI glue bug is fixed:
5968 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
5969 assert(Op.getValueType() == MVT::i1 &&
5970 "Custom lowering only for i1 results");
5971
5972 SDLoc DL(Op);
5973 return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1,
5974 Op.getOperand(0));
5975 }
5976
5977 /// LowerSELECT_CC - Lower floating-point select_cc nodes into the fsel
5978 /// instruction when possible.
5979 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
5980 // Not FP? Not an fsel.
5981 if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
5982 !Op.getOperand(2).getValueType().isFloatingPoint())
5983 return Op;
5984
5985 // We might be able to do better than this under some circumstances, but in
5986 // general, fsel-based lowering of select is a finite-math-only optimization.
5987 // For more information, see section F.3 of the 2.06 ISA specification.
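// Informally: fsel(X, T, F) yields T when X >= 0 and F otherwise (a NaN X
// selects F), so a comparison such as "a >= b" is encoded below as
// "fsel(a - b, t, f)". Since inf - inf is NaN and NaN operands poison the
// subtraction, that encoding is only sound when infinities and NaNs are
// excluded, which is what the two flag checks below enforce.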
5988 if (!DAG.getTarget().Options.NoInfsFPMath ||
5989 !DAG.getTarget().Options.NoNaNsFPMath)
5990 return Op;
5991
5992 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
5993
5994 EVT ResVT = Op.getValueType();
5995 EVT CmpVT = Op.getOperand(0).getValueType();
5996 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
5997 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
5998 SDLoc dl(Op);
5999
6000 // If the RHS of the comparison is a 0.0, we don't need to do the
6001 // subtraction at all.
6002 SDValue Sel1;
6003 if (isFloatingPointZero(RHS))
6004 switch (CC) {
6005 default: break; // SETUO etc aren't handled by fsel.
6006 case ISD::SETNE:
6007 std::swap(TV, FV);
6008 case ISD::SETEQ:
6009 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
6010 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6011 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
6012 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
6013 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
6014 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6015 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
6016 case ISD::SETULT:
6017 case ISD::SETLT:
6018 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
6019 case ISD::SETOGE:
6020 case ISD::SETGE:
6021 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
6022 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6023 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
6024 case ISD::SETUGT:
6025 case ISD::SETGT:
6026 std::swap(TV, FV); // fsel is natively setge, swap operands for setgt
6027 case ISD::SETOLE:
6028 case ISD::SETLE:
6029 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
6030 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6031 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6032 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
6033 }
6034
6035 SDValue Cmp;
6036 switch (CC) {
6037 default: break; // SETUO etc aren't handled by fsel.
6038 case ISD::SETNE: 6039 std::swap(TV, FV); 6040 case ISD::SETEQ: 6041 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 6042 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6043 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6044 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6045 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 6046 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 6047 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6048 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 6049 case ISD::SETULT: 6050 case ISD::SETLT: 6051 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 6052 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6053 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6054 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6055 case ISD::SETOGE: 6056 case ISD::SETGE: 6057 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 6058 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6059 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6060 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6061 case ISD::SETUGT: 6062 case ISD::SETGT: 6063 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 6064 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6065 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6066 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6067 case ISD::SETOLE: 6068 case ISD::SETLE: 6069 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 6070 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6071 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6072 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6073 } 6074 return Op; 6075 } 6076 6077 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI, 6078 SelectionDAG &DAG, 6079 SDLoc dl) const { 6080 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6081 SDValue Src = Op.getOperand(0); 6082 if (Src.getValueType() == MVT::f32) 6083 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6084 6085 SDValue Tmp; 6086 switch (Op.getSimpleValueType().SimpleTy) { 6087 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6088 case MVT::i32: 6089 Tmp = DAG.getNode( 6090 Op.getOpcode() == ISD::FP_TO_SINT 6091 ? PPCISD::FCTIWZ 6092 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6093 dl, MVT::f64, Src); 6094 break; 6095 case MVT::i64: 6096 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6097 "i64 FP_TO_UINT is supported only with FPCVT"); 6098 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 6099 PPCISD::FCTIDUZ, 6100 dl, MVT::f64, Src); 6101 break; 6102 } 6103 6104 // Convert the FP value to an int value through memory. 6105 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 6106 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()); 6107 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 6108 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 6109 MachinePointerInfo MPI = 6110 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); 6111 6112 // Emit a store to the stack slot. 
6113 SDValue Chain; 6114 if (i32Stack) { 6115 MachineFunction &MF = DAG.getMachineFunction(); 6116 MachineMemOperand *MMO = 6117 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 6118 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 6119 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 6120 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 6121 } else 6122 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, 6123 MPI, false, false, 0); 6124 6125 // Result is a load from the stack slot. If loading 4 bytes, make sure to 6126 // add in a bias. 6127 if (Op.getValueType() == MVT::i32 && !i32Stack) { 6128 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 6129 DAG.getConstant(4, dl, FIPtr.getValueType())); 6130 MPI = MPI.getWithOffset(4); 6131 } 6132 6133 RLI.Chain = Chain; 6134 RLI.Ptr = FIPtr; 6135 RLI.MPI = MPI; 6136 } 6137 6138 /// \brief Custom lowers floating point to integer conversions to use 6139 /// the direct move instructions available in ISA 2.07 to avoid the 6140 /// need for load/store combinations. 6141 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 6142 SelectionDAG &DAG, 6143 SDLoc dl) const { 6144 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6145 SDValue Src = Op.getOperand(0); 6146 6147 if (Src.getValueType() == MVT::f32) 6148 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6149 6150 SDValue Tmp; 6151 switch (Op.getSimpleValueType().SimpleTy) { 6152 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6153 case MVT::i32: 6154 Tmp = DAG.getNode( 6155 Op.getOpcode() == ISD::FP_TO_SINT 6156 ? PPCISD::FCTIWZ 6157 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6158 dl, MVT::f64, Src); 6159 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp); 6160 break; 6161 case MVT::i64: 6162 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6163 "i64 FP_TO_UINT is supported only with FPCVT"); 6164 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 6165 PPCISD::FCTIDUZ, 6166 dl, MVT::f64, Src); 6167 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp); 6168 break; 6169 } 6170 return Tmp; 6171 } 6172 6173 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 6174 SDLoc dl) const { 6175 if (Subtarget.hasDirectMove() && Subtarget.isPPC64()) 6176 return LowerFP_TO_INTDirectMove(Op, DAG, dl); 6177 6178 ReuseLoadInfo RLI; 6179 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6180 6181 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, false, 6182 false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo, 6183 RLI.Ranges); 6184 } 6185 6186 // We're trying to insert a regular store, S, and then a load, L. If the 6187 // incoming value, O, is a load, we might just be able to have our load use the 6188 // address used by O. However, we don't know if anything else will store to 6189 // that address before we can load from it. To prevent this situation, we need 6190 // to insert our load, L, into the chain as a peer of O. To do this, we give L 6191 // the same chain operand as O, we create a token factor from the chain results 6192 // of O and L, and we replace all uses of O's chain result with that token 6193 // factor (see spliceIntoChain below for this last part). 
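// Pictorially (informal): if O's chain result previously fed users
// U1...Un, L is given O's chain *operand* as its own chain operand, a
// token factor TF = TokenFactor(chain(O), chain(L)) is created, and
// U1...Un are redirected to TF, so everything that was ordered after O is
// now ordered after both O and L.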
6194 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT, 6195 ReuseLoadInfo &RLI, 6196 SelectionDAG &DAG, 6197 ISD::LoadExtType ET) const { 6198 SDLoc dl(Op); 6199 if (ET == ISD::NON_EXTLOAD && 6200 (Op.getOpcode() == ISD::FP_TO_UINT || 6201 Op.getOpcode() == ISD::FP_TO_SINT) && 6202 isOperationLegalOrCustom(Op.getOpcode(), 6203 Op.getOperand(0).getValueType())) { 6204 6205 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6206 return true; 6207 } 6208 6209 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op); 6210 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() || 6211 LD->isNonTemporal()) 6212 return false; 6213 if (LD->getMemoryVT() != MemVT) 6214 return false; 6215 6216 RLI.Ptr = LD->getBasePtr(); 6217 if (LD->isIndexed() && LD->getOffset().getOpcode() != ISD::UNDEF) { 6218 assert(LD->getAddressingMode() == ISD::PRE_INC && 6219 "Non-pre-inc AM on PPC?"); 6220 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr, 6221 LD->getOffset()); 6222 } 6223 6224 RLI.Chain = LD->getChain(); 6225 RLI.MPI = LD->getPointerInfo(); 6226 RLI.IsInvariant = LD->isInvariant(); 6227 RLI.Alignment = LD->getAlignment(); 6228 RLI.AAInfo = LD->getAAInfo(); 6229 RLI.Ranges = LD->getRanges(); 6230 6231 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1); 6232 return true; 6233 } 6234 6235 // Given the head of the old chain, ResChain, insert a token factor containing 6236 // it and NewResChain, and make users of ResChain now be users of that token 6237 // factor. 6238 void PPCTargetLowering::spliceIntoChain(SDValue ResChain, 6239 SDValue NewResChain, 6240 SelectionDAG &DAG) const { 6241 if (!ResChain) 6242 return; 6243 6244 SDLoc dl(NewResChain); 6245 6246 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 6247 NewResChain, DAG.getUNDEF(MVT::Other)); 6248 assert(TF.getNode() != NewResChain.getNode() && 6249 "A new TF really is required here"); 6250 6251 DAG.ReplaceAllUsesOfValueWith(ResChain, TF); 6252 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain); 6253 } 6254 6255 /// \brief Custom lowers integer to floating point conversions to use 6256 /// the direct move instructions available in ISA 2.07 to avoid the 6257 /// need for load/store combinations. 6258 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op, 6259 SelectionDAG &DAG, 6260 SDLoc dl) const { 6261 assert((Op.getValueType() == MVT::f32 || 6262 Op.getValueType() == MVT::f64) && 6263 "Invalid floating point type as target of conversion"); 6264 assert(Subtarget.hasFPCVT() && 6265 "Int to FP conversions with direct moves require FPCVT"); 6266 SDValue FP; 6267 SDValue Src = Op.getOperand(0); 6268 bool SinglePrec = Op.getValueType() == MVT::f32; 6269 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32; 6270 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP; 6271 unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) : 6272 (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU); 6273 6274 if (WordInt) { 6275 FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ, 6276 dl, MVT::f64, Src); 6277 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 6278 } 6279 else { 6280 FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src); 6281 FP = DAG.getNode(ConvOp, dl, SinglePrec ? 
MVT::f32 : MVT::f64, FP);
6282 }
6283
6284 return FP;
6285 }
6286
6287 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
6288 SelectionDAG &DAG) const {
6289 SDLoc dl(Op);
6290
6291 if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) {
6292 if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64)
6293 return SDValue();
6294
6295 SDValue Value = Op.getOperand(0);
6296 // The values are now known to be -1 (false) or 1 (true). To convert this
6297 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
6298 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
6299 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
6300
6301 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::f64);
6302 FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64,
6303 FPHalfs, FPHalfs, FPHalfs, FPHalfs);
6304
6305 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
6306
6307 if (Op.getValueType() != MVT::v4f64)
6308 Value = DAG.getNode(ISD::FP_ROUND, dl,
6309 Op.getValueType(), Value,
6310 DAG.getIntPtrConstant(1, dl));
6311 return Value;
6312 }
6313
6314 // Don't handle ppc_fp128 here; let it be lowered to a libcall.
6315 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
6316 return SDValue();
6317
6318 if (Op.getOperand(0).getValueType() == MVT::i1)
6319 return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
6320 DAG.getConstantFP(1.0, dl, Op.getValueType()),
6321 DAG.getConstantFP(0.0, dl, Op.getValueType()));
6322
6323 // If we have direct moves, we can do the entire conversion and skip the
6324 // store/load; however, without FPCVT we can't do most conversions.
6325 if (Subtarget.hasDirectMove() && Subtarget.isPPC64() && Subtarget.hasFPCVT())
6326 return LowerINT_TO_FPDirectMove(Op, DAG, dl);
6327
6328 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
6329 "UINT_TO_FP is supported only with FPCVT");
6330
6331 // If we have FCFIDS, then use it when converting to single-precision.
6332 // Otherwise, convert to double-precision and then round.
6333 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
6334 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
6335 : PPCISD::FCFIDS)
6336 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
6337 : PPCISD::FCFID);
6338 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
6339 ? MVT::f32
6340 : MVT::f64;
6341
6342 if (Op.getOperand(0).getValueType() == MVT::i64) {
6343 SDValue SINT = Op.getOperand(0);
6344 // When converting to single-precision, we actually need to convert
6345 // to double-precision first and then round to single-precision.
6346 // To avoid double-rounding effects during that operation, we have
6347 // to prepare the input operand. Bits that might be truncated when
6348 // converting to double-precision are replaced by a bit that won't
6349 // be lost at this stage, but is below the single-precision rounding
6350 // position.
6351 //
6352 // However, if -enable-unsafe-fp-math is in effect, accept double
6353 // rounding to avoid the extra overhead.
6354 if (Op.getValueType() == MVT::f32 &&
6355 !Subtarget.hasFPCVT() &&
6356 !DAG.getTarget().Options.UnsafeFPMath) {
6357
6358 // Twiddle input to make sure the low 11 bits are zero. (If this
6359 // is the case, we are guaranteed the value will fit into the 53 bit
6360 // mantissa of an IEEE double-precision value without rounding.)
6361 // If any of those low 11 bits were not zero originally, make sure 6362 // bit 12 (value 2048) is set instead, so that the final rounding 6363 // to single-precision gets the correct result. 6364 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 6365 SINT, DAG.getConstant(2047, dl, MVT::i64)); 6366 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 6367 Round, DAG.getConstant(2047, dl, MVT::i64)); 6368 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 6369 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 6370 Round, DAG.getConstant(-2048, dl, MVT::i64)); 6371 6372 // However, we cannot use that value unconditionally: if the magnitude 6373 // of the input value is small, the bit-twiddling we did above might 6374 // end up visibly changing the output. Fortunately, in that case, we 6375 // don't need to twiddle bits since the original input will convert 6376 // exactly to double-precision floating-point already. Therefore, 6377 // construct a conditional to use the original value if the top 11 6378 // bits are all sign-bit copies, and use the rounded value computed 6379 // above otherwise. 6380 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 6381 SINT, DAG.getConstant(53, dl, MVT::i32)); 6382 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 6383 Cond, DAG.getConstant(1, dl, MVT::i64)); 6384 Cond = DAG.getSetCC(dl, MVT::i32, 6385 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT); 6386 6387 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 6388 } 6389 6390 ReuseLoadInfo RLI; 6391 SDValue Bits; 6392 6393 MachineFunction &MF = DAG.getMachineFunction(); 6394 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 6395 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, false, 6396 false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo, 6397 RLI.Ranges); 6398 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6399 } else if (Subtarget.hasLFIWAX() && 6400 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) { 6401 MachineMemOperand *MMO = 6402 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6403 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6404 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6405 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl, 6406 DAG.getVTList(MVT::f64, MVT::Other), 6407 Ops, MVT::i32, MMO); 6408 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6409 } else if (Subtarget.hasFPCVT() && 6410 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) { 6411 MachineMemOperand *MMO = 6412 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6413 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6414 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6415 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl, 6416 DAG.getVTList(MVT::f64, MVT::Other), 6417 Ops, MVT::i32, MMO); 6418 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6419 } else if (((Subtarget.hasLFIWAX() && 6420 SINT.getOpcode() == ISD::SIGN_EXTEND) || 6421 (Subtarget.hasFPCVT() && 6422 SINT.getOpcode() == ISD::ZERO_EXTEND)) && 6423 SINT.getOperand(0).getValueType() == MVT::i32) { 6424 MachineFrameInfo *FrameInfo = MF.getFrameInfo(); 6425 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 6426 6427 int FrameIdx = FrameInfo->CreateStackObject(4, 4, false); 6428 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6429 6430 SDValue Store = DAG.getStore( 6431 DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx, 6432 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx), 6433 false, false, 0); 6434 6435 assert(cast<StoreSDNode>(Store)->getMemoryVT() == 
MVT::i32 && 6436 "Expected an i32 store"); 6437 6438 RLI.Ptr = FIdx; 6439 RLI.Chain = Store; 6440 RLI.MPI = 6441 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 6442 RLI.Alignment = 4; 6443 6444 MachineMemOperand *MMO = 6445 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6446 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6447 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6448 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ? 6449 PPCISD::LFIWZX : PPCISD::LFIWAX, 6450 dl, DAG.getVTList(MVT::f64, MVT::Other), 6451 Ops, MVT::i32, MMO); 6452 } else 6453 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 6454 6455 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 6456 6457 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 6458 FP = DAG.getNode(ISD::FP_ROUND, dl, 6459 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 6460 return FP; 6461 } 6462 6463 assert(Op.getOperand(0).getValueType() == MVT::i32 && 6464 "Unhandled INT_TO_FP type in custom expander!"); 6465 // Since we only generate this in 64-bit mode, we can take advantage of 6466 // 64-bit registers. In particular, sign extend the input value into the 6467 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 6468 // then lfd it and fcfid it. 6469 MachineFunction &MF = DAG.getMachineFunction(); 6470 MachineFrameInfo *FrameInfo = MF.getFrameInfo(); 6471 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 6472 6473 SDValue Ld; 6474 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 6475 ReuseLoadInfo RLI; 6476 bool ReusingLoad; 6477 if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI, 6478 DAG))) { 6479 int FrameIdx = FrameInfo->CreateStackObject(4, 4, false); 6480 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6481 6482 SDValue Store = DAG.getStore( 6483 DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 6484 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx), 6485 false, false, 0); 6486 6487 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 6488 "Expected an i32 store"); 6489 6490 RLI.Ptr = FIdx; 6491 RLI.Chain = Store; 6492 RLI.MPI = 6493 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 6494 RLI.Alignment = 4; 6495 } 6496 6497 MachineMemOperand *MMO = 6498 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6499 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6500 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6501 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 6502 PPCISD::LFIWZX : PPCISD::LFIWAX, 6503 dl, DAG.getVTList(MVT::f64, MVT::Other), 6504 Ops, MVT::i32, MMO); 6505 if (ReusingLoad) 6506 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG); 6507 } else { 6508 assert(Subtarget.isPPC64() && 6509 "i32->FP without LFIWAX supported only on PPC64"); 6510 6511 int FrameIdx = FrameInfo->CreateStackObject(8, 8, false); 6512 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6513 6514 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, 6515 Op.getOperand(0)); 6516 6517 // STD the extended value into the stack slot. 6518 SDValue Store = DAG.getStore( 6519 DAG.getEntryNode(), dl, Ext64, FIdx, 6520 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx), 6521 false, false, 0); 6522 6523 // Load the value as a double. 6524 Ld = DAG.getLoad( 6525 MVT::f64, dl, Store, FIdx, 6526 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx), 6527 false, false, false, 0); 6528 } 6529 6530 // FCFID it and return it. 
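// (fcfid converts the doubleword integer now sitting in an FPR to double
// precision; with FPCVT, fcfids/fcfidus instead round directly to single
// precision, which is why the extra FP_ROUND below is needed only in the
// no-FPCVT case.)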
6531 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
6532 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
6533 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
6534 DAG.getIntPtrConstant(0, dl));
6535 return FP;
6536 }
6537
6538 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
6539 SelectionDAG &DAG) const {
6540 SDLoc dl(Op);
6541 /*
6542 The rounding mode is in bits 30:31 of FPSCR, and has the following
6543 settings:
6544 00 Round to nearest
6545 01 Round to 0
6546 10 Round to +inf
6547 11 Round to -inf
6548
6549 FLT_ROUNDS, on the other hand, expects the following:
6550 -1 Undefined
6551 0 Round to 0
6552 1 Round to nearest
6553 2 Round to +inf
6554 3 Round to -inf
6555
6556 To perform the conversion, we do:
6557 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
6558 */
6559
6560 MachineFunction &MF = DAG.getMachineFunction();
6561 EVT VT = Op.getValueType();
6562 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
6563
6564 // Save FP Control Word to register
6565 EVT NodeTys[] = {
6566 MVT::f64, // return register
6567 MVT::Glue // unused in this context
6568 };
6569 SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);
6570
6571 // Save FP register to stack slot
6572 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false);
6573 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
6574 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain,
6575 StackSlot, MachinePointerInfo(), false, false, 0);
6576
6577 // Load FP Control Word from low 32 bits of stack slot.
6578 SDValue Four = DAG.getConstant(4, dl, PtrVT);
6579 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
6580 SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(),
6581 false, false, false, 0);
6582
6583 // Transform as necessary
6584 SDValue CWD1 =
6585 DAG.getNode(ISD::AND, dl, MVT::i32,
6586 CWD, DAG.getConstant(3, dl, MVT::i32));
6587 SDValue CWD2 =
6588 DAG.getNode(ISD::SRL, dl, MVT::i32,
6589 DAG.getNode(ISD::AND, dl, MVT::i32,
6590 DAG.getNode(ISD::XOR, dl, MVT::i32,
6591 CWD, DAG.getConstant(3, dl, MVT::i32)),
6592 DAG.getConstant(3, dl, MVT::i32)),
6593 DAG.getConstant(1, dl, MVT::i32));
6594
6595 SDValue RetVal =
6596 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
6597
6598 return DAG.getNode((VT.getSizeInBits() < 16 ?
6599 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
6600 }
6601
6602 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
6603 EVT VT = Op.getValueType();
6604 unsigned BitWidth = VT.getSizeInBits();
6605 SDLoc dl(Op);
6606 assert(Op.getNumOperands() == 3 &&
6607 VT == Op.getOperand(1).getValueType() &&
6608 "Unexpected SHL!");
6609
6610 // Expand into a bunch of logical ops. Note that these ops
6611 // depend on the PPC behavior for oversized shift amounts.
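// As a worked example (informal), consider 32-bit parts Hi:Lo shifted
// left by Amt = 40. Then:
//   Tmp2 = Hi << 40       = 0        (shift amounts of 32..63 yield zero)
//   Tmp3 = Lo >> (32-40)  = Lo >> 56 = 0  (the amount wraps modulo 64)
//   Tmp6 = Lo << (40-32)  = Lo << 8
// so OutHi = Lo << 8 and OutLo = Lo << 40 = 0, the expected 64-bit result.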
6612 SDValue Lo = Op.getOperand(0); 6613 SDValue Hi = Op.getOperand(1); 6614 SDValue Amt = Op.getOperand(2); 6615 EVT AmtVT = Amt.getValueType(); 6616 6617 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6618 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6619 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 6620 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 6621 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 6622 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6623 DAG.getConstant(-BitWidth, dl, AmtVT)); 6624 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 6625 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 6626 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 6627 SDValue OutOps[] = { OutLo, OutHi }; 6628 return DAG.getMergeValues(OutOps, dl); 6629 } 6630 6631 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 6632 EVT VT = Op.getValueType(); 6633 SDLoc dl(Op); 6634 unsigned BitWidth = VT.getSizeInBits(); 6635 assert(Op.getNumOperands() == 3 && 6636 VT == Op.getOperand(1).getValueType() && 6637 "Unexpected SRL!"); 6638 6639 // Expand into a bunch of logical ops. Note that these ops 6640 // depend on the PPC behavior for oversized shift amounts. 6641 SDValue Lo = Op.getOperand(0); 6642 SDValue Hi = Op.getOperand(1); 6643 SDValue Amt = Op.getOperand(2); 6644 EVT AmtVT = Amt.getValueType(); 6645 6646 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6647 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6648 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 6649 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 6650 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 6651 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6652 DAG.getConstant(-BitWidth, dl, AmtVT)); 6653 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 6654 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 6655 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 6656 SDValue OutOps[] = { OutLo, OutHi }; 6657 return DAG.getMergeValues(OutOps, dl); 6658 } 6659 6660 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 6661 SDLoc dl(Op); 6662 EVT VT = Op.getValueType(); 6663 unsigned BitWidth = VT.getSizeInBits(); 6664 assert(Op.getNumOperands() == 3 && 6665 VT == Op.getOperand(1).getValueType() && 6666 "Unexpected SRA!"); 6667 6668 // Expand into a bunch of logical ops, followed by a select_cc. 6669 SDValue Lo = Op.getOperand(0); 6670 SDValue Hi = Op.getOperand(1); 6671 SDValue Amt = Op.getOperand(2); 6672 EVT AmtVT = Amt.getValueType(); 6673 6674 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6675 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6676 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 6677 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 6678 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 6679 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6680 DAG.getConstant(-BitWidth, dl, AmtVT)); 6681 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 6682 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 6683 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT), 6684 Tmp4, Tmp6, ISD::SETLE); 6685 SDValue OutOps[] = { OutLo, OutHi }; 6686 return DAG.getMergeValues(OutOps, dl); 6687 } 6688 6689 //===----------------------------------------------------------------------===// 6690 // Vector related lowering. 
6691 // 6692 6693 /// BuildSplatI - Build a canonical splati of Val with an element size of 6694 /// SplatSize. Cast the result to VT. 6695 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 6696 SelectionDAG &DAG, SDLoc dl) { 6697 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 6698 6699 static const MVT VTys[] = { // canonical VT to use for each size. 6700 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 6701 }; 6702 6703 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 6704 6705 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 6706 if (Val == -1) 6707 SplatSize = 1; 6708 6709 EVT CanonicalVT = VTys[SplatSize-1]; 6710 6711 // Build a canonical splat for this value. 6712 SDValue Elt = DAG.getConstant(Val, dl, MVT::i32); 6713 SmallVector<SDValue, 8> Ops; 6714 Ops.assign(CanonicalVT.getVectorNumElements(), Elt); 6715 SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, Ops); 6716 return DAG.getNode(ISD::BITCAST, dl, ReqVT, Res); 6717 } 6718 6719 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 6720 /// specified intrinsic ID. 6721 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, 6722 SelectionDAG &DAG, SDLoc dl, 6723 EVT DestVT = MVT::Other) { 6724 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 6725 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 6726 DAG.getConstant(IID, dl, MVT::i32), Op); 6727 } 6728 6729 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 6730 /// specified intrinsic ID. 6731 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 6732 SelectionDAG &DAG, SDLoc dl, 6733 EVT DestVT = MVT::Other) { 6734 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 6735 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 6736 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS); 6737 } 6738 6739 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 6740 /// specified intrinsic ID. 6741 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 6742 SDValue Op2, SelectionDAG &DAG, 6743 SDLoc dl, EVT DestVT = MVT::Other) { 6744 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 6745 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 6746 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2); 6747 } 6748 6749 6750 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 6751 /// amount. The result has the specified value type. 6752 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, 6753 EVT VT, SelectionDAG &DAG, SDLoc dl) { 6754 // Force LHS/RHS to be the right type. 6755 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 6756 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 6757 6758 int Ops[16]; 6759 for (unsigned i = 0; i != 16; ++i) 6760 Ops[i] = i + Amt; 6761 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 6762 return DAG.getNode(ISD::BITCAST, dl, VT, T); 6763 } 6764 6765 // If this is a case we can't handle, return null and let the default 6766 // expansion code take care of it. If we CAN select this case, and if it 6767 // selects to a single instruction, return Op. Otherwise, if we can codegen 6768 // this case more efficiently than a constant pool load, lower it to the 6769 // sequence of ops that should be used. 
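// For instance (informal): a splat of 5 is in the vsplti immediate range
// [-16,15] and becomes a single vsplti; a splat of 30 is not, but can be
// built as vsplti(15) followed by an add of the result to itself; and a
// splat of 17 can be built as vsplti(1) minus vsplti(-16), per the
// two-instruction sequences described further below.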
6770 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 6771 SelectionDAG &DAG) const { 6772 SDLoc dl(Op); 6773 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 6774 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 6775 6776 if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) { 6777 // We first build an i32 vector, load it into a QPX register, 6778 // then convert it to a floating-point vector and compare it 6779 // to a zero vector to get the boolean result. 6780 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 6781 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 6782 MachinePointerInfo PtrInfo = 6783 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 6784 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6785 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6786 6787 assert(BVN->getNumOperands() == 4 && 6788 "BUILD_VECTOR for v4i1 does not have 4 operands"); 6789 6790 bool IsConst = true; 6791 for (unsigned i = 0; i < 4; ++i) { 6792 if (BVN->getOperand(i).getOpcode() == ISD::UNDEF) continue; 6793 if (!isa<ConstantSDNode>(BVN->getOperand(i))) { 6794 IsConst = false; 6795 break; 6796 } 6797 } 6798 6799 if (IsConst) { 6800 Constant *One = 6801 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0); 6802 Constant *NegOne = 6803 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0); 6804 6805 SmallVector<Constant*, 4> CV(4, NegOne); 6806 for (unsigned i = 0; i < 4; ++i) { 6807 if (BVN->getOperand(i).getOpcode() == ISD::UNDEF) 6808 CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext())); 6809 else if (cast<ConstantSDNode>(BVN->getOperand(i))-> 6810 getConstantIntValue()->isZero()) 6811 continue; 6812 else 6813 CV[i] = One; 6814 } 6815 6816 Constant *CP = ConstantVector::get(CV); 6817 SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()), 6818 16 /* alignment */); 6819 6820 SmallVector<SDValue, 2> Ops; 6821 Ops.push_back(DAG.getEntryNode()); 6822 Ops.push_back(CPIdx); 6823 6824 SmallVector<EVT, 2> ValueVTs; 6825 ValueVTs.push_back(MVT::v4i1); 6826 ValueVTs.push_back(MVT::Other); // chain 6827 SDVTList VTs = DAG.getVTList(ValueVTs); 6828 6829 return DAG.getMemIntrinsicNode( 6830 PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32, 6831 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 6832 } 6833 6834 SmallVector<SDValue, 4> Stores; 6835 for (unsigned i = 0; i < 4; ++i) { 6836 if (BVN->getOperand(i).getOpcode() == ISD::UNDEF) continue; 6837 6838 unsigned Offset = 4*i; 6839 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 6840 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 6841 6842 unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize(); 6843 if (StoreSize > 4) { 6844 Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl, 6845 BVN->getOperand(i), Idx, 6846 PtrInfo.getWithOffset(Offset), 6847 MVT::i32, false, false, 0)); 6848 } else { 6849 SDValue StoreValue = BVN->getOperand(i); 6850 if (StoreSize < 4) 6851 StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue); 6852 6853 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, 6854 StoreValue, Idx, 6855 PtrInfo.getWithOffset(Offset), 6856 false, false, 0)); 6857 } 6858 } 6859 6860 SDValue StoreChain; 6861 if (!Stores.empty()) 6862 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 6863 else 6864 StoreChain = DAG.getEntryNode(); 6865 6866 // Now load from v4i32 into the QPX register; this will extend it to 6867 // v4i64 but not yet 
convert it to a floating point. Nevertheless, this 6868 // is typed as v4f64 because the QPX register integer states are not 6869 // explicitly represented. 6870 6871 SmallVector<SDValue, 2> Ops; 6872 Ops.push_back(StoreChain); 6873 Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32)); 6874 Ops.push_back(FIdx); 6875 6876 SmallVector<EVT, 2> ValueVTs; 6877 ValueVTs.push_back(MVT::v4f64); 6878 ValueVTs.push_back(MVT::Other); // chain 6879 SDVTList VTs = DAG.getVTList(ValueVTs); 6880 6881 SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, 6882 dl, VTs, Ops, MVT::v4i32, PtrInfo); 6883 LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 6884 DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32), 6885 LoadedVect); 6886 6887 SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::f64); 6888 FPZeros = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64, 6889 FPZeros, FPZeros, FPZeros, FPZeros); 6890 6891 return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ); 6892 } 6893 6894 // All other QPX vectors are handled by generic code. 6895 if (Subtarget.hasQPX()) 6896 return SDValue(); 6897 6898 // Check if this is a splat of a constant value. 6899 APInt APSplatBits, APSplatUndef; 6900 unsigned SplatBitSize; 6901 bool HasAnyUndefs; 6902 if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize, 6903 HasAnyUndefs, 0, !Subtarget.isLittleEndian()) || 6904 SplatBitSize > 32) 6905 return SDValue(); 6906 6907 unsigned SplatBits = APSplatBits.getZExtValue(); 6908 unsigned SplatUndef = APSplatUndef.getZExtValue(); 6909 unsigned SplatSize = SplatBitSize / 8; 6910 6911 // First, handle single instruction cases. 6912 6913 // All zeros? 6914 if (SplatBits == 0) { 6915 // Canonicalize all zero vectors to be v4i32. 6916 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 6917 SDValue Z = DAG.getConstant(0, dl, MVT::i32); 6918 Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z); 6919 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z); 6920 } 6921 return Op; 6922 } 6923 6924 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 6925 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >> 6926 (32-SplatBitSize)); 6927 if (SextVal >= -16 && SextVal <= 15) 6928 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl); 6929 6930 6931 // Two instruction sequences. 6932 6933 // If this value is in the range [-32,30] and is even, use: 6934 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2) 6935 // If this value is in the range [17,31] and is odd, use: 6936 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16) 6937 // If this value is in the range [-31,-17] and is odd, use: 6938 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16) 6939 // Note the last two are three-instruction sequences. 6940 if (SextVal >= -32 && SextVal <= 31) { 6941 // To avoid having these optimizations undone by constant folding, 6942 // we convert to a pseudo that will be expanded later into one of 6943 // the above forms. 6944 SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32); 6945 EVT VT = (SplatSize == 1 ? MVT::v16i8 : 6946 (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32)); 6947 SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32); 6948 SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize); 6949 if (VT == Op.getValueType()) 6950 return RetVal; 6951 else 6952 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal); 6953 } 6954 6955 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. 
If it is
6956 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important
6957 // for fneg/fabs.
6958 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
6959 // Make -1 and vspltisw -1:
6960 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);
6961
6962 // Make the VSLW intrinsic, computing 0x8000_0000.
6963 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
6964 OnesV, DAG, dl);
6965
6966 // xor by OnesV to invert it.
6967 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
6968 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
6969 }
6970
6971 // Check to see if this is a wide variety of vsplti*, binop self cases.
6972 static const signed char SplatCsts[] = {
6973 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
6974 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
6975 };
6976
6977 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
6978 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
6979 // cases which are ambiguous (e.g. formation of 0x8000_0000).
6980 int i = SplatCsts[idx];
6981
6982 // Figure out what shift amount will be used by altivec if shifted by i in
6983 // this splat size.
6984 unsigned TypeShiftAmt = i & (SplatBitSize-1);
6985
6986 // vsplti + shl self.
6987 if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
6988 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
6989 static const unsigned IIDs[] = { // Intrinsic to use for each size.
6990 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
6991 Intrinsic::ppc_altivec_vslw
6992 };
6993 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
6994 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
6995 }
6996
6997 // vsplti + srl self.
6998 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
6999 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
7000 static const unsigned IIDs[] = { // Intrinsic to use for each size.
7001 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
7002 Intrinsic::ppc_altivec_vsrw
7003 };
7004 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
7005 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7006 }
7007
7008 // vsplti + sra self.
7009 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
7010 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
7011 static const unsigned IIDs[] = { // Intrinsic to use for each size.
7012 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
7013 Intrinsic::ppc_altivec_vsraw
7014 };
7015 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
7016 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7017 }
7018
7019 // vsplti + rol self.
7020 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
7021 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
7022 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
7023 static const unsigned IIDs[] = { // Intrinsic to use for each size.
7024 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
7025 Intrinsic::ppc_altivec_vrlw
7026 };
7027 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
7028 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7029 }
7030
7031 // t = vsplti c, result = vsldoi t, t, 1
7032 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
7033 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
7034 unsigned Amt = Subtarget.isLittleEndian() ?
15 : 1; 7035 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 7036 } 7037 // t = vsplti c, result = vsldoi t, t, 2 7038 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) { 7039 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 7040 unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2; 7041 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 7042 } 7043 // t = vsplti c, result = vsldoi t, t, 3 7044 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) { 7045 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 7046 unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3; 7047 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 7048 } 7049 } 7050 7051 return SDValue(); 7052 } 7053 7054 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 7055 /// the specified operations to build the shuffle. 7056 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 7057 SDValue RHS, SelectionDAG &DAG, 7058 SDLoc dl) { 7059 unsigned OpNum = (PFEntry >> 26) & 0x0F; 7060 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 7061 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 7062 7063 enum { 7064 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 7065 OP_VMRGHW, 7066 OP_VMRGLW, 7067 OP_VSPLTISW0, 7068 OP_VSPLTISW1, 7069 OP_VSPLTISW2, 7070 OP_VSPLTISW3, 7071 OP_VSLDOI4, 7072 OP_VSLDOI8, 7073 OP_VSLDOI12 7074 }; 7075 7076 if (OpNum == OP_COPY) { 7077 if (LHSID == (1*9+2)*9+3) return LHS; 7078 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 7079 return RHS; 7080 } 7081 7082 SDValue OpLHS, OpRHS; 7083 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 7084 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 7085 7086 int ShufIdxs[16]; 7087 switch (OpNum) { 7088 default: llvm_unreachable("Unknown i32 permute!"); 7089 case OP_VMRGHW: 7090 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 7091 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 7092 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 7093 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 7094 break; 7095 case OP_VMRGLW: 7096 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 7097 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 7098 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 7099 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 7100 break; 7101 case OP_VSPLTISW0: 7102 for (unsigned i = 0; i != 16; ++i) 7103 ShufIdxs[i] = (i&3)+0; 7104 break; 7105 case OP_VSPLTISW1: 7106 for (unsigned i = 0; i != 16; ++i) 7107 ShufIdxs[i] = (i&3)+4; 7108 break; 7109 case OP_VSPLTISW2: 7110 for (unsigned i = 0; i != 16; ++i) 7111 ShufIdxs[i] = (i&3)+8; 7112 break; 7113 case OP_VSPLTISW3: 7114 for (unsigned i = 0; i != 16; ++i) 7115 ShufIdxs[i] = (i&3)+12; 7116 break; 7117 case OP_VSLDOI4: 7118 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl); 7119 case OP_VSLDOI8: 7120 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl); 7121 case OP_VSLDOI12: 7122 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl); 7123 } 7124 EVT VT = OpLHS.getValueType(); 7125 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS); 7126 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS); 7127 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, 
ShufIdxs); 7128 return DAG.getNode(ISD::BITCAST, dl, VT, T); 7129 } 7130 7131 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 7132 /// is a shuffle we can handle in a single instruction, return it. Otherwise, 7133 /// return the code it can be lowered into. Worst case, it can always be 7134 /// lowered into a vperm. 7135 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 7136 SelectionDAG &DAG) const { 7137 SDLoc dl(Op); 7138 SDValue V1 = Op.getOperand(0); 7139 SDValue V2 = Op.getOperand(1); 7140 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 7141 EVT VT = Op.getValueType(); 7142 bool isLittleEndian = Subtarget.isLittleEndian(); 7143 7144 if (Subtarget.hasQPX()) { 7145 if (VT.getVectorNumElements() != 4) 7146 return SDValue(); 7147 7148 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 7149 7150 int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp); 7151 if (AlignIdx != -1) { 7152 return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2, 7153 DAG.getConstant(AlignIdx, dl, MVT::i32)); 7154 } else if (SVOp->isSplat()) { 7155 int SplatIdx = SVOp->getSplatIndex(); 7156 if (SplatIdx >= 4) { 7157 std::swap(V1, V2); 7158 SplatIdx -= 4; 7159 } 7160 7161 // FIXME: If SplatIdx == 0 and the input came from a load, then there is 7162 // nothing to do. 7163 7164 return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1, 7165 DAG.getConstant(SplatIdx, dl, MVT::i32)); 7166 } 7167 7168 // Lower this into a qvgpci/qvfperm pair. 7169 7170 // Compute the qvgpci literal 7171 unsigned idx = 0; 7172 for (unsigned i = 0; i < 4; ++i) { 7173 int m = SVOp->getMaskElt(i); 7174 unsigned mm = m >= 0 ? (unsigned) m : i; 7175 idx |= mm << (3-i)*3; 7176 } 7177 7178 SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64, 7179 DAG.getConstant(idx, dl, MVT::i32)); 7180 return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3); 7181 } 7182 7183 // Cases that are handled by instructions that take permute immediates 7184 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 7185 // selected by the instruction selector. 7186 if (V2.getOpcode() == ISD::UNDEF) { 7187 if (PPC::isSplatShuffleMask(SVOp, 1) || 7188 PPC::isSplatShuffleMask(SVOp, 2) || 7189 PPC::isSplatShuffleMask(SVOp, 4) || 7190 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) || 7191 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) || 7192 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) || 7193 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 || 7194 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) || 7195 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) || 7196 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) || 7197 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) || 7198 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) || 7199 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) || 7200 PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) || 7201 PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)) { 7202 return Op; 7203 } 7204 } 7205 7206 // Altivec has a variety of "shuffle immediates" that take two vector inputs 7207 // and produce a fixed permutation. If any of these match, do not lower to 7208 // VPERM. 7209 unsigned int ShuffleKind = isLittleEndian ? 
2 : 0; 7210 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) || 7211 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) || 7212 PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) || 7213 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 || 7214 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) || 7215 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) || 7216 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) || 7217 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) || 7218 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) || 7219 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) || 7220 PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) || 7221 PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG)) 7222 return Op; 7223 7224 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 7225 // perfect shuffle table to emit an optimal matching sequence. 7226 ArrayRef<int> PermMask = SVOp->getMask(); 7227 7228 unsigned PFIndexes[4]; 7229 bool isFourElementShuffle = true; 7230 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 7231 unsigned EltNo = 8; // Start out undef. 7232 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 7233 if (PermMask[i*4+j] < 0) 7234 continue; // Undef, ignore it. 7235 7236 unsigned ByteSource = PermMask[i*4+j]; 7237 if ((ByteSource & 3) != j) { 7238 isFourElementShuffle = false; 7239 break; 7240 } 7241 7242 if (EltNo == 8) { 7243 EltNo = ByteSource/4; 7244 } else if (EltNo != ByteSource/4) { 7245 isFourElementShuffle = false; 7246 break; 7247 } 7248 } 7249 PFIndexes[i] = EltNo; 7250 } 7251 7252 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 7253 // perfect shuffle vector to determine if it is cost effective to do this as 7254 // discrete instructions, or whether we should use a vperm. 7255 // For now, we skip this for little endian until such time as we have a 7256 // little-endian perfect shuffle table. 7257 if (isFourElementShuffle && !isLittleEndian) { 7258 // Compute the index in the perfect shuffle table. 7259 unsigned PFTableIndex = 7260 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 7261 7262 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 7263 unsigned Cost = (PFEntry >> 30); 7264 7265 // Determining when to avoid vperm is tricky. Many things affect the cost 7266 // of vperm, particularly how many times the perm mask needs to be computed. 7267 // For example, if the perm mask can be hoisted out of a loop or is already 7268 // used (perhaps because there are multiple permutes with the same shuffle 7269 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 7270 // the loop requires an extra register. 7271 // 7272 // As a compromise, we only emit discrete instructions if the shuffle can be 7273 // generated in 3 or fewer operations. When we have loop information 7274 // available, if this block is within a loop, we should avoid using vperm 7275 // for 3-operation perms and use a constant pool load instead. 7276 if (Cost < 3) 7277 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 7278 } 7279 7280 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 7281 // vector that will get spilled to the constant pool. 7282 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 7283 7284 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 7285 // that it is in input element units, not in bytes. Convert now. 
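// For example (informal): with v4i32 inputs (BytesPerElement == 4), mask
// element 5 (element 1 of the second input) expands to byte indices
// 20,21,22,23 for a big-endian vperm; on little-endian targets those
// become 31-20..31-23 = 11,10,9,8 and the two inputs are swapped, as
// described below.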
7286 7287 // For little endian, the order of the input vectors is reversed, and 7288 // the permutation mask is complemented with respect to 31. This is 7289 // necessary to produce proper semantics with the big-endian-biased vperm 7290 // instruction. 7291 EVT EltVT = V1.getValueType().getVectorElementType(); 7292 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 7293 7294 SmallVector<SDValue, 16> ResultMask; 7295 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 7296 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i]; 7297 7298 for (unsigned j = 0; j != BytesPerElement; ++j) 7299 if (isLittleEndian) 7300 ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j), 7301 dl, MVT::i32)); 7302 else 7303 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl, 7304 MVT::i32)); 7305 } 7306 7307 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, 7308 ResultMask); 7309 if (isLittleEndian) 7310 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 7311 V2, V1, VPermMask); 7312 else 7313 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 7314 V1, V2, VPermMask); 7315 } 7316 7317 /// getAltivecCompareInfo - Given an intrinsic, return false if it is not an 7318 /// altivec comparison. If it is, return true and fill in Opc/isDot with 7319 /// information about the intrinsic. 7320 static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, 7321 bool &isDot, const PPCSubtarget &Subtarget) { 7322 unsigned IntrinsicID = 7323 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 7324 CompareOpc = -1; 7325 isDot = false; 7326 switch (IntrinsicID) { 7327 default: return false; 7328 // Comparison predicates. 7329 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; 7330 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; 7331 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; 7332 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; 7333 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; 7334 case Intrinsic::ppc_altivec_vcmpequd_p: 7335 if (Subtarget.hasP8Altivec()) { 7336 CompareOpc = 199; 7337 isDot = 1; 7338 } 7339 else 7340 return false; 7341 7342 break; 7343 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; 7344 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; 7345 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; 7346 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; 7347 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; 7348 case Intrinsic::ppc_altivec_vcmpgtsd_p: 7349 if (Subtarget.hasP8Altivec()) { 7350 CompareOpc = 967; 7351 isDot = 1; 7352 } 7353 else 7354 return false; 7355 7356 break; 7357 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; 7358 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; 7359 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; 7360 case Intrinsic::ppc_altivec_vcmpgtud_p: 7361 if (Subtarget.hasP8Altivec()) { 7362 CompareOpc = 711; 7363 isDot = 1; 7364 } 7365 else 7366 return false; 7367 7368 break; 7369 7370 // Normal Comparisons. 
7371 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 7372 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 7373 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 7374 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 7375 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 7376 case Intrinsic::ppc_altivec_vcmpequd: 7377 if (Subtarget.hasP8Altivec()) { 7378 CompareOpc = 199; 7379 isDot = 0; 7380 } 7381 else 7382 return false; 7383 7384 break; 7385 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 7386 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 7387 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 7388 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 7389 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 7390 case Intrinsic::ppc_altivec_vcmpgtsd: 7391 if (Subtarget.hasP8Altivec()) { 7392 CompareOpc = 967; 7393 isDot = 0; 7394 } 7395 else 7396 return false; 7397 7398 break; 7399 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 7400 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 7401 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 7402 case Intrinsic::ppc_altivec_vcmpgtud: 7403 if (Subtarget.hasP8Altivec()) { 7404 CompareOpc = 711; 7405 isDot = 0; 7406 } 7407 else 7408 return false; 7409 7410 break; 7411 } 7412 return true; 7413 } 7414 7415 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 7416 /// lower, do it, otherwise return null. 7417 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 7418 SelectionDAG &DAG) const { 7419 // If this is a lowered altivec predicate compare, CompareOpc is set to the 7420 // opcode number of the comparison. 7421 SDLoc dl(Op); 7422 int CompareOpc; 7423 bool isDot; 7424 if (!getAltivecCompareInfo(Op, CompareOpc, isDot, Subtarget)) 7425 return SDValue(); // Don't custom lower most intrinsics. 7426 7427 // If this is a non-dot comparison, make the VCMP node and we are done. 7428 if (!isDot) { 7429 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 7430 Op.getOperand(1), Op.getOperand(2), 7431 DAG.getConstant(CompareOpc, dl, MVT::i32)); 7432 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 7433 } 7434 7435 // Create the PPCISD altivec 'dot' comparison node. 7436 SDValue Ops[] = { 7437 Op.getOperand(2), // LHS 7438 Op.getOperand(3), // RHS 7439 DAG.getConstant(CompareOpc, dl, MVT::i32) 7440 }; 7441 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue }; 7442 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 7443 7444 // Now that we have the comparison, emit a copy from the CR to a GPR. 7445 // This is flagged to the above dot comparison. 7446 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32, 7447 DAG.getRegister(PPC::CR6, MVT::i32), 7448 CompNode.getValue(1)); 7449 7450 // Unpack the result based on how the target uses it. 7451 unsigned BitNo; // Bit # of CR6. 7452 bool InvertBit; // Invert result? 7453 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 7454 default: // Can't happen, don't crash on invalid number though. 7455 case 0: // Return the value of the EQ bit of CR6. 7456 BitNo = 0; InvertBit = false; 7457 break; 7458 case 1: // Return the inverted value of the EQ bit of CR6. 
7459 BitNo = 0; InvertBit = true; 7460 break; 7461 case 2: // Return the value of the LT bit of CR6. 7462 BitNo = 2; InvertBit = false; 7463 break; 7464 case 3: // Return the inverted value of the LT bit of CR6. 7465 BitNo = 2; InvertBit = true; 7466 break; 7467 } 7468 7469 // Shift the bit into the low position. 7470 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags, 7471 DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32)); 7472 // Isolate the bit. 7473 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags, 7474 DAG.getConstant(1, dl, MVT::i32)); 7475 7476 // If we are supposed to, toggle the bit. 7477 if (InvertBit) 7478 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags, 7479 DAG.getConstant(1, dl, MVT::i32)); 7480 return Flags; 7481 } 7482 7483 SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, 7484 SelectionDAG &DAG) const { 7485 SDLoc dl(Op); 7486 // For v2i64 (VSX), we can pattern match the v2i32 case (using fp <-> int 7487 // instructions), but for smaller types, we need to first extend up to v2i32 7488 // before going any further. 7489 if (Op.getValueType() == MVT::v2i64) { 7490 EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 7491 if (ExtVT != MVT::v2i32) { 7492 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)); 7493 Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op, 7494 DAG.getValueType(EVT::getVectorVT(*DAG.getContext(), 7495 ExtVT.getVectorElementType(), 4))); 7496 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op); 7497 Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op, 7498 DAG.getValueType(MVT::v2i32)); 7499 } 7500 7501 return Op; 7502 } 7503 7504 return SDValue(); 7505 } 7506 7507 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, 7508 SelectionDAG &DAG) const { 7509 SDLoc dl(Op); 7510 // Create a stack slot that is 16-byte aligned. 7511 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 7512 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 7513 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7514 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7515 7516 // Store the input value into Value#0 of the stack slot. 7517 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, 7518 Op.getOperand(0), FIdx, MachinePointerInfo(), 7519 false, false, 0); 7520 // Load it out. 7521 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo(), 7522 false, false, false, 0); 7523 } 7524 7525 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, 7526 SelectionDAG &DAG) const { 7527 SDLoc dl(Op); 7528 SDNode *N = Op.getNode(); 7529 7530 assert(N->getOperand(0).getValueType() == MVT::v4i1 && 7531 "Unknown extract_vector_elt type"); 7532 7533 SDValue Value = N->getOperand(0); 7534 7535 // The first part of this is like the store lowering except that we don't 7536 // need to track the chain. 7537 7538 // The values are now known to be -1 (false) or 1 (true). To convert this 7539 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 7540 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 7541 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 7542 7543 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 7544 // understand how to form the extending load.
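// Sanity check on the comment above: 0.5*(-1.0) + 0.5 == 0.0 and
// 0.5*(1.0) + 0.5 == 1.0, so the FMA below maps the lane encoding
// directly onto 0 and 1.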
7545 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::f64); 7546 FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64, 7547 FPHalfs, FPHalfs, FPHalfs, FPHalfs); 7548 7549 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 7550 7551 // Now convert to an integer and store. 7552 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 7553 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 7554 Value); 7555 7556 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 7557 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 7558 MachinePointerInfo PtrInfo = 7559 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7560 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7561 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7562 7563 SDValue StoreChain = DAG.getEntryNode(); 7564 SmallVector<SDValue, 2> Ops; 7565 Ops.push_back(StoreChain); 7566 Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32)); 7567 Ops.push_back(Value); 7568 Ops.push_back(FIdx); 7569 7570 SmallVector<EVT, 2> ValueVTs; 7571 ValueVTs.push_back(MVT::Other); // chain 7572 SDVTList VTs = DAG.getVTList(ValueVTs); 7573 7574 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 7575 dl, VTs, Ops, MVT::v4i32, PtrInfo); 7576 7577 // Extract the value requested. 7578 unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 7579 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 7580 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 7581 7582 SDValue IntVal = DAG.getLoad(MVT::i32, dl, StoreChain, Idx, 7583 PtrInfo.getWithOffset(Offset), 7584 false, false, false, 0); 7585 7586 if (!Subtarget.useCRBits()) 7587 return IntVal; 7588 7589 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal); 7590 } 7591 7592 /// Lowering for QPX v4i1 loads 7593 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op, 7594 SelectionDAG &DAG) const { 7595 SDLoc dl(Op); 7596 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode()); 7597 SDValue LoadChain = LN->getChain(); 7598 SDValue BasePtr = LN->getBasePtr(); 7599 7600 if (Op.getValueType() == MVT::v4f64 || 7601 Op.getValueType() == MVT::v4f32) { 7602 EVT MemVT = LN->getMemoryVT(); 7603 unsigned Alignment = LN->getAlignment(); 7604 7605 // If this load is properly aligned, then it is legal. 
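// Otherwise, scalarize below: emit one (possibly extending) scalar load
// per lane, rebuild the vector with BUILD_VECTOR, and preserve any
// pre-increment form on the first lane.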
7606 if (Alignment >= MemVT.getStoreSize()) 7607 return Op; 7608 7609 EVT ScalarVT = Op.getValueType().getScalarType(), 7610 ScalarMemVT = MemVT.getScalarType(); 7611 unsigned Stride = ScalarMemVT.getStoreSize(); 7612 7613 SmallVector<SDValue, 8> Vals, LoadChains; 7614 for (unsigned Idx = 0; Idx < 4; ++Idx) { 7615 SDValue Load; 7616 if (ScalarVT != ScalarMemVT) 7617 Load = 7618 DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain, 7619 BasePtr, 7620 LN->getPointerInfo().getWithOffset(Idx*Stride), 7621 ScalarMemVT, LN->isVolatile(), LN->isNonTemporal(), 7622 LN->isInvariant(), MinAlign(Alignment, Idx*Stride), 7623 LN->getAAInfo()); 7624 else 7625 Load = 7626 DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr, 7627 LN->getPointerInfo().getWithOffset(Idx*Stride), 7628 LN->isVolatile(), LN->isNonTemporal(), 7629 LN->isInvariant(), MinAlign(Alignment, Idx*Stride), 7630 LN->getAAInfo()); 7631 7632 if (Idx == 0 && LN->isIndexed()) { 7633 assert(LN->getAddressingMode() == ISD::PRE_INC && 7634 "Unknown addressing mode on vector load"); 7635 Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(), 7636 LN->getAddressingMode()); 7637 } 7638 7639 Vals.push_back(Load); 7640 LoadChains.push_back(Load.getValue(1)); 7641 7642 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 7643 DAG.getConstant(Stride, dl, 7644 BasePtr.getValueType())); 7645 } 7646 7647 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 7648 SDValue Value = DAG.getNode(ISD::BUILD_VECTOR, dl, 7649 Op.getValueType(), Vals); 7650 7651 if (LN->isIndexed()) { 7652 SDValue RetOps[] = { Value, Vals[0].getValue(1), TF }; 7653 return DAG.getMergeValues(RetOps, dl); 7654 } 7655 7656 SDValue RetOps[] = { Value, TF }; 7657 return DAG.getMergeValues(RetOps, dl); 7658 } 7659 7660 assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower"); 7661 assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported"); 7662 7663 // To lower v4i1 from a byte array, we load the byte elements of the 7664 // vector and then reuse the BUILD_VECTOR logic. 7665 7666 SmallVector<SDValue, 4> VectElmts, VectElmtChains; 7667 for (unsigned i = 0; i < 4; ++i) { 7668 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 7669 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 7670 7671 VectElmts.push_back(DAG.getExtLoad(ISD::EXTLOAD, 7672 dl, MVT::i32, LoadChain, Idx, 7673 LN->getPointerInfo().getWithOffset(i), 7674 MVT::i8 /* memory type */, 7675 LN->isVolatile(), LN->isNonTemporal(), 7676 LN->isInvariant(), 7677 1 /* alignment */, LN->getAAInfo())); 7678 VectElmtChains.push_back(VectElmts[i].getValue(1)); 7679 } 7680 7681 LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains); 7682 SDValue Value = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i1, VectElmts); 7683 7684 SDValue RVals[] = { Value, LoadChain }; 7685 return DAG.getMergeValues(RVals, dl); 7686 } 7687 7688 /// Lowering for QPX v4i1 stores 7689 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op, 7690 SelectionDAG &DAG) const { 7691 SDLoc dl(Op); 7692 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode()); 7693 SDValue StoreChain = SN->getChain(); 7694 SDValue BasePtr = SN->getBasePtr(); 7695 SDValue Value = SN->getValue(); 7696 7697 if (Value.getValueType() == MVT::v4f64 || 7698 Value.getValueType() == MVT::v4f32) { 7699 EVT MemVT = SN->getMemoryVT(); 7700 unsigned Alignment = SN->getAlignment(); 7701 7702 // If this store is properly aligned, then it is legal. 
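// Otherwise, scalarize below, mirroring the load case: extract each lane
// and emit one (possibly truncating) scalar store per lane.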
7703 if (Alignment >= MemVT.getStoreSize()) 7704 return Op; 7705 7706 EVT ScalarVT = Value.getValueType().getScalarType(), 7707 ScalarMemVT = MemVT.getScalarType(); 7708 unsigned Stride = ScalarMemVT.getStoreSize(); 7709 7710 SmallVector<SDValue, 8> Stores; 7711 for (unsigned Idx = 0; Idx < 4; ++Idx) { 7712 SDValue Ex = DAG.getNode( 7713 ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value, 7714 DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout()))); 7715 SDValue Store; 7716 if (ScalarVT != ScalarMemVT) 7717 Store = 7718 DAG.getTruncStore(StoreChain, dl, Ex, BasePtr, 7719 SN->getPointerInfo().getWithOffset(Idx*Stride), 7720 ScalarMemVT, SN->isVolatile(), SN->isNonTemporal(), 7721 MinAlign(Alignment, Idx*Stride), SN->getAAInfo()); 7722 else 7723 Store = 7724 DAG.getStore(StoreChain, dl, Ex, BasePtr, 7725 SN->getPointerInfo().getWithOffset(Idx*Stride), 7726 SN->isVolatile(), SN->isNonTemporal(), 7727 MinAlign(Alignment, Idx*Stride), SN->getAAInfo()); 7728 7729 if (Idx == 0 && SN->isIndexed()) { 7730 assert(SN->getAddressingMode() == ISD::PRE_INC && 7731 "Unknown addressing mode on vector store"); 7732 Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(), 7733 SN->getAddressingMode()); 7734 } 7735 7736 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 7737 DAG.getConstant(Stride, dl, 7738 BasePtr.getValueType())); 7739 Stores.push_back(Store); 7740 } 7741 7742 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 7743 7744 if (SN->isIndexed()) { 7745 SDValue RetOps[] = { TF, Stores[0].getValue(1) }; 7746 return DAG.getMergeValues(RetOps, dl); 7747 } 7748 7749 return TF; 7750 } 7751 7752 assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported"); 7753 assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower"); 7754 7755 // The values are now known to be -1 (false) or 1 (true). To convert this 7756 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 7757 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 7758 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 7759 7760 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 7761 // understand how to form the extending load. 7762 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::f64); 7763 FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64, 7764 FPHalfs, FPHalfs, FPHalfs, FPHalfs); 7765 7766 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 7767 7768 // Now convert to an integer and store. 7769 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 7770 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 7771 Value); 7772 7773 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 7774 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 7775 MachinePointerInfo PtrInfo = 7776 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7777 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7778 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7779 7780 SmallVector<SDValue, 2> Ops; 7781 Ops.push_back(StoreChain); 7782 Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32)); 7783 Ops.push_back(Value); 7784 Ops.push_back(FIdx); 7785 7786 SmallVector<EVT, 2> ValueVTs; 7787 ValueVTs.push_back(MVT::Other); // chain 7788 SDVTList VTs = DAG.getVTList(ValueVTs); 7789 7790 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 7791 dl, VTs, Ops, MVT::v4i32, PtrInfo); 7792 7793 // Move data into the byte array. 
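// The qvstfiw above wrote the four lane values to the stack slot as
// 32-bit words; reload each word, then truncate-store its low byte to
// the actual i8 destination.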
7794 SmallVector<SDValue, 4> Loads, LoadChains; 7795 for (unsigned i = 0; i < 4; ++i) { 7796 unsigned Offset = 4*i; 7797 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 7798 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 7799 7800 Loads.push_back(DAG.getLoad(MVT::i32, dl, StoreChain, Idx, 7801 PtrInfo.getWithOffset(Offset), 7802 false, false, false, 0)); 7803 LoadChains.push_back(Loads[i].getValue(1)); 7804 } 7805 7806 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 7807 7808 SmallVector<SDValue, 4> Stores; 7809 for (unsigned i = 0; i < 4; ++i) { 7810 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 7811 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 7812 7813 Stores.push_back(DAG.getTruncStore(StoreChain, dl, Loads[i], Idx, 7814 SN->getPointerInfo().getWithOffset(i), 7815 MVT::i8 /* memory type */, 7816 SN->isVolatile(), SN->isNonTemporal(), 7817 1 /* alignment */, SN->getAAInfo())); 7818 } 7819 7820 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 7821 7822 return StoreChain; 7823 } 7824 7825 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 7826 SDLoc dl(Op); 7827 if (Op.getValueType() == MVT::v4i32) { 7828 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 7829 7830 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl); 7831 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl); // +16 as shift amt. 7832 7833 SDValue RHSSwap = // = vrlw RHS, 16 7834 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 7835 7836 // Shrinkify inputs to v8i16. 7837 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS); 7838 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS); 7839 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap); 7840 7841 // Low parts multiplied together, generating 32-bit results (we ignore the 7842 // top parts). 7843 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 7844 LHS, RHS, DAG, dl, MVT::v4i32); 7845 7846 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 7847 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 7848 // Shift the high parts up 16 bits. 7849 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 7850 Neg16, DAG, dl); 7851 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 7852 } else if (Op.getValueType() == MVT::v8i16) { 7853 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 7854 7855 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl); 7856 7857 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 7858 LHS, RHS, Zero, DAG, dl); 7859 } else if (Op.getValueType() == MVT::v16i8) { 7860 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 7861 bool isLittleEndian = Subtarget.isLittleEndian(); 7862 7863 // Multiply the even 8-bit parts, producing 16-bit sums. 7864 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 7865 LHS, RHS, DAG, dl, MVT::v8i16); 7866 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts); 7867 7868 // Multiply the odd 8-bit parts, producing 16-bit sums. 7869 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 7870 LHS, RHS, DAG, dl, MVT::v8i16); 7871 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts); 7872 7873 // Merge the results together. Because vmuleub and vmuloub are 7874 // instructions with a big-endian bias, we must reverse the 7875 // element numbering and reverse the meaning of "odd" and "even" 7876 // when generating little endian code.
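// For example, for i == 0 the shuffle below selects bytes {1, 17} on big
// endian (the low bytes of the first even and odd 16-bit products) but
// bytes {0, 16} on little endian, with the operand order also swapped.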
7877 int Ops[16]; 7878 for (unsigned i = 0; i != 8; ++i) { 7879 if (isLittleEndian) { 7880 Ops[i*2 ] = 2*i; 7881 Ops[i*2+1] = 2*i+16; 7882 } else { 7883 Ops[i*2 ] = 2*i+1; 7884 Ops[i*2+1] = 2*i+1+16; 7885 } 7886 } 7887 if (isLittleEndian) 7888 return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops); 7889 else 7890 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 7891 } else { 7892 llvm_unreachable("Unknown mul to lower!"); 7893 } 7894 } 7895 7896 /// LowerOperation - Provide custom lowering hooks for some operations. 7897 /// 7898 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 7899 switch (Op.getOpcode()) { 7900 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 7901 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 7902 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 7903 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 7904 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 7905 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 7906 case ISD::SETCC: return LowerSETCC(Op, DAG); 7907 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 7908 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 7909 case ISD::VASTART: 7910 return LowerVASTART(Op, DAG, Subtarget); 7911 7912 case ISD::VAARG: 7913 return LowerVAARG(Op, DAG, Subtarget); 7914 7915 case ISD::VACOPY: 7916 return LowerVACOPY(Op, DAG, Subtarget); 7917 7918 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, Subtarget); 7919 case ISD::DYNAMIC_STACKALLOC: 7920 return LowerDYNAMIC_STACKALLOC(Op, DAG, Subtarget); 7921 7922 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 7923 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 7924 7925 case ISD::LOAD: return LowerLOAD(Op, DAG); 7926 case ISD::STORE: return LowerSTORE(Op, DAG); 7927 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); 7928 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 7929 case ISD::FP_TO_UINT: 7930 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, 7931 SDLoc(Op)); 7932 case ISD::UINT_TO_FP: 7933 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 7934 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 7935 7936 // Lower 64-bit shifts. 7937 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 7938 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 7939 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 7940 7941 // Vector-related lowering. 7942 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 7943 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 7944 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 7945 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 7946 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG); 7947 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 7948 case ISD::MUL: return LowerMUL(Op, DAG); 7949 7950 // For counter-based loop handling. 7951 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 7952 7953 // Frame & Return address. 
7954 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 7955 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 7956 } 7957 } 7958 7959 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 7960 SmallVectorImpl<SDValue>&Results, 7961 SelectionDAG &DAG) const { 7962 SDLoc dl(N); 7963 switch (N->getOpcode()) { 7964 default: 7965 llvm_unreachable("Do not know how to custom type legalize this operation!"); 7966 case ISD::READCYCLECOUNTER: { 7967 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 7968 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0)); 7969 7970 Results.push_back(RTB); 7971 Results.push_back(RTB.getValue(1)); 7972 Results.push_back(RTB.getValue(2)); 7973 break; 7974 } 7975 case ISD::INTRINSIC_W_CHAIN: { 7976 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 7977 Intrinsic::ppc_is_decremented_ctr_nonzero) 7978 break; 7979 7980 assert(N->getValueType(0) == MVT::i1 && 7981 "Unexpected result type for CTR decrement intrinsic"); 7982 EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), 7983 N->getValueType(0)); 7984 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 7985 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 7986 N->getOperand(1)); 7987 7988 Results.push_back(NewInt); 7989 Results.push_back(NewInt.getValue(1)); 7990 break; 7991 } 7992 case ISD::VAARG: { 7993 if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64()) 7994 return; 7995 7996 EVT VT = N->getValueType(0); 7997 7998 if (VT == MVT::i64) { 7999 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG, Subtarget); 8000 8001 Results.push_back(NewNode); 8002 Results.push_back(NewNode.getValue(1)); 8003 } 8004 return; 8005 } 8006 case ISD::FP_ROUND_INREG: { 8007 assert(N->getValueType(0) == MVT::ppcf128); 8008 assert(N->getOperand(0).getValueType() == MVT::ppcf128); 8009 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 8010 MVT::f64, N->getOperand(0), 8011 DAG.getIntPtrConstant(0, dl)); 8012 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 8013 MVT::f64, N->getOperand(0), 8014 DAG.getIntPtrConstant(1, dl)); 8015 8016 // Add the two halves of the long double in round-to-zero mode. 8017 SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 8018 8019 // We know the low half is about to be thrown away, so just use something 8020 // convenient. 8021 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128, 8022 FPreg, FPreg)); 8023 return; 8024 } 8025 case ISD::FP_TO_SINT: 8026 case ISD::FP_TO_UINT: 8027 // LowerFP_TO_INT() can only handle f32 and f64. 
8028 if (N->getOperand(0).getValueType() == MVT::ppcf128) 8029 return; 8030 Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl)); 8031 return; 8032 } 8033 } 8034 8035 8036 //===----------------------------------------------------------------------===// 8037 // Other Lowering Code 8038 //===----------------------------------------------------------------------===// 8039 8040 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) { 8041 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 8042 Function *Func = Intrinsic::getDeclaration(M, Id); 8043 return Builder.CreateCall(Func, {}); 8044 } 8045 8046 // The mappings for emitLeading/TrailingFence are taken from 8047 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html 8048 Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder, 8049 AtomicOrdering Ord, bool IsStore, 8050 bool IsLoad) const { 8051 if (Ord == SequentiallyConsistent) 8052 return callIntrinsic(Builder, Intrinsic::ppc_sync); 8053 if (isAtLeastRelease(Ord)) 8054 return callIntrinsic(Builder, Intrinsic::ppc_lwsync); 8055 return nullptr; 8056 } 8057 8058 Instruction* PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder, 8059 AtomicOrdering Ord, bool IsStore, 8060 bool IsLoad) const { 8061 if (IsLoad && isAtLeastAcquire(Ord)) 8062 return callIntrinsic(Builder, Intrinsic::ppc_lwsync); 8063 // FIXME: this is too conservative, a dependent branch + isync is enough. 8064 // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and 8065 // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html 8066 // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification. 8067 return nullptr; 8068 } 8069 8070 MachineBasicBlock * 8071 PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 8072 unsigned AtomicSize, 8073 unsigned BinOpcode) const { 8074 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 8075 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8076 8077 auto LoadMnemonic = PPC::LDARX; 8078 auto StoreMnemonic = PPC::STDCX; 8079 switch (AtomicSize) { 8080 default: 8081 llvm_unreachable("Unexpected size of atomic entity"); 8082 case 1: 8083 LoadMnemonic = PPC::LBARX; 8084 StoreMnemonic = PPC::STBCX; 8085 assert(Subtarget.hasPartwordAtomics() && "No support for partword atomics."); 8086 break; 8087 case 2: 8088 LoadMnemonic = PPC::LHARX; 8089 StoreMnemonic = PPC::STHCX; 8090 assert(Subtarget.hasPartwordAtomics() && "No support for partword atomics."); 8091 break; 8092 case 4: 8093 LoadMnemonic = PPC::LWARX; 8094 StoreMnemonic = PPC::STWCX; 8095 break; 8096 case 8: 8097 LoadMnemonic = PPC::LDARX; 8098 StoreMnemonic = PPC::STDCX; 8099 break; 8100 } 8101 8102 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8103 MachineFunction *F = BB->getParent(); 8104 MachineFunction::iterator It = BB; 8105 ++It; 8106 8107 unsigned dest = MI->getOperand(0).getReg(); 8108 unsigned ptrA = MI->getOperand(1).getReg(); 8109 unsigned ptrB = MI->getOperand(2).getReg(); 8110 unsigned incr = MI->getOperand(3).getReg(); 8111 DebugLoc dl = MI->getDebugLoc(); 8112 8113 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 8114 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 8115 F->insert(It, loopMBB); 8116 F->insert(It, exitMBB); 8117 exitMBB->splice(exitMBB->begin(), BB, 8118 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8119 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 8120 8121 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8122 unsigned TmpReg = (!BinOpcode) ?
incr : 8123 RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass 8124 : &PPC::GPRCRegClass); 8125 8126 // thisMBB: 8127 // ... 8128 // fallthrough --> loopMBB 8129 BB->addSuccessor(loopMBB); 8130 8131 // loopMBB: 8132 // l[wd]arx dest, ptr 8133 // add r0, dest, incr 8134 // st[wd]cx. r0, ptr 8135 // bne- loopMBB 8136 // fallthrough --> exitMBB 8137 BB = loopMBB; 8138 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 8139 .addReg(ptrA).addReg(ptrB); 8140 if (BinOpcode) 8141 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 8142 BuildMI(BB, dl, TII->get(StoreMnemonic)) 8143 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 8144 BuildMI(BB, dl, TII->get(PPC::BCC)) 8145 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 8146 BB->addSuccessor(loopMBB); 8147 BB->addSuccessor(exitMBB); 8148 8149 // exitMBB: 8150 // ... 8151 BB = exitMBB; 8152 return BB; 8153 } 8154 8155 MachineBasicBlock * 8156 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI, 8157 MachineBasicBlock *BB, 8158 bool is8bit, // operation 8159 unsigned BinOpcode) const { 8160 // If we support part-word atomic mnemonics, just use them 8161 if (Subtarget.hasPartwordAtomics()) 8162 return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode); 8163 8164 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 8165 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8166 // In 64 bit mode we have to use 64 bits for addresses, even though the 8167 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 8168 // registers without caring whether they're 32 or 64, but here we're 8169 // doing actual arithmetic on the addresses. 8170 bool is64bit = Subtarget.isPPC64(); 8171 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 8172 8173 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8174 MachineFunction *F = BB->getParent(); 8175 MachineFunction::iterator It = BB; 8176 ++It; 8177 8178 unsigned dest = MI->getOperand(0).getReg(); 8179 unsigned ptrA = MI->getOperand(1).getReg(); 8180 unsigned ptrB = MI->getOperand(2).getReg(); 8181 unsigned incr = MI->getOperand(3).getReg(); 8182 DebugLoc dl = MI->getDebugLoc(); 8183 8184 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 8185 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 8186 F->insert(It, loopMBB); 8187 F->insert(It, exitMBB); 8188 exitMBB->splice(exitMBB->begin(), BB, 8189 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8190 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 8191 8192 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8193 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 8194 : &PPC::GPRCRegClass; 8195 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 8196 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 8197 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 8198 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC); 8199 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 8200 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 8201 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 8202 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 8203 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC); 8204 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 8205 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 8206 unsigned Ptr1Reg; 8207 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC); 8208 8209 // thisMBB: 8210 // ... 
8211 // fallthrough --> loopMBB 8212 BB->addSuccessor(loopMBB); 8213 8214 // The 4-byte load must be aligned, while a char or short may be 8215 // anywhere in the word. Hence all this nasty bookkeeping code. 8216 // add ptr1, ptrA, ptrB [copy if ptrA==0] 8217 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 8218 // xori shift, shift1, 24 [16] 8219 // rlwinm ptr, ptr1, 0, 0, 29 8220 // slw incr2, incr, shift 8221 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 8222 // slw mask, mask2, shift 8223 // loopMBB: 8224 // lwarx tmpDest, ptr 8225 // add tmp, tmpDest, incr2 8226 // andc tmp2, tmpDest, mask 8227 // and tmp3, tmp, mask 8228 // or tmp4, tmp3, tmp2 8229 // stwcx. tmp4, ptr 8230 // bne- loopMBB 8231 // fallthrough --> exitMBB 8232 // srw dest, tmpDest, shift 8233 if (ptrA != ZeroReg) { 8234 Ptr1Reg = RegInfo.createVirtualRegister(RC); 8235 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 8236 .addReg(ptrA).addReg(ptrB); 8237 } else { 8238 Ptr1Reg = ptrB; 8239 } 8240 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 8241 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 8242 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 8243 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 8244 if (is64bit) 8245 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 8246 .addReg(Ptr1Reg).addImm(0).addImm(61); 8247 else 8248 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 8249 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 8250 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg) 8251 .addReg(incr).addReg(ShiftReg); 8252 if (is8bit) 8253 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 8254 else { 8255 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 8256 BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535); 8257 } 8258 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 8259 .addReg(Mask2Reg).addReg(ShiftReg); 8260 8261 BB = loopMBB; 8262 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 8263 .addReg(ZeroReg).addReg(PtrReg); 8264 if (BinOpcode) 8265 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 8266 .addReg(Incr2Reg).addReg(TmpDestReg); 8267 BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg) 8268 .addReg(TmpDestReg).addReg(MaskReg); 8269 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg) 8270 .addReg(TmpReg).addReg(MaskReg); 8271 BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg) 8272 .addReg(Tmp3Reg).addReg(Tmp2Reg); 8273 BuildMI(BB, dl, TII->get(PPC::STWCX)) 8274 .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg); 8275 BuildMI(BB, dl, TII->get(PPC::BCC)) 8276 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 8277 BB->addSuccessor(loopMBB); 8278 BB->addSuccessor(exitMBB); 8279 8280 // exitMBB: 8281 // ... 
8282 BB = exitMBB; 8283 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg) 8284 .addReg(ShiftReg); 8285 return BB; 8286 } 8287 8288 llvm::MachineBasicBlock* 8289 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI, 8290 MachineBasicBlock *MBB) const { 8291 DebugLoc DL = MI->getDebugLoc(); 8292 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8293 8294 MachineFunction *MF = MBB->getParent(); 8295 MachineRegisterInfo &MRI = MF->getRegInfo(); 8296 8297 const BasicBlock *BB = MBB->getBasicBlock(); 8298 MachineFunction::iterator I = MBB; 8299 ++I; 8300 8301 // Memory Reference 8302 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 8303 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 8304 8305 unsigned DstReg = MI->getOperand(0).getReg(); 8306 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 8307 assert(RC->hasType(MVT::i32) && "Invalid destination!"); 8308 unsigned mainDstReg = MRI.createVirtualRegister(RC); 8309 unsigned restoreDstReg = MRI.createVirtualRegister(RC); 8310 8311 MVT PVT = getPointerTy(MF->getDataLayout()); 8312 assert((PVT == MVT::i64 || PVT == MVT::i32) && 8313 "Invalid Pointer Size!"); 8314 // For v = setjmp(buf), we generate 8315 // 8316 // thisMBB: 8317 // SjLjSetup mainMBB 8318 // bl mainMBB 8319 // v_restore = 1 8320 // b sinkMBB 8321 // 8322 // mainMBB: 8323 // buf[LabelOffset] = LR 8324 // v_main = 0 8325 // 8326 // sinkMBB: 8327 // v = phi(main, restore) 8328 // 8329 8330 MachineBasicBlock *thisMBB = MBB; 8331 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 8332 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 8333 MF->insert(I, mainMBB); 8334 MF->insert(I, sinkMBB); 8335 8336 MachineInstrBuilder MIB; 8337 8338 // Transfer the remainder of BB and its successor edges to sinkMBB. 8339 sinkMBB->splice(sinkMBB->begin(), MBB, 8340 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 8341 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 8342 8343 // Note that the structure of the jmp_buf used here is not compatible 8344 // with that used by libc, and is not designed to be. Specifically, it 8345 // stores only those 'reserved' registers that LLVM does not otherwise 8346 // understand how to spill. Also, by convention, by the time this 8347 // intrinsic is called, Clang has already stored the frame address in the 8348 // first slot of the buffer and stack address in the third. Following the 8349 // X86 target code, we'll store the jump address in the second slot. We also 8350 // need to save the TOC pointer (R2) to handle jumps between shared 8351 // libraries, and that will be stored in the fourth slot. The thread 8352 // identifier (R13) is not affected. 8353 8354 // thisMBB: 8355 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 8356 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 8357 const int64_t BPOffset = 4 * PVT.getStoreSize(); 8358 8359 // Prepare the IP in a virtual register. 8360 const TargetRegisterClass *PtrRC = getRegClassFor(PVT); 8361 unsigned LabelReg = MRI.createVirtualRegister(PtrRC); 8362 unsigned BufReg = MI->getOperand(1).getReg(); 8363 8364 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) { 8365 setUsesTOCBasePtr(*MBB->getParent()); 8366 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD)) 8367 .addReg(PPC::X2) 8368 .addImm(TOCOffset) 8369 .addReg(BufReg); 8370 MIB.setMemRefs(MMOBegin, MMOEnd); 8371 } 8372 8373 // Naked functions never have a base pointer, and so we use r1. For all 8374 // other functions, this decision must be delayed until during PEI.
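// Word-sized buffer layout implied by the offsets above:
// buf[0] = frame address (stored by the front end)
// buf[1] = IP (stored in mainMBB below)
// buf[2] = stack address (stored by the front end)
// buf[3] = TOC pointer (R2)
// buf[4] = base pointer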
8375 unsigned BaseReg; 8376 if (MF->getFunction()->hasFnAttribute(Attribute::Naked)) 8377 BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1; 8378 else 8379 BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP; 8380 8381 MIB = BuildMI(*thisMBB, MI, DL, 8382 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW)) 8383 .addReg(BaseReg) 8384 .addImm(BPOffset) 8385 .addReg(BufReg); 8386 MIB.setMemRefs(MMOBegin, MMOEnd); 8387 8388 // Setup 8389 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB); 8390 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 8391 MIB.addRegMask(TRI->getNoPreservedMask()); 8392 8393 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1); 8394 8395 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup)) 8396 .addMBB(mainMBB); 8397 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB); 8398 8399 thisMBB->addSuccessor(mainMBB, /* weight */ 0); 8400 thisMBB->addSuccessor(sinkMBB, /* weight */ 1); 8401 8402 // mainMBB: 8403 // mainDstReg = 0 8404 MIB = 8405 BuildMI(mainMBB, DL, 8406 TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg); 8407 8408 // Store IP 8409 if (Subtarget.isPPC64()) { 8410 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD)) 8411 .addReg(LabelReg) 8412 .addImm(LabelOffset) 8413 .addReg(BufReg); 8414 } else { 8415 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW)) 8416 .addReg(LabelReg) 8417 .addImm(LabelOffset) 8418 .addReg(BufReg); 8419 } 8420 8421 MIB.setMemRefs(MMOBegin, MMOEnd); 8422 8423 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0); 8424 mainMBB->addSuccessor(sinkMBB); 8425 8426 // sinkMBB: 8427 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 8428 TII->get(PPC::PHI), DstReg) 8429 .addReg(mainDstReg).addMBB(mainMBB) 8430 .addReg(restoreDstReg).addMBB(thisMBB); 8431 8432 MI->eraseFromParent(); 8433 return sinkMBB; 8434 } 8435 8436 MachineBasicBlock * 8437 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI, 8438 MachineBasicBlock *MBB) const { 8439 DebugLoc DL = MI->getDebugLoc(); 8440 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8441 8442 MachineFunction *MF = MBB->getParent(); 8443 MachineRegisterInfo &MRI = MF->getRegInfo(); 8444 8445 // Memory Reference 8446 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 8447 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 8448 8449 MVT PVT = getPointerTy(MF->getDataLayout()); 8450 assert((PVT == MVT::i64 || PVT == MVT::i32) && 8451 "Invalid Pointer Size!"); 8452 8453 const TargetRegisterClass *RC = 8454 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 8455 unsigned Tmp = MRI.createVirtualRegister(RC); 8456 // Since FP is only updated here but NOT referenced, it's treated as GPR. 8457 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; 8458 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; 8459 unsigned BP = 8460 (PVT == MVT::i64) 8461 ? PPC::X30 8462 : (Subtarget.isSVR4ABI() && 8463 MF->getTarget().getRelocationModel() == Reloc::PIC_ 8464 ? PPC::R29 8465 : PPC::R30); 8466 8467 MachineInstrBuilder MIB; 8468 8469 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 8470 const int64_t SPOffset = 2 * PVT.getStoreSize(); 8471 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 8472 const int64_t BPOffset = 4 * PVT.getStoreSize(); 8473 8474 unsigned BufReg = MI->getOperand(0).getReg(); 8475 8476 // Reload FP (the jumped-to function may not have had a 8477 // frame pointer, and if so, then its r31 will be restored 8478 // as necessary). 
8479 if (PVT == MVT::i64) { 8480 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP) 8481 .addImm(0) 8482 .addReg(BufReg); 8483 } else { 8484 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP) 8485 .addImm(0) 8486 .addReg(BufReg); 8487 } 8488 MIB.setMemRefs(MMOBegin, MMOEnd); 8489 8490 // Reload IP 8491 if (PVT == MVT::i64) { 8492 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp) 8493 .addImm(LabelOffset) 8494 .addReg(BufReg); 8495 } else { 8496 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp) 8497 .addImm(LabelOffset) 8498 .addReg(BufReg); 8499 } 8500 MIB.setMemRefs(MMOBegin, MMOEnd); 8501 8502 // Reload SP 8503 if (PVT == MVT::i64) { 8504 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP) 8505 .addImm(SPOffset) 8506 .addReg(BufReg); 8507 } else { 8508 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP) 8509 .addImm(SPOffset) 8510 .addReg(BufReg); 8511 } 8512 MIB.setMemRefs(MMOBegin, MMOEnd); 8513 8514 // Reload BP 8515 if (PVT == MVT::i64) { 8516 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP) 8517 .addImm(BPOffset) 8518 .addReg(BufReg); 8519 } else { 8520 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP) 8521 .addImm(BPOffset) 8522 .addReg(BufReg); 8523 } 8524 MIB.setMemRefs(MMOBegin, MMOEnd); 8525 8526 // Reload TOC 8527 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) { 8528 setUsesTOCBasePtr(*MBB->getParent()); 8529 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) 8530 .addImm(TOCOffset) 8531 .addReg(BufReg); 8532 8533 MIB.setMemRefs(MMOBegin, MMOEnd); 8534 } 8535 8536 // Jump 8537 BuildMI(*MBB, MI, DL, 8538 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp); 8539 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR)); 8540 8541 MI->eraseFromParent(); 8542 return MBB; 8543 } 8544 8545 MachineBasicBlock * 8546 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 8547 MachineBasicBlock *BB) const { 8548 if (MI->getOpcode() == TargetOpcode::STACKMAP || 8549 MI->getOpcode() == TargetOpcode::PATCHPOINT) { 8550 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() && 8551 MI->getOpcode() == TargetOpcode::PATCHPOINT) { 8552 // Call lowering should have added an r2 operand to indicate a dependence 8553 // on the TOC base pointer value. It can't however, because there is no 8554 // way to mark the dependence as implicit there, and so the stackmap code 8555 // will confuse it with a regular operand. Instead, add the dependence 8556 // here. 8557 setUsesTOCBasePtr(*BB->getParent()); 8558 MI->addOperand(MachineOperand::CreateReg(PPC::X2, false, true)); 8559 } 8560 8561 return emitPatchPoint(MI, BB); 8562 } 8563 8564 if (MI->getOpcode() == PPC::EH_SjLj_SetJmp32 || 8565 MI->getOpcode() == PPC::EH_SjLj_SetJmp64) { 8566 return emitEHSjLjSetJmp(MI, BB); 8567 } else if (MI->getOpcode() == PPC::EH_SjLj_LongJmp32 || 8568 MI->getOpcode() == PPC::EH_SjLj_LongJmp64) { 8569 return emitEHSjLjLongJmp(MI, BB); 8570 } 8571 8572 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8573 8574 // To "insert" these instructions we actually have to insert their 8575 // control-flow patterns. 
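// That is, each pseudo handled below is expanded into a small diamond or
// loop of new basic blocks rather than a single machine instruction.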
8576 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8577 MachineFunction::iterator It = BB; 8578 ++It; 8579 8580 MachineFunction *F = BB->getParent(); 8581 8582 if (Subtarget.hasISEL() && (MI->getOpcode() == PPC::SELECT_CC_I4 || 8583 MI->getOpcode() == PPC::SELECT_CC_I8 || 8584 MI->getOpcode() == PPC::SELECT_I4 || 8585 MI->getOpcode() == PPC::SELECT_I8)) { 8586 SmallVector<MachineOperand, 2> Cond; 8587 if (MI->getOpcode() == PPC::SELECT_CC_I4 || 8588 MI->getOpcode() == PPC::SELECT_CC_I8) 8589 Cond.push_back(MI->getOperand(4)); 8590 else 8591 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET)); 8592 Cond.push_back(MI->getOperand(1)); 8593 8594 DebugLoc dl = MI->getDebugLoc(); 8595 TII->insertSelect(*BB, MI, dl, MI->getOperand(0).getReg(), 8596 Cond, MI->getOperand(2).getReg(), 8597 MI->getOperand(3).getReg()); 8598 } else if (MI->getOpcode() == PPC::SELECT_CC_I4 || 8599 MI->getOpcode() == PPC::SELECT_CC_I8 || 8600 MI->getOpcode() == PPC::SELECT_CC_F4 || 8601 MI->getOpcode() == PPC::SELECT_CC_F8 || 8602 MI->getOpcode() == PPC::SELECT_CC_QFRC || 8603 MI->getOpcode() == PPC::SELECT_CC_QSRC || 8604 MI->getOpcode() == PPC::SELECT_CC_QBRC || 8605 MI->getOpcode() == PPC::SELECT_CC_VRRC || 8606 MI->getOpcode() == PPC::SELECT_CC_VSFRC || 8607 MI->getOpcode() == PPC::SELECT_CC_VSSRC || 8608 MI->getOpcode() == PPC::SELECT_CC_VSRC || 8609 MI->getOpcode() == PPC::SELECT_I4 || 8610 MI->getOpcode() == PPC::SELECT_I8 || 8611 MI->getOpcode() == PPC::SELECT_F4 || 8612 MI->getOpcode() == PPC::SELECT_F8 || 8613 MI->getOpcode() == PPC::SELECT_QFRC || 8614 MI->getOpcode() == PPC::SELECT_QSRC || 8615 MI->getOpcode() == PPC::SELECT_QBRC || 8616 MI->getOpcode() == PPC::SELECT_VRRC || 8617 MI->getOpcode() == PPC::SELECT_VSFRC || 8618 MI->getOpcode() == PPC::SELECT_VSSRC || 8619 MI->getOpcode() == PPC::SELECT_VSRC) { 8620 // The incoming instruction knows the destination vreg to set, the 8621 // condition code register to branch on, the true/false values to 8622 // select between, and a branch opcode to use. 8623 8624 // thisMBB: 8625 // ... 8626 // TrueVal = ... 8627 // cmpTY ccX, r1, r2 8628 // bCC copy1MBB 8629 // fallthrough --> copy0MBB 8630 MachineBasicBlock *thisMBB = BB; 8631 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 8632 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 8633 DebugLoc dl = MI->getDebugLoc(); 8634 F->insert(It, copy0MBB); 8635 F->insert(It, sinkMBB); 8636 8637 // Transfer the remainder of BB and its successor edges to sinkMBB. 8638 sinkMBB->splice(sinkMBB->begin(), BB, 8639 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8640 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 8641 8642 // Next, add the true and fallthrough blocks as its successors. 
8643 BB->addSuccessor(copy0MBB); 8644 BB->addSuccessor(sinkMBB); 8645 8646 if (MI->getOpcode() == PPC::SELECT_I4 || 8647 MI->getOpcode() == PPC::SELECT_I8 || 8648 MI->getOpcode() == PPC::SELECT_F4 || 8649 MI->getOpcode() == PPC::SELECT_F8 || 8650 MI->getOpcode() == PPC::SELECT_QFRC || 8651 MI->getOpcode() == PPC::SELECT_QSRC || 8652 MI->getOpcode() == PPC::SELECT_QBRC || 8653 MI->getOpcode() == PPC::SELECT_VRRC || 8654 MI->getOpcode() == PPC::SELECT_VSFRC || 8655 MI->getOpcode() == PPC::SELECT_VSSRC || 8656 MI->getOpcode() == PPC::SELECT_VSRC) { 8657 BuildMI(BB, dl, TII->get(PPC::BC)) 8658 .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 8659 } else { 8660 unsigned SelectPred = MI->getOperand(4).getImm(); 8661 BuildMI(BB, dl, TII->get(PPC::BCC)) 8662 .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 8663 } 8664 8665 // copy0MBB: 8666 // %FalseValue = ... 8667 // # fallthrough to sinkMBB 8668 BB = copy0MBB; 8669 8670 // Update machine-CFG edges 8671 BB->addSuccessor(sinkMBB); 8672 8673 // sinkMBB: 8674 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 8675 // ... 8676 BB = sinkMBB; 8677 BuildMI(*BB, BB->begin(), dl, 8678 TII->get(PPC::PHI), MI->getOperand(0).getReg()) 8679 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB) 8680 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 8681 } else if (MI->getOpcode() == PPC::ReadTB) { 8682 // To read the 64-bit time-base register on a 32-bit target, we read the 8683 // two halves. Should the counter have wrapped while it was being read, we 8684 // need to try again. 8685 // ... 8686 // readLoop: 8687 // mfspr Rx,TBU # load from TBU 8688 // mfspr Ry,TB # load from TB 8689 // mfspr Rz,TBU # load from TBU 8690 // cmpw crX,Rx,Rz # check if 'old' == 'new' 8691 // bne readLoop # branch if they're not equal 8692 // ... 8693 8694 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB); 8695 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 8696 DebugLoc dl = MI->getDebugLoc(); 8697 F->insert(It, readMBB); 8698 F->insert(It, sinkMBB); 8699 8700 // Transfer the remainder of BB and its successor edges to sinkMBB.
8701 sinkMBB->splice(sinkMBB->begin(), BB, 8702 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8703 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 8704 8705 BB->addSuccessor(readMBB); 8706 BB = readMBB; 8707 8708 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8709 unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 8710 unsigned LoReg = MI->getOperand(0).getReg(); 8711 unsigned HiReg = MI->getOperand(1).getReg(); 8712 8713 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269); 8714 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268); 8715 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269); 8716 8717 unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 8718 8719 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg) 8720 .addReg(HiReg).addReg(ReadAgainReg); 8721 BuildMI(BB, dl, TII->get(PPC::BCC)) 8722 .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB); 8723 8724 BB->addSuccessor(readMBB); 8725 BB->addSuccessor(sinkMBB); 8726 } 8727 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 8728 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 8729 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 8730 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 8731 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 8732 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4); 8733 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 8734 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8); 8735 8736 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 8737 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 8738 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 8739 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 8740 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 8741 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND); 8742 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 8743 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8); 8744 8745 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 8746 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 8747 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 8748 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 8749 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 8750 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR); 8751 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 8752 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8); 8753 8754 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 8755 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 8756 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 8757 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 8758 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 8759 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR); 8760 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 8761 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8); 8762 8763 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 8764 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND); 8765 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 8766 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND); 8767 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 8768 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND); 8769 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 8770 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8); 8771 8772 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 8773 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 8774 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 8775 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 8776 else 
if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32) 8777 BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF); 8778 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 8779 BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8); 8780 8781 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8) 8782 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 8783 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16) 8784 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 8785 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32) 8786 BB = EmitAtomicBinary(MI, BB, 4, 0); 8787 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64) 8788 BB = EmitAtomicBinary(MI, BB, 8, 0); 8789 8790 else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 8791 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 || 8792 (Subtarget.hasPartwordAtomics() && 8793 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) || 8794 (Subtarget.hasPartwordAtomics() && 8795 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) { 8796 bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64; 8797 8798 auto LoadMnemonic = PPC::LDARX; 8799 auto StoreMnemonic = PPC::STDCX; 8800 switch(MI->getOpcode()) { 8801 default: 8802 llvm_unreachable("Compare and swap of unknown size"); 8803 case PPC::ATOMIC_CMP_SWAP_I8: 8804 LoadMnemonic = PPC::LBARX; 8805 StoreMnemonic = PPC::STBCX; 8806 assert(Subtarget.hasPartwordAtomics() && "No support for partword atomics."); 8807 break; 8808 case PPC::ATOMIC_CMP_SWAP_I16: 8809 LoadMnemonic = PPC::LHARX; 8810 StoreMnemonic = PPC::STHCX; 8811 assert(Subtarget.hasPartwordAtomics() && "No support for partword atomics."); 8812 break; 8813 case PPC::ATOMIC_CMP_SWAP_I32: 8814 LoadMnemonic = PPC::LWARX; 8815 StoreMnemonic = PPC::STWCX; 8816 break; 8817 case PPC::ATOMIC_CMP_SWAP_I64: 8818 LoadMnemonic = PPC::LDARX; 8819 StoreMnemonic = PPC::STDCX; 8820 break; 8821 } 8822 unsigned dest = MI->getOperand(0).getReg(); 8823 unsigned ptrA = MI->getOperand(1).getReg(); 8824 unsigned ptrB = MI->getOperand(2).getReg(); 8825 unsigned oldval = MI->getOperand(3).getReg(); 8826 unsigned newval = MI->getOperand(4).getReg(); 8827 DebugLoc dl = MI->getDebugLoc(); 8828 8829 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 8830 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 8831 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 8832 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 8833 F->insert(It, loop1MBB); 8834 F->insert(It, loop2MBB); 8835 F->insert(It, midMBB); 8836 F->insert(It, exitMBB); 8837 exitMBB->splice(exitMBB->begin(), BB, 8838 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8839 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 8840 8841 // thisMBB: 8842 // ... 8843 // fallthrough --> loopMBB 8844 BB->addSuccessor(loop1MBB); 8845 8846 // loop1MBB: 8847 // l[bhwd]arx dest, ptr 8848 // cmp[wd] dest, oldval 8849 // bne- midMBB 8850 // loop2MBB: 8851 // st[bhwd]cx. newval, ptr 8852 // bne- loopMBB 8853 // b exitBB 8854 // midMBB: 8855 // st[bhwd]cx. dest, ptr 8856 // exitBB: 8857 BB = loop1MBB; 8858 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 8859 .addReg(ptrA).addReg(ptrB); 8860 BuildMI(BB, dl, TII->get(is64bit ?
PPC::CMPD : PPC::CMPW), PPC::CR0) 8861 .addReg(oldval).addReg(dest); 8862 BuildMI(BB, dl, TII->get(PPC::BCC)) 8863 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 8864 BB->addSuccessor(loop2MBB); 8865 BB->addSuccessor(midMBB); 8866 8867 BB = loop2MBB; 8868 BuildMI(BB, dl, TII->get(StoreMnemonic)) 8869 .addReg(newval).addReg(ptrA).addReg(ptrB); 8870 BuildMI(BB, dl, TII->get(PPC::BCC)) 8871 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 8872 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 8873 BB->addSuccessor(loop1MBB); 8874 BB->addSuccessor(exitMBB); 8875 8876 BB = midMBB; 8877 BuildMI(BB, dl, TII->get(StoreMnemonic)) 8878 .addReg(dest).addReg(ptrA).addReg(ptrB); 8879 BB->addSuccessor(exitMBB); 8880 8881 // exitMBB: 8882 // ... 8883 BB = exitMBB; 8884 } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 8885 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 8886 // We must use 64-bit registers for addresses when targeting 64-bit, 8887 // since we're actually doing arithmetic on them. Other registers 8888 // can be 32-bit. 8889 bool is64bit = Subtarget.isPPC64(); 8890 bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 8891 8892 unsigned dest = MI->getOperand(0).getReg(); 8893 unsigned ptrA = MI->getOperand(1).getReg(); 8894 unsigned ptrB = MI->getOperand(2).getReg(); 8895 unsigned oldval = MI->getOperand(3).getReg(); 8896 unsigned newval = MI->getOperand(4).getReg(); 8897 DebugLoc dl = MI->getDebugLoc(); 8898 8899 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 8900 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 8901 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 8902 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 8903 F->insert(It, loop1MBB); 8904 F->insert(It, loop2MBB); 8905 F->insert(It, midMBB); 8906 F->insert(It, exitMBB); 8907 exitMBB->splice(exitMBB->begin(), BB, 8908 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8909 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 8910 8911 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8912 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 8913 : &PPC::GPRCRegClass; 8914 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 8915 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 8916 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 8917 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC); 8918 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC); 8919 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC); 8920 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC); 8921 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 8922 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 8923 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 8924 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 8925 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 8926 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 8927 unsigned Ptr1Reg; 8928 unsigned TmpReg = RegInfo.createVirtualRegister(RC); 8929 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 8930 // thisMBB: 8931 // ... 8932 // fallthrough --> loopMBB 8933 BB->addSuccessor(loop1MBB); 8934 8935 // The 4-byte load must be aligned, while a char or short may be 8936 // anywhere in the word. Hence all this nasty bookkeeping code. 
    //   add ptr1, ptrA, ptrB [copy if ptrA==0]
    //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
    //   xori shift, shift1, 24 [16]
    //   rlwinm ptr, ptr1, 0, 0, 29
    //   slw newval2, newval, shift
    //   slw oldval2, oldval, shift
    //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
    //   slw mask, mask2, shift
    //   and newval3, newval2, mask
    //   and oldval3, oldval2, mask
    // loop1MBB:
    //   lwarx tmpDest, ptr
    //   and tmp, tmpDest, mask
    //   cmpw tmp, oldval3
    //   bne- midMBB
    // loop2MBB:
    //   andc tmp2, tmpDest, mask
    //   or tmp4, tmp2, newval3
    //   stwcx. tmp4, ptr
    //   bne- loop1MBB
    //   b exitMBB
    // midMBB:
    //   stwcx. tmpDest, ptr
    // exitMBB:
    //   srw dest, tmpDest, shift
    if (ptrA != ZeroReg) {
      Ptr1Reg = RegInfo.createVirtualRegister(RC);
      BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
        .addReg(ptrA).addReg(ptrB);
    } else {
      Ptr1Reg = ptrB;
    }
    BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
      .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
      .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
    if (is64bit)
      BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
        .addReg(Ptr1Reg).addImm(0).addImm(61);
    else
      BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
        .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
    BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
      .addReg(newval).addReg(ShiftReg);
    BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
      .addReg(oldval).addReg(ShiftReg);
    if (is8bit)
      BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
    else {
      BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
      BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
        .addReg(Mask3Reg).addImm(65535);
    }
    BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
      .addReg(Mask2Reg).addReg(ShiftReg);
    BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
      .addReg(NewVal2Reg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
      .addReg(OldVal2Reg).addReg(MaskReg);

    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
      .addReg(ZeroReg).addReg(PtrReg);
    BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
      .addReg(TmpDestReg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
      .addReg(TmpReg).addReg(OldVal3Reg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(midMBB);

    BB = loop2MBB;
    BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
      .addReg(TmpDestReg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
      .addReg(Tmp2Reg).addReg(NewVal3Reg);
    BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg)
      .addReg(ZeroReg).addReg(PtrReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
    BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
    BB->addSuccessor(loop1MBB);
    BB->addSuccessor(exitMBB);

    BB = midMBB;
    BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg)
      .addReg(ZeroReg).addReg(PtrReg);
    BB->addSuccessor(exitMBB);

    // exitMBB:
    //   ...
    BB = exitMBB;
    BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpReg)
      .addReg(ShiftReg);
  } else if (MI->getOpcode() == PPC::FADDrtz) {
    // This pseudo performs an FADD with rounding mode temporarily forced
    // to round-to-zero.  We emit this via custom inserter since the FPSCR
    // is not modeled at the SelectionDAG level.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src1 = MI->getOperand(1).getReg();
    unsigned Src2 = MI->getOperand(2).getReg();
    DebugLoc dl = MI->getDebugLoc();

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);

    // Save FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);

    // Set rounding mode to round-to-zero.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);

    // Perform addition.
    BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);

    // Restore FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
  } else if (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT ||
             MI->getOpcode() == PPC::ANDIo_1_GT_BIT ||
             MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
             MI->getOpcode() == PPC::ANDIo_1_GT_BIT8) {
    unsigned Opcode = (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
                       MI->getOpcode() == PPC::ANDIo_1_GT_BIT8) ?
                      PPC::ANDIo8 : PPC::ANDIo;
    bool isEQ = (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT ||
                 MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8);

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ?
                                                  &PPC::GPRCRegClass :
                                                  &PPC::G8RCRegClass);

    DebugLoc dl = MI->getDebugLoc();
    BuildMI(*BB, MI, dl, TII->get(Opcode), Dest)
      .addReg(MI->getOperand(1).getReg()).addImm(1);
    BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY),
            MI->getOperand(0).getReg())
      .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT);
  } else if (MI->getOpcode() == PPC::TCHECK_RET) {
    DebugLoc Dl = MI->getDebugLoc();
    MachineRegisterInfo &RegInfo = F->getRegInfo();
    unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
    BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
    return BB;
  } else {
    llvm_unreachable("Unexpected instr type to insert");
  }

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

static std::string getRecipOp(const char *Base, EVT VT) {
  std::string RecipOp(Base);
  if (VT.getScalarType() == MVT::f64)
    RecipOp += "d";
  else
    RecipOp += "f";

  if (VT.isVector())
    RecipOp = "vec-" + RecipOp;

  return RecipOp;
}

SDValue PPCTargetLowering::getRsqrtEstimate(SDValue Operand,
                                            DAGCombinerInfo &DCI,
                                            unsigned &RefinementSteps,
                                            bool &UseOneConstNR) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
      (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
      (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
      (VT == MVT::v4f64 && Subtarget.hasQPX())) {
    TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals;
    std::string RecipOp = getRecipOp("sqrt", VT);
    if (!Recips.isEnabled(RecipOp))
      return SDValue();

    RefinementSteps = Recips.getRefinementSteps(RecipOp);
    UseOneConstNR = true;
    return DCI.DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand,
                                            DAGCombinerInfo &DCI,
                                            unsigned &RefinementSteps) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
      (VT == MVT::f64 && Subtarget.hasFRE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
      (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
      (VT == MVT::v4f64 && Subtarget.hasQPX())) {
    TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals;
    std::string RecipOp = getRecipOp("div", VT);
    if (!Recips.isEnabled(RecipOp))
      return SDValue();

    RefinementSteps = Recips.getRefinementSteps(RecipOp);
    return DCI.DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
  // Note: This functionality is used only when unsafe-fp-math is enabled,
  // and on cores with reciprocal estimates (which are used when
  // unsafe-fp-math is enabled for division), this functionality is redundant
  // with the default combiner logic (once the division ->
  // reciprocal/multiply transformation has taken place).  As a result, this
  // matters more for older cores than for newer ones.

  // Combine multiple FDIVs with the same divisor into multiple FMULs by the
  // reciprocal if there are two or more FDIVs (for embedded cores with only
  // one FP pipeline) or three or more FDIVs (for generic OOO cores).
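  // For example, with a threshold of two, the combiner can rewrite:
  //   a/d; b/d;  ==>  r = 1.0/d; a*r; b*r;
  // leaving a single (slow) divide no matter how many uses of d there are.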
  switch (Subtarget.getDarwinDirective()) {
  default:
    return 3;
  case PPC::DIR_440:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
    return 2;
  }
}

static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
                               unsigned Bytes, int Dist,
                               SelectionDAG &DAG) {
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDValue BaseLoc = Base->getBasePtr();
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    const MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
    int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS = MFI->getObjectSize(FI);
    int BFS = MFI->getObjectSize(BFI);
    if (FS != BFS || FS != (int)Bytes) return false;
    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) +
                                        Dist*Bytes);
  }

  // Handle X+C.
  if (DAG.isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc &&
      cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes)
    return true;

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const GlobalValue *GV1 = nullptr;
  const GlobalValue *GV2 = nullptr;
  int64_t Offset1 = 0;
  int64_t Offset2 = 0;
  bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
  bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist*Bytes);
  return false;
}

// Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
// not enforce equality of the chain operands.
static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
                            unsigned Bytes, int Dist,
                            SelectionDAG &DAG) {
  if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
    EVT VT = LS->getMemoryVT();
    SDValue Loc = LS->getBasePtr();
    return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
  }

  if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
    EVT VT;
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    default: return false;
    case Intrinsic::ppc_qpx_qvlfd:
    case Intrinsic::ppc_qpx_qvlfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfs:
    case Intrinsic::ppc_qpx_qvlfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcd:
    case Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcs:
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    case Intrinsic::ppc_qpx_qvlfiwa:
    case Intrinsic::ppc_qpx_qvlfiwz:
    case Intrinsic::ppc_altivec_lvx:
    case Intrinsic::ppc_altivec_lvxl:
    case Intrinsic::ppc_vsx_lxvw4x:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    }

    return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
  }

  if (N->getOpcode() == ISD::INTRINSIC_VOID) {
    EVT VT;
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    default: return false;
    case Intrinsic::ppc_qpx_qvstfd:
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    case Intrinsic::ppc_qpx_qvstfiw:
    case Intrinsic::ppc_qpx_qvstfiwa:
    case Intrinsic::ppc_altivec_stvx:
    case Intrinsic::ppc_altivec_stvxl:
    case Intrinsic::ppc_vsx_stxvw4x:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    }

    return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
  }

  return false;
}

// Returns true if there is a nearby consecutive load to the one provided
// (regardless of alignment).  We search up and down the chain, looking
// through token factors and other loads (but nothing else).  As a result, a
// true result indicates that it is safe to create a new consecutive load
// adjacent to the load provided.
static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
  SDValue Chain = LD->getChain();
  EVT VT = LD->getMemoryVT();

  SmallSet<SDNode *, 16> LoadRoots;
  SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
  SmallSet<SDNode *, 16> Visited;

  // First, search up the chain, branching to follow all token-factor
  // operands.  If we find a consecutive load, then we're done; otherwise,
  // record all nodes just above the top-level loads and token factors.
  while (!Queue.empty()) {
    SDNode *ChainNext = Queue.pop_back_val();
    if (!Visited.insert(ChainNext).second)
      continue;

    if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
      if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
        return true;

      if (!Visited.count(ChainLD->getChain().getNode()))
        Queue.push_back(ChainLD->getChain().getNode());
    } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
      for (const SDUse &O : ChainNext->ops())
        if (!Visited.count(O.getNode()))
          Queue.push_back(O.getNode());
    } else
      LoadRoots.insert(ChainNext);
  }

  // Second, search down the chain, starting from the top-level nodes
  // recorded in the first phase.  These top-level nodes are the nodes just
  // above all loads and token factors.  Starting with their uses,
  // recursively look through all loads (just the chain uses) and token
  // factors to find a consecutive load.
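  // For example, given a v4i32 load from some base address, finding another
  // v4i32 load from base+16 anywhere in this web of token factors and loads
  // means it is safe to create a new load adjacent to the original one.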
  Visited.clear();
  Queue.clear();

  for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
       IE = LoadRoots.end(); I != IE; ++I) {
    Queue.push_back(*I);

    while (!Queue.empty()) {
      SDNode *LoadRoot = Queue.pop_back_val();
      if (!Visited.insert(LoadRoot).second)
        continue;

      if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
        if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
          return true;

      for (SDNode::use_iterator UI = LoadRoot->use_begin(),
           UE = LoadRoot->use_end(); UI != UE; ++UI)
        if (((isa<MemSDNode>(*UI) &&
              cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
          Queue.push_back(*UI);
    }
  }

  return false;
}

SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
  // If we're tracking CR bits, we need to be careful that we don't have:
  //   trunc(binary-ops(zext(x), zext(y)))
  // or
  //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...))
  // such that we're unnecessarily moving things into GPRs when it would be
  // better to keep them in CR bits.

  // Note that trunc here can be an actual i1 trunc, or can be the effective
  // truncation that comes from a setcc or select_cc.
  if (N->getOpcode() == ISD::TRUNCATE &&
      N->getValueType(0) != MVT::i1)
    return SDValue();

  if (N->getOperand(0).getValueType() != MVT::i32 &&
      N->getOperand(0).getValueType() != MVT::i64)
    return SDValue();

  if (N->getOpcode() == ISD::SETCC ||
      N->getOpcode() == ISD::SELECT_CC) {
    // If we're looking at a comparison, then we need to make sure that the
    // high bits (all except for the first) don't affect the result.
    ISD::CondCode CC =
      cast<CondCodeSDNode>(N->getOperand(
        N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
    unsigned OpBits = N->getOperand(0).getValueSizeInBits();

    if (ISD::isSignedIntSetCC(CC)) {
      if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
          DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
        return SDValue();
    } else if (ISD::isUnsignedIntSetCC(CC)) {
      if (!DAG.MaskedValueIsZero(N->getOperand(0),
                                 APInt::getHighBitsSet(OpBits, OpBits-1)) ||
          !DAG.MaskedValueIsZero(N->getOperand(1),
                                 APInt::getHighBitsSet(OpBits, OpBits-1)))
        return SDValue();
    } else {
      // This is neither a signed nor an unsigned comparison, just make sure
      // that the high bits are equal.
      APInt Op1Zero, Op1One;
      APInt Op2Zero, Op2One;
      DAG.computeKnownBits(N->getOperand(0), Op1Zero, Op1One);
      DAG.computeKnownBits(N->getOperand(1), Op2Zero, Op2One);

      // We don't really care about what is known about the first bit (if
      // anything), so clear it in all masks prior to comparing them.
      Op1Zero.clearBit(0); Op1One.clearBit(0);
      Op2Zero.clearBit(0); Op2One.clearBit(0);

      if (Op1Zero != Op2Zero || Op1One != Op2One)
        return SDValue();
    }
  }

  // We now know that the higher-order bits are irrelevant, we just need to
  // make sure that all of the intermediate operations are bit operations,
  // and all inputs are extensions.
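  // For example, a typical candidate for this combine is:
  //   trunc(or(zext(a), zext(b)))   [with a and b of type i1]
  // which can instead be computed directly as or(a, b) in CR bits.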
  if (N->getOperand(0).getOpcode() != ISD::AND &&
      N->getOperand(0).getOpcode() != ISD::OR &&
      N->getOperand(0).getOpcode() != ISD::XOR &&
      N->getOperand(0).getOpcode() != ISD::SELECT &&
      N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
      N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
      N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
      N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
      N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
    return SDValue();

  if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
      N->getOperand(1).getOpcode() != ISD::AND &&
      N->getOperand(1).getOpcode() != ISD::OR &&
      N->getOperand(1).getOpcode() != ISD::XOR &&
      N->getOperand(1).getOpcode() != ISD::SELECT &&
      N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
      N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
      N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
      N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
      N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
    return SDValue();

  SmallVector<SDValue, 4> Inputs;
  SmallVector<SDValue, 8> BinOps, PromOps;
  SmallPtrSet<SDNode *, 16> Visited;

  for (unsigned i = 0; i < 2; ++i) {
    if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
          N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
          N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
         N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
        isa<ConstantSDNode>(N->getOperand(i)))
      Inputs.push_back(N->getOperand(i));
    else
      BinOps.push_back(N->getOperand(i));

    if (N->getOpcode() == ISD::TRUNCATE)
      break;
  }

  // Visit all inputs, collect all binary operations (and, or, xor and
  // select) that are all fed by extensions.
  while (!BinOps.empty()) {
    SDValue BinOp = BinOps.back();
    BinOps.pop_back();

    if (!Visited.insert(BinOp.getNode()).second)
      continue;

    PromOps.push_back(BinOp);

    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
      // The condition of the select is not promoted.
      if (BinOp.getOpcode() == ISD::SELECT && i == 0)
        continue;
      if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
        continue;

      if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
            BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
            BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
           BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
          isa<ConstantSDNode>(BinOp.getOperand(i))) {
        Inputs.push_back(BinOp.getOperand(i));
      } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
                 BinOp.getOperand(i).getOpcode() == ISD::OR ||
                 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
                 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
                 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
                 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
                 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
        BinOps.push_back(BinOp.getOperand(i));
      } else {
        // We have an input that is not an extension or another binary
        // operation; we'll abort this transformation.
        return SDValue();
      }
    }
  }

  // Make sure that this is a self-contained cluster of operations (which
  // is not quite the same thing as saying that everything has only one
  // use).
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
         UE = Inputs[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // Make sure that we're not going to promote the non-output-value
      // operand(s) of SELECT or SELECT_CC.
      // FIXME: Although we could sometimes handle this, and it does occur in
      // practice that one of the condition inputs to the select is also one
      // of the outputs, we currently can't deal with this.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == Inputs[i])
          return SDValue();
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == Inputs[i] ||
            User->getOperand(1) == Inputs[i])
          return SDValue();
      }
    }
  }

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
    for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
         UE = PromOps[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // Make sure that we're not going to promote the non-output-value
      // operand(s) of SELECT or SELECT_CC.
      // FIXME: Although we could sometimes handle this, and it does occur in
      // practice that one of the condition inputs to the select is also one
      // of the outputs, we currently can't deal with this.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == PromOps[i])
          return SDValue();
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == PromOps[i] ||
            User->getOperand(1) == PromOps[i])
          return SDValue();
      }
    }
  }

  // Replace all inputs with the extension operand.
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    // Constants may have users outside the cluster of to-be-promoted nodes,
    // and so we need to replace those as we do the promotions.
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;
    else
      DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
  }

  // Replace all operations (these are all the same, but have a different
  // (i1) return type).  DAG.getNode will validate that the types of
  // a binary operator match, so go through the list in reverse so that
  // we've likely promoted both operands first.  Any intermediate truncations
  // or extensions disappear.
  while (!PromOps.empty()) {
    SDValue PromOp = PromOps.back();
    PromOps.pop_back();

    if (PromOp.getOpcode() == ISD::TRUNCATE ||
        PromOp.getOpcode() == ISD::SIGN_EXTEND ||
        PromOp.getOpcode() == ISD::ZERO_EXTEND ||
        PromOp.getOpcode() == ISD::ANY_EXTEND) {
      if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
          PromOp.getOperand(0).getValueType() != MVT::i1) {
        // The operand is not yet ready (see comment below).
        PromOps.insert(PromOps.begin(), PromOp);
        continue;
      }

      SDValue RepValue = PromOp.getOperand(0);
      if (isa<ConstantSDNode>(RepValue))
        RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);

      DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
      continue;
    }

    unsigned C;
    switch (PromOp.getOpcode()) {
    default: C = 0; break;
    case ISD::SELECT: C = 1; break;
    case ISD::SELECT_CC: C = 2; break;
    }

    if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
         PromOp.getOperand(C).getValueType() != MVT::i1) ||
        (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
         PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
      // The to-be-promoted operands of this node have not yet been
      // promoted (this should be rare because we're going through the
      // list backward, but if one of the operands has several users in
      // this cluster of to-be-promoted nodes, it is possible).
      PromOps.insert(PromOps.begin(), PromOp);
      continue;
    }

    SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
                                PromOp.getNode()->op_end());

    // If there are any constant inputs, make sure they're replaced now.
    for (unsigned i = 0; i < 2; ++i)
      if (isa<ConstantSDNode>(Ops[C+i]))
        Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);

    DAG.ReplaceAllUsesOfValueWith(PromOp,
      DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
  }

  // Now we're left with the initial truncation itself.
  if (N->getOpcode() == ISD::TRUNCATE)
    return N->getOperand(0);

  // Otherwise, this is a comparison.  The operands to be compared have just
  // changed type (to i1), but everything else is the same.
  return SDValue(N, 0);
}

SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  // If we're tracking CR bits, we need to be careful that we don't have:
  //   zext(binary-ops(trunc(x), trunc(y)))
  // or
  //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...))
  // such that we're unnecessarily moving things into CR bits that can more
  // efficiently stay in GPRs.  Note that if we're not certain that the high
  // bits are set as required by the final extension, we still may need to
  // do some masking to get the proper behavior.

  // This same functionality is important on PPC64 when dealing with
  // 32-to-64-bit extensions; these occur often when 32-bit values are used
  // as the return values of functions.  Because it is so similar, it is
  // handled here as well.
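  // For example, with x and y already of type i32:
  //   zext(and(trunc(x), trunc(y)))  ==>  and(x, y)
  // possibly followed by masking, as determined below via ReallyNeedsExt.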

  if (N->getValueType(0) != MVT::i32 &&
      N->getValueType(0) != MVT::i64)
    return SDValue();

  if (!((N->getOperand(0).getValueType() == MVT::i1 &&
         Subtarget.useCRBits()) ||
        (N->getOperand(0).getValueType() == MVT::i32 &&
         Subtarget.isPPC64())))
    return SDValue();

  if (N->getOperand(0).getOpcode() != ISD::AND &&
      N->getOperand(0).getOpcode() != ISD::OR &&
      N->getOperand(0).getOpcode() != ISD::XOR &&
      N->getOperand(0).getOpcode() != ISD::SELECT &&
      N->getOperand(0).getOpcode() != ISD::SELECT_CC)
    return SDValue();

  SmallVector<SDValue, 4> Inputs;
  SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
  SmallPtrSet<SDNode *, 16> Visited;

  // Visit all inputs, collect all binary operations (and, or, xor and
  // select) that are all fed by truncations.
  while (!BinOps.empty()) {
    SDValue BinOp = BinOps.back();
    BinOps.pop_back();

    if (!Visited.insert(BinOp.getNode()).second)
      continue;

    PromOps.push_back(BinOp);

    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
      // The condition of the select is not promoted.
      if (BinOp.getOpcode() == ISD::SELECT && i == 0)
        continue;
      if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
        continue;

      if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
          isa<ConstantSDNode>(BinOp.getOperand(i))) {
        Inputs.push_back(BinOp.getOperand(i));
      } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
                 BinOp.getOperand(i).getOpcode() == ISD::OR ||
                 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
        BinOps.push_back(BinOp.getOperand(i));
      } else {
        // We have an input that is not a truncation or another binary
        // operation; we'll abort this transformation.
        return SDValue();
      }
    }
  }

  // The operands of a select that must be truncated when the select is
  // promoted because the operand is actually part of the to-be-promoted set.
  DenseMap<SDNode *, EVT> SelectTruncOp[2];

  // Make sure that this is a self-contained cluster of operations (which
  // is not quite the same thing as saying that everything has only one
  // use).
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
         UE = Inputs[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // If we're going to promote the non-output-value operand(s) of SELECT
      // or SELECT_CC, record them for truncation.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == Inputs[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                    User->getOperand(0).getValueType()));
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == Inputs[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                    User->getOperand(0).getValueType()));
        if (User->getOperand(1) == Inputs[i])
          SelectTruncOp[1].insert(std::make_pair(User,
                                    User->getOperand(1).getValueType()));
      }
    }
  }

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
    for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
         UE = PromOps[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // If we're going to promote the non-output-value operand(s) of SELECT
      // or SELECT_CC, record them for truncation.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == PromOps[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                    User->getOperand(0).getValueType()));
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == PromOps[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                    User->getOperand(0).getValueType()));
        if (User->getOperand(1) == PromOps[i])
          SelectTruncOp[1].insert(std::make_pair(User,
                                    User->getOperand(1).getValueType()));
      }
    }
  }

  unsigned PromBits = N->getOperand(0).getValueSizeInBits();
  bool ReallyNeedsExt = false;
  if (N->getOpcode() != ISD::ANY_EXTEND) {
    // If all of the inputs are not already sign/zero extended, then
    // we'll still need to do that at the end.
    for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
      if (isa<ConstantSDNode>(Inputs[i]))
        continue;

      unsigned OpBits =
        Inputs[i].getOperand(0).getValueSizeInBits();
      assert(PromBits < OpBits && "Truncation not to a smaller bit count?");

      if ((N->getOpcode() == ISD::ZERO_EXTEND &&
           !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
                                  APInt::getHighBitsSet(OpBits,
                                                        OpBits-PromBits))) ||
          (N->getOpcode() == ISD::SIGN_EXTEND &&
           DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
             (OpBits-(PromBits-1)))) {
        ReallyNeedsExt = true;
        break;
      }
    }
  }

  // Replace all inputs, either with the truncation operand, or a
  // truncation or extension to the final output type.
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    // Constant inputs need to be replaced with the to-be-promoted nodes
    // that use them because they might have users outside of the cluster
    // of promoted nodes.
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    SDValue InSrc = Inputs[i].getOperand(0);
    if (Inputs[i].getValueType() == N->getValueType(0))
      DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
    else if (N->getOpcode() == ISD::SIGN_EXTEND)
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
        DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
    else if (N->getOpcode() == ISD::ZERO_EXTEND)
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
        DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
    else
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
        DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
  }

  // Replace all operations (these are all the same, but have a different
  // (promoted) return type).  DAG.getNode will validate that the types of
  // a binary operator match, so go through the list in reverse so that
  // we've likely promoted both operands first.
  while (!PromOps.empty()) {
    SDValue PromOp = PromOps.back();
    PromOps.pop_back();

    unsigned C;
    switch (PromOp.getOpcode()) {
    default: C = 0; break;
    case ISD::SELECT: C = 1; break;
    case ISD::SELECT_CC: C = 2; break;
    }

    if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
         PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
        (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
         PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
      // The to-be-promoted operands of this node have not yet been
      // promoted (this should be rare because we're going through the
      // list backward, but if one of the operands has several users in
      // this cluster of to-be-promoted nodes, it is possible).
      PromOps.insert(PromOps.begin(), PromOp);
      continue;
    }

    // For SELECT and SELECT_CC nodes, we do a similar check for any
    // to-be-promoted comparison inputs.
    if (PromOp.getOpcode() == ISD::SELECT ||
        PromOp.getOpcode() == ISD::SELECT_CC) {
      if ((SelectTruncOp[0].count(PromOp.getNode()) &&
           PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
          (SelectTruncOp[1].count(PromOp.getNode()) &&
           PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
        PromOps.insert(PromOps.begin(), PromOp);
        continue;
      }
    }

    SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
                                PromOp.getNode()->op_end());

    // If this node has constant inputs, then they'll need to be promoted
    // here.
    for (unsigned i = 0; i < 2; ++i) {
      if (!isa<ConstantSDNode>(Ops[C+i]))
        continue;
      if (Ops[C+i].getValueType() == N->getValueType(0))
        continue;

      if (N->getOpcode() == ISD::SIGN_EXTEND)
        Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
      else if (N->getOpcode() == ISD::ZERO_EXTEND)
        Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
      else
        Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
    }

    // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
    // truncate them again to the original value type.
    if (PromOp.getOpcode() == ISD::SELECT ||
        PromOp.getOpcode() == ISD::SELECT_CC) {
      auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
      if (SI0 != SelectTruncOp[0].end())
        Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
      auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
      if (SI1 != SelectTruncOp[1].end())
        Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
    }

    DAG.ReplaceAllUsesOfValueWith(PromOp,
      DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
  }

  // Now we're left with the initial extension itself.
  if (!ReallyNeedsExt)
    return N->getOperand(0);

  // To zero extend, just mask off everything except for the first bit (in
  // the i1 case).
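  // For example, when PromBits == 1 and the result type is i32:
  //   zero extension becomes and(x, 1), and
  //   sign extension becomes sra(shl(x, 31), 31).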
  if (N->getOpcode() == ISD::ZERO_EXTEND)
    return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
                       DAG.getConstant(APInt::getLowBitsSet(
                                         N->getValueSizeInBits(0), PromBits),
                                       dl, N->getValueType(0)));

  assert(N->getOpcode() == ISD::SIGN_EXTEND &&
         "Invalid extension type");
  EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0),
                                       DAG.getDataLayout());
  SDValue ShiftCst =
    DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
  return DAG.getNode(ISD::SRA, dl, N->getValueType(0),
                     DAG.getNode(ISD::SHL, dl, N->getValueType(0),
                                 N->getOperand(0), ShiftCst), ShiftCst);
}

SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::SINT_TO_FP ||
          N->getOpcode() == ISD::UINT_TO_FP) &&
         "Need an int -> FP conversion node here");

  if (!Subtarget.has64BitSupport())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Op(N, 0);

  // Don't handle ppc_fp128 or i1 conversions here.
  if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
    return SDValue();
  if (Op.getOperand(0).getValueType() == MVT::i1)
    return SDValue();

  // For i32 intermediate values, unfortunately, the conversion functions
  // leave the upper 32 bits of the value undefined.  Within the set of
  // scalar instructions, we have no method for zero- or sign-extending the
  // value.  Thus, we cannot handle i32 intermediate values here.
  if (Op.getOperand(0).getValueType() == MVT::i32)
    return SDValue();

  assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
         "UINT_TO_FP is supported only with FPCVT");

  // If we have FCFIDS, then use it when converting to single-precision.
  // Otherwise, convert to double-precision and then round.
  unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                       ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
                                                            : PPCISD::FCFIDS)
                       : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
                                                            : PPCISD::FCFID);
  MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                  ? MVT::f32
                  : MVT::f64;

  // If we're converting from a float, to an int, and back to a float again,
  // then we don't need the store/load pair at all.
  if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
       Subtarget.hasFPCVT()) ||
      (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
    SDValue Src = Op.getOperand(0).getOperand(0);
    if (Src.getValueType() == MVT::f32) {
      Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
      DCI.AddToWorklist(Src.getNode());
    }

    unsigned FCTOp =
      Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
                                                        PPCISD::FCTIDUZ;

    SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
    SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);

    if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
      FP = DAG.getNode(ISD::FP_ROUND, dl,
                       MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
      DCI.AddToWorklist(FP.getNode());
    }

    return FP;
  }

  return SDValue();
}

// expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
// builtins) into loads with swaps.
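//
// On little-endian targets, the underlying lxvd2x instruction loads the two
// doublewords in the opposite element order from what the DAG expects, so
// each load below is paired with an xxswapd; roughly (registers
// illustrative):
//   lxvd2x vs0, 0, r3
//   xxswapd vs0, vs0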
SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Chain;
  SDValue Base;
  MachineMemOperand *MMO;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode for little endian VSX load");
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    Chain = LD->getChain();
    Base = LD->getBasePtr();
    MMO = LD->getMemOperand();
    // If the MMO suggests this isn't a load of a full vector, leave
    // things alone.  For a built-in, we have to make the change for
    // correctness, so if there is a size problem that will be a bug.
    if (MMO->getSize() < 16)
      return SDValue();
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
    Chain = Intrin->getChain();
    // Similarly to the store case below, Intrin->getBasePtr() doesn't get
    // us what we want.  Get operand 2 instead.
    Base = Intrin->getOperand(2);
    MMO = Intrin->getMemOperand();
    break;
  }
  }

  MVT VecTy = N->getValueType(0).getSimpleVT();
  SDValue LoadOps[] = { Chain, Base };
  SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
                                         DAG.getVTList(VecTy, MVT::Other),
                                         LoadOps, VecTy, MMO);
  DCI.AddToWorklist(Load.getNode());
  Chain = Load.getValue(1);
  SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
                             DAG.getVTList(VecTy, MVT::Other), Chain, Load);
  DCI.AddToWorklist(Swap.getNode());
  return Swap;
}

// expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
// builtins) into stores with swaps.
SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Chain;
  SDValue Base;
  unsigned SrcOpnd;
  MachineMemOperand *MMO;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode for little endian VSX store");
  case ISD::STORE: {
    StoreSDNode *ST = cast<StoreSDNode>(N);
    Chain = ST->getChain();
    Base = ST->getBasePtr();
    MMO = ST->getMemOperand();
    SrcOpnd = 1;
    // If the MMO suggests this isn't a store of a full vector, leave
    // things alone.  For a built-in, we have to make the change for
    // correctness, so if there is a size problem that will be a bug.
    if (MMO->getSize() < 16)
      return SDValue();
    break;
  }
  case ISD::INTRINSIC_VOID: {
    MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
    Chain = Intrin->getChain();
    // Intrin->getBasePtr() oddly does not get what we want.
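    // The store intrinsic's operands are (chain, id, value, ptr), so the
    // base address is operand 3 and the value to be stored is operand 2.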
    Base = Intrin->getOperand(3);
    MMO = Intrin->getMemOperand();
    SrcOpnd = 2;
    break;
  }
  }

  SDValue Src = N->getOperand(SrcOpnd);
  MVT VecTy = Src.getValueType().getSimpleVT();
  SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
                             DAG.getVTList(VecTy, MVT::Other), Chain, Src);
  DCI.AddToWorklist(Swap.getNode());
  Chain = Swap.getValue(1);
  SDValue StoreOps[] = { Chain, Swap, Base };
  SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
                                          DAG.getVTList(MVT::Other),
                                          StoreOps, VecTy, MMO);
  DCI.AddToWorklist(Store.getNode());
  return Store;
}

SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case PPCISD::SHL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->isNullValue())   // 0 << V -> 0.
        return N->getOperand(0);
    }
    break;
  case PPCISD::SRL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->isNullValue())   // 0 >>u V -> 0.
        return N->getOperand(0);
    }
    break;
  case PPCISD::SRA:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->isNullValue() ||   //  0 >>s V -> 0.
          C->isAllOnesValue())  // -1 >>s V -> -1.
        return N->getOperand(0);
    }
    break;
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    return DAGCombineExtBoolTrunc(N, DCI);
  case ISD::TRUNCATE:
  case ISD::SETCC:
  case ISD::SELECT_CC:
    return DAGCombineTruncBoolExt(N, DCI);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return combineFPToIntToFP(N, DCI);
  case ISD::STORE: {
    // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
    if (Subtarget.hasSTFIWX() && !cast<StoreSDNode>(N)->isTruncatingStore() &&
        N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
        N->getOperand(1).getValueType() == MVT::i32 &&
        N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
      SDValue Val = N->getOperand(1).getOperand(0);
      if (Val.getValueType() == MVT::f32) {
        Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
        DCI.AddToWorklist(Val.getNode());
      }
      Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val);
      DCI.AddToWorklist(Val.getNode());

      SDValue Ops[] = {
        N->getOperand(0), Val, N->getOperand(2),
        DAG.getValueType(N->getOperand(1).getValueType())
      };

      Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
              DAG.getVTList(MVT::Other), Ops,
              cast<StoreSDNode>(N)->getMemoryVT(),
              cast<StoreSDNode>(N)->getMemOperand());
      DCI.AddToWorklist(Val.getNode());
      return Val;
    }

    // Turn STORE (BSWAP) -> sthbrx/stwbrx.
    if (cast<StoreSDNode>(N)->isUnindexed() &&
        N->getOperand(1).getOpcode() == ISD::BSWAP &&
        N->getOperand(1).getNode()->hasOneUse() &&
        (N->getOperand(1).getValueType() == MVT::i32 ||
         N->getOperand(1).getValueType() == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
          N->getOperand(1).getValueType() == MVT::i64))) {
      SDValue BSwapOp = N->getOperand(1).getOperand(0);
      // Do an any-extend to 32-bits if this is a half-word input.
      if (BSwapOp.getValueType() == MVT::i16)
        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);

      SDValue Ops[] = {
        N->getOperand(0), BSwapOp, N->getOperand(2),
        DAG.getValueType(N->getOperand(1).getValueType())
      };
      return
        DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
                                Ops, cast<StoreSDNode>(N)->getMemoryVT(),
                                cast<StoreSDNode>(N)->getMemOperand());
    }

    // For little endian, VSX stores require generating xxswapd/stxvd2x.
    EVT VT = N->getOperand(1).getValueType();
    if (VT.isSimple()) {
      MVT StoreVT = VT.getSimpleVT();
      if (Subtarget.hasVSX() && Subtarget.isLittleEndian() &&
          (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
           StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
        return expandVSXStoreForLE(N, DCI);
    }
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    EVT VT = LD->getValueType(0);

    // For little endian, VSX loads require generating lxvd2x/xxswapd.
    if (VT.isSimple()) {
      MVT LoadVT = VT.getSimpleVT();
      if (Subtarget.hasVSX() && Subtarget.isLittleEndian() &&
          (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
           LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
        return expandVSXLoadForLE(N, DCI);
    }

    EVT MemVT = LD->getMemoryVT();
    Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
    unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
    Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
    unsigned ScalarABIAlignment =
      DAG.getDataLayout().getABITypeAlignment(STy);
    if (LD->isUnindexed() && VT.isVector() &&
        ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
          // P8 and later hardware should just use LOAD.
          !Subtarget.hasP8Vector() &&
          (VT == MVT::v16i8 || VT == MVT::v8i16 ||
           VT == MVT::v4i32 || VT == MVT::v4f32)) ||
         (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) &&
          LD->getAlignment() >= ScalarABIAlignment)) &&
        LD->getAlignment() < ABIAlignment) {
      // This is a type-legal unaligned Altivec or QPX load.
      SDValue Chain = LD->getChain();
      SDValue Ptr = LD->getBasePtr();
      bool isLittleEndian = Subtarget.isLittleEndian();

      // This implements the loading of unaligned vectors as described in
      // the venerable Apple Velocity Engine overview.  Specifically:
      // https://developer.apple.com/hardwaredrivers/ve/alignment.html
      // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
      //
      // The general idea is to expand a sequence of one or more unaligned
      // loads into an alignment-based permutation-control instruction (lvsl
      // or lvsr), a series of regular vector loads (which always truncate
      // their input address to an aligned address), and a series of
      // permutations.  The results of these permutations are the requested
      // loaded values.  The trick is that the last "extra" load is not taken
      // from the address you might suspect (sizeof(vector) bytes after the
      // last requested load), but rather sizeof(vector) - 1 bytes after the
      // last requested vector.  The point of this is to avoid a page fault
      // if the base address happened to be aligned.  This works because if
      // the base address is aligned, then adding less than a full vector
      // length will cause the last vector in the sequence to be (re)loaded.
      // Otherwise, the next vector will be fetched from the address you
      // might expect.

      // We might be able to reuse the permutation generation from
      // a different base address offset from this one by an aligned amount.
      // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
      // optimization later.
      Intrinsic::ID Intr, IntrLD, IntrPerm;
      MVT PermCntlTy, PermTy, LDTy;
      if (Subtarget.hasAltivec()) {
        Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr :
                                Intrinsic::ppc_altivec_lvsl;
        IntrLD = Intrinsic::ppc_altivec_lvx;
        IntrPerm = Intrinsic::ppc_altivec_vperm;
        PermCntlTy = MVT::v16i8;
        PermTy = MVT::v4i32;
        LDTy = MVT::v4i32;
      } else {
        Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld :
                                     Intrinsic::ppc_qpx_qvlpcls;
        IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd :
                                       Intrinsic::ppc_qpx_qvlfs;
        IntrPerm = Intrinsic::ppc_qpx_qvfperm;
        PermCntlTy = MVT::v4f64;
        PermTy = MVT::v4f64;
        LDTy = MemVT.getSimpleVT();
      }

      SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);

      // Create the new MMO for the new base load.  It is like the original
      // MMO, but represents an area in memory almost twice the vector size
      // centered on the original address.  If the address is unaligned, we
      // might start reading up to (sizeof(vector)-1) bytes below the address
      // of the original unaligned load.
      MachineFunction &MF = DAG.getMachineFunction();
      MachineMemOperand *BaseMMO =
        MF.getMachineMemOperand(LD->getMemOperand(), -MemVT.getStoreSize()+1,
                                2*MemVT.getStoreSize()-1);

      // Create the new base load.
      SDValue LDXIntID =
        DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
      SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
      SDValue BaseLoad =
        DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
                                DAG.getVTList(PermTy, MVT::Other),
                                BaseLoadOps, LDTy, BaseMMO);

      // Note that the value of IncOffset (which is provided to the next
      // load's pointer info offset value, and thus used to calculate the
      // alignment), and the value of IncValue (which is actually used to
      // increment the pointer value) are different!  This is because we
      // require the next load to appear to be aligned, even though it
      // is actually offset from the base pointer by a lesser amount.
      int IncOffset = VT.getSizeInBits() / 8;
      int IncValue = IncOffset;

      // Walk (both up and down) the chain looking for another load at the
      // real (aligned) offset (the alignment of the other load does not
      // matter in this case).  If found, then do not use the offset
      // reduction trick, as that will prevent the loads from being later
      // combined (as they would otherwise be duplicates).
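      // For example, for a v4f32 load, the extra load nominally covers
      // [base+16, base+32); unless such a consecutive load already exists,
      // we fetch at base+15 instead so that an aligned base cannot fault.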
10315 if (!findConsecutiveLoad(LD, DAG)) 10316 --IncValue; 10317 10318 SDValue Increment = 10319 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout())); 10320 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 10321 10322 MachineMemOperand *ExtraMMO = 10323 MF.getMachineMemOperand(LD->getMemOperand(), 10324 1, 2*MemVT.getStoreSize()-1); 10325 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr }; 10326 SDValue ExtraLoad = 10327 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 10328 DAG.getVTList(PermTy, MVT::Other), 10329 ExtraLoadOps, LDTy, ExtraMMO); 10330 10331 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 10332 BaseLoad.getValue(1), ExtraLoad.getValue(1)); 10333 10334 // Because vperm has a big-endian bias, we must reverse the order 10335 // of the input vectors and complement the permute control vector 10336 // when generating little endian code. We have already handled the 10337 // latter by using lvsr instead of lvsl, so just reverse BaseLoad 10338 // and ExtraLoad here. 10339 SDValue Perm; 10340 if (isLittleEndian) 10341 Perm = BuildIntrinsicOp(IntrPerm, 10342 ExtraLoad, BaseLoad, PermCntl, DAG, dl); 10343 else 10344 Perm = BuildIntrinsicOp(IntrPerm, 10345 BaseLoad, ExtraLoad, PermCntl, DAG, dl); 10346 10347 if (VT != PermTy) 10348 Perm = Subtarget.hasAltivec() ? 10349 DAG.getNode(ISD::BITCAST, dl, VT, Perm) : 10350 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX 10351 DAG.getTargetConstant(1, dl, MVT::i64)); 10352 // second argument is 1 because this rounding 10353 // is always exact. 10354 10355 // The output of the permutation is our loaded result, the TokenFactor is 10356 // our new chain. 10357 DCI.CombineTo(N, Perm, TF); 10358 return SDValue(N, 0); 10359 } 10360 } 10361 break; 10362 case ISD::INTRINSIC_WO_CHAIN: { 10363 bool isLittleEndian = Subtarget.isLittleEndian(); 10364 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 10365 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr 10366 : Intrinsic::ppc_altivec_lvsl); 10367 if ((IID == Intr || 10368 IID == Intrinsic::ppc_qpx_qvlpcld || 10369 IID == Intrinsic::ppc_qpx_qvlpcls) && 10370 N->getOperand(1)->getOpcode() == ISD::ADD) { 10371 SDValue Add = N->getOperand(1); 10372 10373 int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ? 10374 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */; 10375 10376 if (DAG.MaskedValueIsZero( 10377 Add->getOperand(1), 10378 APInt::getAllOnesValue(Bits /* alignment */) 10379 .zext( 10380 Add.getValueType().getScalarType().getSizeInBits()))) { 10381 SDNode *BasePtr = Add->getOperand(0).getNode(); 10382 for (SDNode::use_iterator UI = BasePtr->use_begin(), 10383 UE = BasePtr->use_end(); 10384 UI != UE; ++UI) { 10385 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 10386 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) { 10387 // We've found another LVSL/LVSR, and this address is an aligned 10388 // multiple of that one. The results will be the same, so use the 10389 // one we've just found instead. 
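// (lvsl/lvsr depend only on the low 4 bits of the address, and the
// 32-byte QPX control intrinsics only on the low 5 bits, so adding an
// offset that is a multiple of that alignment cannot change the result.)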
10390 10391 return SDValue(*UI, 0); 10392 } 10393 } 10394 } 10395 10396 if (isa<ConstantSDNode>(Add->getOperand(1))) { 10397 SDNode *BasePtr = Add->getOperand(0).getNode(); 10398 for (SDNode::use_iterator UI = BasePtr->use_begin(), 10399 UE = BasePtr->use_end(); UI != UE; ++UI) { 10400 if (UI->getOpcode() == ISD::ADD && 10401 isa<ConstantSDNode>(UI->getOperand(1)) && 10402 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() - 10403 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) % 10404 (1ULL << Bits) == 0) { 10405 SDNode *OtherAdd = *UI; 10406 for (SDNode::use_iterator VI = OtherAdd->use_begin(), 10407 VE = OtherAdd->use_end(); VI != VE; ++VI) { 10408 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 10409 cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) { 10410 return SDValue(*VI, 0); 10411 } 10412 } 10413 } 10414 } 10415 } 10416 } 10417 } 10418 10419 break; 10420 case ISD::INTRINSIC_W_CHAIN: { 10421 // For little endian, VSX loads require generating lxvd2x/xxswapd. 10422 if (Subtarget.hasVSX() && Subtarget.isLittleEndian()) { 10423 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10424 default: 10425 break; 10426 case Intrinsic::ppc_vsx_lxvw4x: 10427 case Intrinsic::ppc_vsx_lxvd2x: 10428 return expandVSXLoadForLE(N, DCI); 10429 } 10430 } 10431 break; 10432 } 10433 case ISD::INTRINSIC_VOID: { 10434 // For little endian, VSX stores require generating xxswapd/stxvd2x. 10435 if (Subtarget.hasVSX() && Subtarget.isLittleEndian()) { 10436 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10437 default: 10438 break; 10439 case Intrinsic::ppc_vsx_stxvw4x: 10440 case Intrinsic::ppc_vsx_stxvd2x: 10441 return expandVSXStoreForLE(N, DCI); 10442 } 10443 } 10444 break; 10445 } 10446 case ISD::BSWAP: 10447 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 10448 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && 10449 N->getOperand(0).hasOneUse() && 10450 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 || 10451 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && 10452 N->getValueType(0) == MVT::i64))) { 10453 SDValue Load = N->getOperand(0); 10454 LoadSDNode *LD = cast<LoadSDNode>(Load); 10455 // Create the byte-swapping load. 10456 SDValue Ops[] = { 10457 LD->getChain(), // Chain 10458 LD->getBasePtr(), // Ptr 10459 DAG.getValueType(N->getValueType(0)) // VT 10460 }; 10461 SDValue BSLoad = 10462 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl, 10463 DAG.getVTList(N->getValueType(0) == MVT::i64 ? 10464 MVT::i64 : MVT::i32, MVT::Other), 10465 Ops, LD->getMemoryVT(), LD->getMemOperand()); 10466 10467 // If this is an i16 load, insert the truncate. 10468 SDValue ResVal = BSLoad; 10469 if (N->getValueType(0) == MVT::i16) 10470 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad); 10471 10472 // First, combine the bswap away. This makes the value produced by the 10473 // load dead. 10474 DCI.CombineTo(N, ResVal); 10475 10476 // Next, combine the load away, we give it a bogus result value but a real 10477 // chain result. The result value is dead because the bswap is dead. 10478 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); 10479 10480 // Return N so it doesn't get rechecked! 10481 return SDValue(N, 0); 10482 } 10483 10484 break; 10485 case PPCISD::VCMP: { 10486 // If a VCMPo node already exists with exactly the same operands as this 10487 // node, use its result instead of this node (VCMPo computes both a CR6 and 10488 // a normal output). 
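// (VCMPo corresponds to the record forms of the vector compares, e.g.
// vcmpequw., which set CR6 as a side effect of producing the mask.)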
10489 // 10490 if (!N->getOperand(0).hasOneUse() && 10491 !N->getOperand(1).hasOneUse() && 10492 !N->getOperand(2).hasOneUse()) { 10493 10494 // Scan all of the users of the LHS, looking for VCMPo's that match. 10495 SDNode *VCMPoNode = nullptr; 10496 10497 SDNode *LHSN = N->getOperand(0).getNode(); 10498 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 10499 UI != E; ++UI) 10500 if (UI->getOpcode() == PPCISD::VCMPo && 10501 UI->getOperand(1) == N->getOperand(1) && 10502 UI->getOperand(2) == N->getOperand(2) && 10503 UI->getOperand(0) == N->getOperand(0)) { 10504 VCMPoNode = *UI; 10505 break; 10506 } 10507 10508 // If there is no VCMPo node, or if the flag value has a single use, don't 10509 // transform this. 10510 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 10511 break; 10512 10513 // Look at the (necessarily single) use of the flag value. If it has a 10514 // chain, this transformation is more complex. Note that multiple things 10515 // could use the value result, which we should ignore. 10516 SDNode *FlagUser = nullptr; 10517 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 10518 FlagUser == nullptr; ++UI) { 10519 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 10520 SDNode *User = *UI; 10521 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 10522 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) { 10523 FlagUser = User; 10524 break; 10525 } 10526 } 10527 } 10528 10529 // If the user is a MFOCRF instruction, we know this is safe. 10530 // Otherwise we give up for right now. 10531 if (FlagUser->getOpcode() == PPCISD::MFOCRF) 10532 return SDValue(VCMPoNode, 0); 10533 } 10534 break; 10535 } 10536 case ISD::BRCOND: { 10537 SDValue Cond = N->getOperand(1); 10538 SDValue Target = N->getOperand(2); 10539 10540 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN && 10541 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() == 10542 Intrinsic::ppc_is_decremented_ctr_nonzero) { 10543 10544 // We now need to make the intrinsic dead (it cannot be instruction 10545 // selected). 10546 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0)); 10547 assert(Cond.getNode()->hasOneUse() && 10548 "Counter decrement has more than one use"); 10549 10550 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other, 10551 N->getOperand(0), Target); 10552 } 10553 } 10554 break; 10555 case ISD::BR_CC: { 10556 // If this is a branch on an altivec predicate comparison, lower this so 10557 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This 10558 // lowering is done pre-legalize, because the legalizer lowers the predicate 10559 // compare down to code that is difficult to reassemble. 10560 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 10561 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 10562 10563 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero 10564 // value. If so, pass-through the AND to get to the intrinsic. 
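// (For example, after promotion of the intrinsic's i1 result, a pattern
// such as (and (intrinsic_w_chain ...), 1) may plausibly reach this point;
// any non-zero mask preserves the zero/non-zero property branched on here.)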
10565 if (LHS.getOpcode() == ISD::AND && 10566 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN && 10567 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() == 10568 Intrinsic::ppc_is_decremented_ctr_nonzero && 10569 isa<ConstantSDNode>(LHS.getOperand(1)) && 10570 !cast<ConstantSDNode>(LHS.getOperand(1))->getConstantIntValue()-> 10571 isZero()) 10572 LHS = LHS.getOperand(0); 10573 10574 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN && 10575 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 10576 Intrinsic::ppc_is_decremented_ctr_nonzero && 10577 isa<ConstantSDNode>(RHS)) { 10578 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && 10579 "Counter decrement comparison is not EQ or NE"); 10580 10581 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 10582 bool isBDNZ = (CC == ISD::SETEQ && Val) || 10583 (CC == ISD::SETNE && !Val); 10584 10585 // We now need to make the intrinsic dead (it cannot be instruction 10586 // selected). 10587 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0)); 10588 assert(LHS.getNode()->hasOneUse() && 10589 "Counter decrement has more than one use"); 10590 10591 return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other, 10592 N->getOperand(0), N->getOperand(4)); 10593 } 10594 10595 int CompareOpc; 10596 bool isDot; 10597 10598 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 10599 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 10600 getAltivecCompareInfo(LHS, CompareOpc, isDot, Subtarget)) { 10601 assert(isDot && "Can't compare against a vector result!"); 10602 10603 // If this is a comparison against something other than 0/1, then we know 10604 // that the condition is never/always true. 10605 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 10606 if (Val != 0 && Val != 1) { 10607 if (CC == ISD::SETEQ) // Cond never true, remove branch. 10608 return N->getOperand(0); 10609 // Always !=, turn it into an unconditional branch. 10610 return DAG.getNode(ISD::BR, dl, MVT::Other, 10611 N->getOperand(0), N->getOperand(4)); 10612 } 10613 10614 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 10615 10616 // Create the PPCISD altivec 'dot' comparison node. 10617 SDValue Ops[] = { 10618 LHS.getOperand(2), // LHS of compare 10619 LHS.getOperand(3), // RHS of compare 10620 DAG.getConstant(CompareOpc, dl, MVT::i32) 10621 }; 10622 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue }; 10623 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 10624 10625 // Unpack the result based on how the target uses it. 10626 PPC::Predicate CompOpc; 10627 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 10628 default: // Can't happen, don't crash on invalid number though. 10629 case 0: // Branch on the value of the EQ bit of CR6. 10630 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 10631 break; 10632 case 1: // Branch on the inverted value of the EQ bit of CR6. 10633 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 10634 break; 10635 case 2: // Branch on the value of the LT bit of CR6. 10636 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 10637 break; 10638 case 3: // Branch on the inverted value of the LT bit of CR6. 10639 CompOpc = BranchOnWhenPredTrue ? 
PPC::PRED_GE : PPC::PRED_LT; 10640 break; 10641 } 10642 10643 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 10644 DAG.getConstant(CompOpc, dl, MVT::i32), 10645 DAG.getRegister(PPC::CR6, MVT::i32), 10646 N->getOperand(4), CompNode.getValue(1)); 10647 } 10648 break; 10649 } 10650 } 10651 10652 return SDValue(); 10653 } 10654 10655 SDValue 10656 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 10657 SelectionDAG &DAG, 10658 std::vector<SDNode *> *Created) const { 10659 // fold (sdiv X, pow2) 10660 EVT VT = N->getValueType(0); 10661 if (VT == MVT::i64 && !Subtarget.isPPC64()) 10662 return SDValue(); 10663 if ((VT != MVT::i32 && VT != MVT::i64) || 10664 !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2())) 10665 return SDValue(); 10666 10667 SDLoc DL(N); 10668 SDValue N0 = N->getOperand(0); 10669 10670 bool IsNegPow2 = (-Divisor).isPowerOf2(); 10671 unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros(); 10672 SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT); 10673 10674 SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt); 10675 if (Created) 10676 Created->push_back(Op.getNode()); 10677 10678 if (IsNegPow2) { 10679 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op); 10680 if (Created) 10681 Created->push_back(Op.getNode()); 10682 } 10683 10684 return Op; 10685 } 10686 10687 //===----------------------------------------------------------------------===// 10688 // Inline Assembly Support 10689 //===----------------------------------------------------------------------===// 10690 10691 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 10692 APInt &KnownZero, 10693 APInt &KnownOne, 10694 const SelectionDAG &DAG, 10695 unsigned Depth) const { 10696 KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0); 10697 switch (Op.getOpcode()) { 10698 default: break; 10699 case PPCISD::LBRX: { 10700 // lhbrx is known to have the top bits cleared out. 10701 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16) 10702 KnownZero = 0xFFFF0000; 10703 break; 10704 } 10705 case ISD::INTRINSIC_WO_CHAIN: { 10706 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) { 10707 default: break; 10708 case Intrinsic::ppc_altivec_vcmpbfp_p: 10709 case Intrinsic::ppc_altivec_vcmpeqfp_p: 10710 case Intrinsic::ppc_altivec_vcmpequb_p: 10711 case Intrinsic::ppc_altivec_vcmpequh_p: 10712 case Intrinsic::ppc_altivec_vcmpequw_p: 10713 case Intrinsic::ppc_altivec_vcmpequd_p: 10714 case Intrinsic::ppc_altivec_vcmpgefp_p: 10715 case Intrinsic::ppc_altivec_vcmpgtfp_p: 10716 case Intrinsic::ppc_altivec_vcmpgtsb_p: 10717 case Intrinsic::ppc_altivec_vcmpgtsh_p: 10718 case Intrinsic::ppc_altivec_vcmpgtsw_p: 10719 case Intrinsic::ppc_altivec_vcmpgtsd_p: 10720 case Intrinsic::ppc_altivec_vcmpgtub_p: 10721 case Intrinsic::ppc_altivec_vcmpgtuh_p: 10722 case Intrinsic::ppc_altivec_vcmpgtuw_p: 10723 case Intrinsic::ppc_altivec_vcmpgtud_p: 10724 KnownZero = ~1U; // All bits but the low one are known to be zero. 
10725 break; 10726 } 10727 } 10728 } 10729 } 10730 10731 unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { 10732 switch (Subtarget.getDarwinDirective()) { 10733 default: break; 10734 case PPC::DIR_970: 10735 case PPC::DIR_PWR4: 10736 case PPC::DIR_PWR5: 10737 case PPC::DIR_PWR5X: 10738 case PPC::DIR_PWR6: 10739 case PPC::DIR_PWR6X: 10740 case PPC::DIR_PWR7: 10741 case PPC::DIR_PWR8: { 10742 if (!ML) 10743 break; 10744 10745 const PPCInstrInfo *TII = Subtarget.getInstrInfo(); 10746 10747 // For small loops (between 5 and 8 instructions), align to a 32-byte 10748 // boundary so that the entire loop fits in one instruction-cache line. 10749 uint64_t LoopSize = 0; 10750 for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I) 10751 for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) 10752 LoopSize += TII->GetInstSizeInBytes(J); 10753 10754 if (LoopSize > 16 && LoopSize <= 32) 10755 return 5; 10756 10757 break; 10758 } 10759 } 10760 10761 return TargetLowering::getPrefLoopAlignment(ML); 10762 } 10763 10764 /// getConstraintType - Given a constraint, return the type of 10765 /// constraint it is for this target. 10766 PPCTargetLowering::ConstraintType 10767 PPCTargetLowering::getConstraintType(StringRef Constraint) const { 10768 if (Constraint.size() == 1) { 10769 switch (Constraint[0]) { 10770 default: break; 10771 case 'b': 10772 case 'r': 10773 case 'f': 10774 case 'v': 10775 case 'y': 10776 return C_RegisterClass; 10777 case 'Z': 10778 // FIXME: While Z does indicate a memory constraint, it specifically 10779 // indicates an r+r address (used in conjunction with the 'y' modifier 10780 // in the replacement string). Currently, we're forcing the base 10781 // register to be r0 in the asm printer (which is interpreted as zero) 10782 // and forming the complete address in the second register. This is 10783 // suboptimal. 10784 return C_Memory; 10785 } 10786 } else if (Constraint == "wc") { // individual CR bits. 10787 return C_RegisterClass; 10788 } else if (Constraint == "wa" || Constraint == "wd" || 10789 Constraint == "wf" || Constraint == "ws") { 10790 return C_RegisterClass; // VSX registers. 10791 } 10792 return TargetLowering::getConstraintType(Constraint); 10793 } 10794 10795 /// Examine constraint type and operand type and determine a weight value. 10796 /// This object must already have been set up with the operand type 10797 /// and the current alternative constraint selected. 10798 TargetLowering::ConstraintWeight 10799 PPCTargetLowering::getSingleConstraintMatchWeight( 10800 AsmOperandInfo &info, const char *constraint) const { 10801 ConstraintWeight weight = CW_Invalid; 10802 Value *CallOperandVal = info.CallOperandVal; 10803 // If we don't have a value, we can't do a match, 10804 // but allow it at the lowest weight. 10805 if (!CallOperandVal) 10806 return CW_Default; 10807 Type *type = CallOperandVal->getType(); 10808 10809 // Look at the constraint type. 10810 if (StringRef(constraint) == "wc" && type->isIntegerTy(1)) 10811 return CW_Register; // an individual CR bit. 
10812 else if ((StringRef(constraint) == "wa" || 10813 StringRef(constraint) == "wd" || 10814 StringRef(constraint) == "wf") && 10815 type->isVectorTy()) 10816 return CW_Register; 10817 else if (StringRef(constraint) == "ws" && type->isDoubleTy()) 10818 return CW_Register; 10819 10820 switch (*constraint) { 10821 default: 10822 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 10823 break; 10824 case 'b': 10825 if (type->isIntegerTy()) 10826 weight = CW_Register; 10827 break; 10828 case 'f': 10829 if (type->isFloatTy()) 10830 weight = CW_Register; 10831 break; 10832 case 'd': 10833 if (type->isDoubleTy()) 10834 weight = CW_Register; 10835 break; 10836 case 'v': 10837 if (type->isVectorTy()) 10838 weight = CW_Register; 10839 break; 10840 case 'y': 10841 weight = CW_Register; 10842 break; 10843 case 'Z': 10844 weight = CW_Memory; 10845 break; 10846 } 10847 return weight; 10848 } 10849 10850 std::pair<unsigned, const TargetRegisterClass *> 10851 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 10852 StringRef Constraint, 10853 MVT VT) const { 10854 if (Constraint.size() == 1) { 10855 // GCC RS6000 Constraint Letters 10856 switch (Constraint[0]) { 10857 case 'b': // R1-R31 10858 if (VT == MVT::i64 && Subtarget.isPPC64()) 10859 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass); 10860 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass); 10861 case 'r': // R0-R31 10862 if (VT == MVT::i64 && Subtarget.isPPC64()) 10863 return std::make_pair(0U, &PPC::G8RCRegClass); 10864 return std::make_pair(0U, &PPC::GPRCRegClass); 10865 case 'f': 10866 if (VT == MVT::f32 || VT == MVT::i32) 10867 return std::make_pair(0U, &PPC::F4RCRegClass); 10868 if (VT == MVT::f64 || VT == MVT::i64) 10869 return std::make_pair(0U, &PPC::F8RCRegClass); 10870 if (VT == MVT::v4f64 && Subtarget.hasQPX()) 10871 return std::make_pair(0U, &PPC::QFRCRegClass); 10872 if (VT == MVT::v4f32 && Subtarget.hasQPX()) 10873 return std::make_pair(0U, &PPC::QSRCRegClass); 10874 break; 10875 case 'v': 10876 if (VT == MVT::v4f64 && Subtarget.hasQPX()) 10877 return std::make_pair(0U, &PPC::QFRCRegClass); 10878 if (VT == MVT::v4f32 && Subtarget.hasQPX()) 10879 return std::make_pair(0U, &PPC::QSRCRegClass); 10880 return std::make_pair(0U, &PPC::VRRCRegClass); 10881 case 'y': // crrc 10882 return std::make_pair(0U, &PPC::CRRCRegClass); 10883 } 10884 } else if (Constraint == "wc") { // an individual CR bit. 10885 return std::make_pair(0U, &PPC::CRBITRCRegClass); 10886 } else if (Constraint == "wa" || Constraint == "wd" || 10887 Constraint == "wf") { 10888 return std::make_pair(0U, &PPC::VSRCRegClass); 10889 } else if (Constraint == "ws") { 10890 if (VT == MVT::f32) 10891 return std::make_pair(0U, &PPC::VSSRCRegClass); 10892 else 10893 return std::make_pair(0U, &PPC::VSFRCRegClass); 10894 } 10895 10896 std::pair<unsigned, const TargetRegisterClass *> R = 10897 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 10898 10899 // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers 10900 // (which we call X[0-9]+). If a 64-bit value has been requested, and a 10901 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent 10902 // register. 10903 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use 10904 // the AsmName field from *RegisterInfo.td, then this would not be necessary. 
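// For example, an explicit "{r5}" constraint on an i64 operand first
// resolves to the 32-bit R5; the code below upgrades it to X5.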
10905 if (R.first && VT == MVT::i64 && Subtarget.isPPC64() && 10906 PPC::GPRCRegClass.contains(R.first)) 10907 return std::make_pair(TRI->getMatchingSuperReg(R.first, 10908 PPC::sub_32, &PPC::G8RCRegClass), 10909 &PPC::G8RCRegClass); 10910 10911 // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same. 10912 if (!R.second && StringRef("{cc}").equals_lower(Constraint)) { 10913 R.first = PPC::CR0; 10914 R.second = &PPC::CRRCRegClass; 10915 } 10916 10917 return R; 10918 } 10919 10920 10921 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 10922 /// vector. If it is invalid, don't add anything to Ops. 10923 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 10924 std::string &Constraint, 10925 std::vector<SDValue>&Ops, 10926 SelectionDAG &DAG) const { 10927 SDValue Result; 10928 10929 // Only support length 1 constraints. 10930 if (Constraint.length() > 1) return; 10931 10932 char Letter = Constraint[0]; 10933 switch (Letter) { 10934 default: break; 10935 case 'I': 10936 case 'J': 10937 case 'K': 10938 case 'L': 10939 case 'M': 10940 case 'N': 10941 case 'O': 10942 case 'P': { 10943 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op); 10944 if (!CST) return; // Must be an immediate to match. 10945 SDLoc dl(Op); 10946 int64_t Value = CST->getSExtValue(); 10947 EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative 10948 // numbers are printed as such. 10949 switch (Letter) { 10950 default: llvm_unreachable("Unknown constraint letter!"); 10951 case 'I': // "I" is a signed 16-bit constant. 10952 if (isInt<16>(Value)) 10953 Result = DAG.getTargetConstant(Value, dl, TCVT); 10954 break; 10955 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 10956 if (isShiftedUInt<16, 16>(Value)) 10957 Result = DAG.getTargetConstant(Value, dl, TCVT); 10958 break; 10959 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. 10960 if (isShiftedInt<16, 16>(Value)) 10961 Result = DAG.getTargetConstant(Value, dl, TCVT); 10962 break; 10963 case 'K': // "K" is a constant with only the low-order 16 bits nonzero. 10964 if (isUInt<16>(Value)) 10965 Result = DAG.getTargetConstant(Value, dl, TCVT); 10966 break; 10967 case 'M': // "M" is a constant that is greater than 31. 10968 if (Value > 31) 10969 Result = DAG.getTargetConstant(Value, dl, TCVT); 10970 break; 10971 case 'N': // "N" is a positive constant that is an exact power of two. 10972 if (Value > 0 && isPowerOf2_64(Value)) 10973 Result = DAG.getTargetConstant(Value, dl, TCVT); 10974 break; 10975 case 'O': // "O" is the constant zero. 10976 if (Value == 0) 10977 Result = DAG.getTargetConstant(Value, dl, TCVT); 10978 break; 10979 case 'P': // "P" is a constant whose negation is a signed 16-bit constant. 10980 if (isInt<16>(-Value)) 10981 Result = DAG.getTargetConstant(Value, dl, TCVT); 10982 break; 10983 } 10984 break; 10985 } 10986 } 10987 10988 if (Result.getNode()) { 10989 Ops.push_back(Result); 10990 return; 10991 } 10992 10993 // Handle standard constraint letters. 10994 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 10995 } 10996 10997 // isLegalAddressingMode - Return true if the addressing mode represented 10998 // by AM is legal for this target, for a load/store of the specified type. 10999 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL, 11000 const AddrMode &AM, Type *Ty, 11001 unsigned AS) const { 11002 // PPC does not allow r+i addressing modes for vectors! 
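// (Altivec/VMX loads and stores, e.g. lvx/stvx, are X-form only, i.e.
// reg+reg; there is no displacement form for vector memory access.)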
11003 if (Ty->isVectorTy() && AM.BaseOffs != 0)
11004 return false;
11005
11006 // PPC allows a sign-extended 16-bit immediate field.
11007 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
11008 return false;
11009
11010 // No global is ever allowed as a base.
11011 if (AM.BaseGV)
11012 return false;
11013
11014 // PPC only supports r+r.
11015 switch (AM.Scale) {
11016 case 0: // "r+i" or just "i", depending on HasBaseReg.
11017 break;
11018 case 1:
11019 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
11020 return false;
11021 // Otherwise we have r+r or r+i.
11022 break;
11023 case 2:
11024 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
11025 return false;
11026 // Allow 2*r as r+r.
11027 break;
11028 default:
11029 // No other scales are supported.
11030 return false;
11031 }
11032
11033 return true;
11034 }
11035
11036 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
11037 SelectionDAG &DAG) const {
11038 MachineFunction &MF = DAG.getMachineFunction();
11039 MachineFrameInfo *MFI = MF.getFrameInfo();
11040 MFI->setReturnAddressIsTaken(true);
11041
11042 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
11043 return SDValue();
11044
11045 SDLoc dl(Op);
11046 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
11047
11048 // Make sure the function does not optimize away the store of the RA to
11049 // the stack.
11050 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
11051 FuncInfo->setLRStoreRequired();
11052 bool isPPC64 = Subtarget.isPPC64();
11053 auto PtrVT = getPointerTy(MF.getDataLayout());
11054
11055 if (Depth > 0) {
11056 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
11057 SDValue Offset =
11058 DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
11059 isPPC64 ? MVT::i64 : MVT::i32);
11060 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
11061 DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
11062 MachinePointerInfo(), false, false, false, 0);
11063 }
11064
11065 // Just load the return address off the stack.
11066 SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
11067 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
11068 MachinePointerInfo(), false, false, false, 0);
11069 }
11070
11071 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
11072 SelectionDAG &DAG) const {
11073 SDLoc dl(Op);
11074 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
11075
11076 MachineFunction &MF = DAG.getMachineFunction();
11077 MachineFrameInfo *MFI = MF.getFrameInfo();
11078 MFI->setFrameAddressIsTaken(true);
11079
11080 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
11081 bool isPPC64 = PtrVT == MVT::i64;
11082
11083 // Naked functions never have a frame pointer, and so we use r1. For all
11084 // other functions, this decision must be deferred until PEI.
11085 unsigned FrameReg;
11086 if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
11087 FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
11088 else
11089 FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
11090
11091 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
11092 PtrVT);
11093 while (Depth--)
11094 FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
11095 FrameAddr, MachinePointerInfo(), false, false,
11096 false, 0);
11097 return FrameAddr;
11098 }
11099
11100 // FIXME? Maybe this could be a TableGen attribute on some registers and
11101 // this table could be generated automatically from RegInfo.
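// This is reached via the llvm.read_register and llvm.write_register
// intrinsics, e.g. for C code such as (illustrative):
//   register unsigned long SP asm("r1");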
11102 unsigned PPCTargetLowering::getRegisterByName(const char* RegName, EVT VT, 11103 SelectionDAG &DAG) const { 11104 bool isPPC64 = Subtarget.isPPC64(); 11105 bool isDarwinABI = Subtarget.isDarwinABI(); 11106 11107 if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) || 11108 (!isPPC64 && VT != MVT::i32)) 11109 report_fatal_error("Invalid register global variable type"); 11110 11111 bool is64Bit = isPPC64 && VT == MVT::i64; 11112 unsigned Reg = StringSwitch<unsigned>(RegName) 11113 .Case("r1", is64Bit ? PPC::X1 : PPC::R1) 11114 .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2) 11115 .Case("r13", (!isPPC64 && isDarwinABI) ? 0 : 11116 (is64Bit ? PPC::X13 : PPC::R13)) 11117 .Default(0); 11118 11119 if (Reg) 11120 return Reg; 11121 report_fatal_error("Invalid register name global variable"); 11122 } 11123 11124 bool 11125 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 11126 // The PowerPC target isn't yet aware of offsets. 11127 return false; 11128 } 11129 11130 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 11131 const CallInst &I, 11132 unsigned Intrinsic) const { 11133 11134 switch (Intrinsic) { 11135 case Intrinsic::ppc_qpx_qvlfd: 11136 case Intrinsic::ppc_qpx_qvlfs: 11137 case Intrinsic::ppc_qpx_qvlfcd: 11138 case Intrinsic::ppc_qpx_qvlfcs: 11139 case Intrinsic::ppc_qpx_qvlfiwa: 11140 case Intrinsic::ppc_qpx_qvlfiwz: 11141 case Intrinsic::ppc_altivec_lvx: 11142 case Intrinsic::ppc_altivec_lvxl: 11143 case Intrinsic::ppc_altivec_lvebx: 11144 case Intrinsic::ppc_altivec_lvehx: 11145 case Intrinsic::ppc_altivec_lvewx: 11146 case Intrinsic::ppc_vsx_lxvd2x: 11147 case Intrinsic::ppc_vsx_lxvw4x: { 11148 EVT VT; 11149 switch (Intrinsic) { 11150 case Intrinsic::ppc_altivec_lvebx: 11151 VT = MVT::i8; 11152 break; 11153 case Intrinsic::ppc_altivec_lvehx: 11154 VT = MVT::i16; 11155 break; 11156 case Intrinsic::ppc_altivec_lvewx: 11157 VT = MVT::i32; 11158 break; 11159 case Intrinsic::ppc_vsx_lxvd2x: 11160 VT = MVT::v2f64; 11161 break; 11162 case Intrinsic::ppc_qpx_qvlfd: 11163 VT = MVT::v4f64; 11164 break; 11165 case Intrinsic::ppc_qpx_qvlfs: 11166 VT = MVT::v4f32; 11167 break; 11168 case Intrinsic::ppc_qpx_qvlfcd: 11169 VT = MVT::v2f64; 11170 break; 11171 case Intrinsic::ppc_qpx_qvlfcs: 11172 VT = MVT::v2f32; 11173 break; 11174 default: 11175 VT = MVT::v4i32; 11176 break; 11177 } 11178 11179 Info.opc = ISD::INTRINSIC_W_CHAIN; 11180 Info.memVT = VT; 11181 Info.ptrVal = I.getArgOperand(0); 11182 Info.offset = -VT.getStoreSize()+1; 11183 Info.size = 2*VT.getStoreSize()-1; 11184 Info.align = 1; 11185 Info.vol = false; 11186 Info.readMem = true; 11187 Info.writeMem = false; 11188 return true; 11189 } 11190 case Intrinsic::ppc_qpx_qvlfda: 11191 case Intrinsic::ppc_qpx_qvlfsa: 11192 case Intrinsic::ppc_qpx_qvlfcda: 11193 case Intrinsic::ppc_qpx_qvlfcsa: 11194 case Intrinsic::ppc_qpx_qvlfiwaa: 11195 case Intrinsic::ppc_qpx_qvlfiwza: { 11196 EVT VT; 11197 switch (Intrinsic) { 11198 case Intrinsic::ppc_qpx_qvlfda: 11199 VT = MVT::v4f64; 11200 break; 11201 case Intrinsic::ppc_qpx_qvlfsa: 11202 VT = MVT::v4f32; 11203 break; 11204 case Intrinsic::ppc_qpx_qvlfcda: 11205 VT = MVT::v2f64; 11206 break; 11207 case Intrinsic::ppc_qpx_qvlfcsa: 11208 VT = MVT::v2f32; 11209 break; 11210 default: 11211 VT = MVT::v4i32; 11212 break; 11213 } 11214 11215 Info.opc = ISD::INTRINSIC_W_CHAIN; 11216 Info.memVT = VT; 11217 Info.ptrVal = I.getArgOperand(0); 11218 Info.offset = 0; 11219 Info.size = VT.getStoreSize(); 11220 Info.align = 1; 11221 Info.vol = false; 11222 Info.readMem 
= true;
11223 Info.writeMem = false;
11224 return true;
11225 }
11226 case Intrinsic::ppc_qpx_qvstfd:
11227 case Intrinsic::ppc_qpx_qvstfs:
11228 case Intrinsic::ppc_qpx_qvstfcd:
11229 case Intrinsic::ppc_qpx_qvstfcs:
11230 case Intrinsic::ppc_qpx_qvstfiw:
11231 case Intrinsic::ppc_altivec_stvx:
11232 case Intrinsic::ppc_altivec_stvxl:
11233 case Intrinsic::ppc_altivec_stvebx:
11234 case Intrinsic::ppc_altivec_stvehx:
11235 case Intrinsic::ppc_altivec_stvewx:
11236 case Intrinsic::ppc_vsx_stxvd2x:
11237 case Intrinsic::ppc_vsx_stxvw4x: {
11238 EVT VT;
11239 switch (Intrinsic) {
11240 case Intrinsic::ppc_altivec_stvebx:
11241 VT = MVT::i8;
11242 break;
11243 case Intrinsic::ppc_altivec_stvehx:
11244 VT = MVT::i16;
11245 break;
11246 case Intrinsic::ppc_altivec_stvewx:
11247 VT = MVT::i32;
11248 break;
11249 case Intrinsic::ppc_vsx_stxvd2x:
11250 VT = MVT::v2f64;
11251 break;
11252 case Intrinsic::ppc_qpx_qvstfd:
11253 VT = MVT::v4f64;
11254 break;
11255 case Intrinsic::ppc_qpx_qvstfs:
11256 VT = MVT::v4f32;
11257 break;
11258 case Intrinsic::ppc_qpx_qvstfcd:
11259 VT = MVT::v2f64;
11260 break;
11261 case Intrinsic::ppc_qpx_qvstfcs:
11262 VT = MVT::v2f32;
11263 break;
11264 default:
11265 VT = MVT::v4i32;
11266 break;
11267 }
11268
11269 Info.opc = ISD::INTRINSIC_VOID;
11270 Info.memVT = VT;
11271 Info.ptrVal = I.getArgOperand(1);
11272 Info.offset = -VT.getStoreSize()+1;
11273 Info.size = 2*VT.getStoreSize()-1;
11274 Info.align = 1;
11275 Info.vol = false;
11276 Info.readMem = false;
11277 Info.writeMem = true;
11278 return true;
11279 }
11280 case Intrinsic::ppc_qpx_qvstfda:
11281 case Intrinsic::ppc_qpx_qvstfsa:
11282 case Intrinsic::ppc_qpx_qvstfcda:
11283 case Intrinsic::ppc_qpx_qvstfcsa:
11284 case Intrinsic::ppc_qpx_qvstfiwa: {
11285 EVT VT;
11286 switch (Intrinsic) {
11287 case Intrinsic::ppc_qpx_qvstfda:
11288 VT = MVT::v4f64;
11289 break;
11290 case Intrinsic::ppc_qpx_qvstfsa:
11291 VT = MVT::v4f32;
11292 break;
11293 case Intrinsic::ppc_qpx_qvstfcda:
11294 VT = MVT::v2f64;
11295 break;
11296 case Intrinsic::ppc_qpx_qvstfcsa:
11297 VT = MVT::v2f32;
11298 break;
11299 default:
11300 VT = MVT::v4i32;
11301 break;
11302 }
11303
11304 Info.opc = ISD::INTRINSIC_VOID;
11305 Info.memVT = VT;
11306 Info.ptrVal = I.getArgOperand(1);
11307 Info.offset = 0;
11308 Info.size = VT.getStoreSize();
11309 Info.align = 1;
11310 Info.vol = false;
11311 Info.readMem = false;
11312 Info.writeMem = true;
11313 return true;
11314 }
11315 default:
11316 break;
11317 }
11318
11319 return false;
11320 }
11321
11322 /// getOptimalMemOpType - Returns the target-specific optimal type for load
11323 /// and store operations as a result of memset, memcpy, and memmove
11324 /// lowering. If DstAlign is zero, the destination alignment can satisfy any
11325 /// constraint. Similarly, if SrcAlign is zero, there is no need to check it
11326 /// against an alignment requirement, probably because the source does not
11327 /// need to be loaded. If 'IsMemset' is true, this is expanding a memset; if
11328 /// 'ZeroMemset' is also true, it is a memset of zero. 'MemcpyStrSrc'
11329 /// indicates whether the memcpy source is constant so it does not need to
11330 /// be loaded.
11331 /// It returns EVT::Other if the type should be determined using generic
11332 /// target-independent logic.
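/// For example, given the checks below, a QPX subtarget may return
/// MVT::v4f64 for large, 32-byte-aligned copies, an Altivec/VSX subtarget
/// MVT::v4i32, and otherwise MVT::i64 or MVT::i32 depending on word size.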
11333 EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size, 11334 unsigned DstAlign, unsigned SrcAlign, 11335 bool IsMemset, bool ZeroMemset, 11336 bool MemcpyStrSrc, 11337 MachineFunction &MF) const { 11338 if (getTargetMachine().getOptLevel() != CodeGenOpt::None) { 11339 const Function *F = MF.getFunction(); 11340 // When expanding a memset, require at least two QPX instructions to cover 11341 // the cost of loading the value to be stored from the constant pool. 11342 if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) && 11343 (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) && 11344 !F->hasFnAttribute(Attribute::NoImplicitFloat)) { 11345 return MVT::v4f64; 11346 } 11347 11348 // We should use Altivec/VSX loads and stores when available. For unaligned 11349 // addresses, unaligned VSX loads are only fast starting with the P8. 11350 if (Subtarget.hasAltivec() && Size >= 16 && 11351 (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) || 11352 ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector()))) 11353 return MVT::v4i32; 11354 } 11355 11356 if (Subtarget.isPPC64()) { 11357 return MVT::i64; 11358 } 11359 11360 return MVT::i32; 11361 } 11362 11363 /// \brief Returns true if it is beneficial to convert a load of a constant 11364 /// to just the constant itself. 11365 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 11366 Type *Ty) const { 11367 assert(Ty->isIntegerTy()); 11368 11369 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 11370 if (BitSize == 0 || BitSize > 64) 11371 return false; 11372 return true; 11373 } 11374 11375 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { 11376 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 11377 return false; 11378 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 11379 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 11380 return NumBits1 == 64 && NumBits2 == 32; 11381 } 11382 11383 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 11384 if (!VT1.isInteger() || !VT2.isInteger()) 11385 return false; 11386 unsigned NumBits1 = VT1.getSizeInBits(); 11387 unsigned NumBits2 = VT2.getSizeInBits(); 11388 return NumBits1 == 64 && NumBits2 == 32; 11389 } 11390 11391 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { 11392 // Generally speaking, zexts are not free, but they are free when they can be 11393 // folded with other operations. 11394 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) { 11395 EVT MemVT = LD->getMemoryVT(); 11396 if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 || 11397 (Subtarget.isPPC64() && MemVT == MVT::i32)) && 11398 (LD->getExtensionType() == ISD::NON_EXTLOAD || 11399 LD->getExtensionType() == ISD::ZEXTLOAD)) 11400 return true; 11401 } 11402 11403 // FIXME: Add other cases... 11404 // - 32-bit shifts with a zext to i64 11405 // - zext after ctlz, bswap, etc. 
11406 // - zext after and by a constant mask 11407 11408 return TargetLowering::isZExtFree(Val, VT2); 11409 } 11410 11411 bool PPCTargetLowering::isFPExtFree(EVT VT) const { 11412 assert(VT.isFloatingPoint()); 11413 return true; 11414 } 11415 11416 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 11417 return isInt<16>(Imm) || isUInt<16>(Imm); 11418 } 11419 11420 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const { 11421 return isInt<16>(Imm) || isUInt<16>(Imm); 11422 } 11423 11424 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, 11425 unsigned, 11426 unsigned, 11427 bool *Fast) const { 11428 if (DisablePPCUnaligned) 11429 return false; 11430 11431 // PowerPC supports unaligned memory access for simple non-vector types. 11432 // Although accessing unaligned addresses is not as efficient as accessing 11433 // aligned addresses, it is generally more efficient than manual expansion, 11434 // and generally only traps for software emulation when crossing page 11435 // boundaries. 11436 11437 if (!VT.isSimple()) 11438 return false; 11439 11440 if (VT.getSimpleVT().isVector()) { 11441 if (Subtarget.hasVSX()) { 11442 if (VT != MVT::v2f64 && VT != MVT::v2i64 && 11443 VT != MVT::v4f32 && VT != MVT::v4i32) 11444 return false; 11445 } else { 11446 return false; 11447 } 11448 } 11449 11450 if (VT == MVT::ppcf128) 11451 return false; 11452 11453 if (Fast) 11454 *Fast = true; 11455 11456 return true; 11457 } 11458 11459 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { 11460 VT = VT.getScalarType(); 11461 11462 if (!VT.isSimple()) 11463 return false; 11464 11465 switch (VT.getSimpleVT().SimpleTy) { 11466 case MVT::f32: 11467 case MVT::f64: 11468 return true; 11469 default: 11470 break; 11471 } 11472 11473 return false; 11474 } 11475 11476 const MCPhysReg * 11477 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const { 11478 // LR is a callee-save register, but we must treat it as clobbered by any call 11479 // site. Hence we include LR in the scratch registers, which are in turn added 11480 // as implicit-defs for stackmaps and patchpoints. The same reasoning applies 11481 // to CTR, which is used by any indirect call. 11482 static const MCPhysReg ScratchRegs[] = { 11483 PPC::X12, PPC::LR8, PPC::CTR8, 0 11484 }; 11485 11486 return ScratchRegs; 11487 } 11488 11489 bool 11490 PPCTargetLowering::shouldExpandBuildVectorWithShuffles( 11491 EVT VT , unsigned DefinedValues) const { 11492 if (VT == MVT::v2i64) 11493 return false; 11494 11495 if (Subtarget.hasQPX()) { 11496 if (VT == MVT::v4f32 || VT == MVT::v4f64 || VT == MVT::v4i1) 11497 return true; 11498 } 11499 11500 return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues); 11501 } 11502 11503 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const { 11504 if (DisableILPPref || Subtarget.enableMachineScheduler()) 11505 return TargetLowering::getSchedulingPreference(N); 11506 11507 return Sched::ILP; 11508 } 11509 11510 // Create a fast isel object. 11511 FastISel * 11512 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo, 11513 const TargetLibraryInfo *LibInfo) const { 11514 return PPC::createFastISel(FuncInfo, LibInfo); 11515 } 11516