//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPCCallingConv.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCTargetMachine.h"
#include "PPCTargetObjectFile.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

// FIXME: Remove this once soft-float is supported.
static cl::opt<bool> DisablePPCFloatInVariadic("disable-ppc-float-in-variadic",
cl::desc("disable saving float registers for va_start on PPC"), cl::Hidden);

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
  addRegisterClass(MVT::f64, &PPC::F8RCRegClass);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
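  // (These correspond to the update-form memory instructions such as lbzu,
  // lwzu, and stwu, which write the incremented effective address back into
  // the base register.)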
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
                         isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType (ISD::UINT_TO_FP, MVT::i1,
                         isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load / store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // This is used in the ppcf128->int sequence. Note it has different semantics
  // from FP_ROUND: that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
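  // (With all of these marked Expand, a 32-bit srem, for example, is lowered
  // to a divw/mullw/subf sequence rather than to a divrem node or a libcall.)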
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN   , MVT::f64, Expand);
  setOperationAction(ISD::FCOS   , MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM   , MVT::f64, Expand);
  setOperationAction(ISD::FPOW   , MVT::f64, Expand);
  setOperationAction(ISD::FMA    , MVT::f64, Legal);
  setOperationAction(ISD::FSIN   , MVT::f32, Expand);
  setOperationAction(ISD::FCOS   , MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM   , MVT::f32, Expand);
  setOperationAction(ISD::FPOW   , MVT::f32, Expand);
  setOperationAction(ISD::FMA    , MVT::f32, Legal);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, CTPOP or CTTZ.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ , MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ , MVT::i64, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);

  if (Subtarget.hasPOPCNTD()) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have Select.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::f64, Expand);

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuations, user-level threading, and the like. As a result,
  // no other SjLj exception interfaces are implemented, and please don't
  // build your own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
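      // (e.g., va_arg(ap, int) on 64-bit SVR4 reads a full 8-byte slot and
      // then truncates the promoted i64 result back down to i32.)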
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  if (Subtarget.isSVR4ABI() && !isPPC64)
    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  // Comparisons that require checking two conditions.
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
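  // (Roughly: the FPCVT facility, added with POWER7 / ISA 2.06, provides
  // fctiwuz/fctiduz and fcfids/fcfidus, covering the unsigned and
  // single-precision conversions that are missing above.)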
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // Vector instructions introduced in P8.
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND   , VT, Promote);
      AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
      setOperationAction(ISD::OR    , VT, Promote);
      AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , VT, Promote);
      AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , VT, Promote);
      AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
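      // (For vector types, marking these Expand generally means legalization
      // will unroll them into scalar operations or emit a libcall.)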
      setOperationAction(ISD::MUL , VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FPOWI, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

      for (MVT InnerVT : MVT::vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
      setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Vector operation legalization checks the result type of
      // SIGN_EXTEND_INREG, overall legalization checks the inner type.
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD , MVT::v4f64, Custom);
    setOperationAction(ISD::STORE, MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_ROUND_INREG, MVT::v4f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG , MVT::v4f64, Legal);
    setOperationAction(ISD::FABS , MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN , MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS , MVT::v4f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP , MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD , MVT::v4f32, Custom);
    setOperationAction(ISD::STORE, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG , MVT::v4f32, Legal);
    setOperationAction(ISD::FABS , MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN , MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS , MVT::v4f32, Expand);
    setOperationAction(ISD::FPOWI, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP , MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND, MVT::v4i1, Legal);
    setOperationAction(ISD::OR , MVT::v4i1, Legal);
    setOperationAction(ISD::XOR, MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD , MVT::v4i1, Custom);
    setOperationAction(ISD::STORE, MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
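    // (C99 rint must raise FE_INEXACT when the result differs from the input;
    // per the comment above, the vector rounding instructions here do not, so
    // FRINT stays scalar.)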
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  if (isPPC64) {
    setStackPointerRegisterToSaveRestore(PPC::X1);
    setExceptionPointerRegister(PPC::X3);
    setExceptionSelectorRegister(PPC::X4);
  } else {
    setStackPointerRegisterToSaveRestore(PPC::R1);
    setExceptionPointerRegister(PPC::R3);
    setExceptionSelectorRegister(PPC::R4);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(2);
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(4);

  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
    setPrefFunctionAlignment(4);
    setPrefLoopAlignment(4);
    break;
  }

  setInsertFencesForAtomic(true);

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary. The rest are
  // aligned to 8 bytes on PPC64 and 4 bytes on PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
  return Align;
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER:    break;
  case PPCISD::FSEL:            return "PPCISD::FSEL";
  case PPCISD::FCFID:           return "PPCISD::FCFID";
  case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
  case PPCISD::FRE:             return "PPCISD::FRE";
  case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX:          return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:         return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:        return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:           return "PPCISD::VPERM";
  case PPCISD::CMPB:            return "PPCISD::CMPB";
  case PPCISD::Hi:              return "PPCISD::Hi";
  case PPCISD::Lo:              return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
  case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
  case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:             return "PPCISD::SRL";
  case PPCISD::SRA:             return "PPCISD::SRA";
  case PPCISD::SHL:             return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL:            return "PPCISD::CALL";
  case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR:           return "PPCISD::MTCTR";
  case PPCISD::BCTRL:           return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
  case PPCISD::MFVSR:           return "PPCISD::MFVSR";
  case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
  case PPCISD::ANDIo_1_EQ_BIT:  return "PPCISD::ANDIo_1_EQ_BIT";
  case PPCISD::ANDIo_1_GT_BIT:  return "PPCISD::ANDIo_1_GT_BIT";
  case PPCISD::VCMP:            return "PPCISD::VCMP";
  case PPCISD::VCMPo:           return "PPCISD::VCMPo";
  case PPCISD::LBRX:            return "PPCISD::LBRX";
  case PPCISD::STBRX:           return "PPCISD::STBRX";
  case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
  case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
  case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ:            return "PPCISD::BDNZ";
  case PPCISD::BDZ:             return "PPCISD::BDZ";
  case PPCISD::MFFS:            return "PPCISD::MFFS";
  case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET:          return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
  case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
  case PPCISD::SC:              return "PPCISD::SC";
  case PPCISD::CLRBHRB:         return "PPCISD::CLRBHRB";
  case PPCISD::MFBHRBE:         return "PPCISD::MFBHRBE";
  case PPCISD::RFEBB:           return "PPCISD::RFEBB";
  case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
  case PPCISD::QVFPERM:         return "PPCISD::QVFPERM";
  case PPCISD::QVGPCI:          return "PPCISD::QVGPCI";
  case PPCISD::QVALIGNI:        return "PPCISD::QVALIGNI";
  case PPCISD::QVESPLATI:       return "PPCISD::QVESPLATI";
  case PPCISD::QBFLT:           return "PPCISD::QBFLT";
  case PPCISD::QVLFSb:          return "PPCISD::QVLFSb";
  }
  return nullptr;
}

EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
                                          EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

  if (Subtarget.hasQPX())
    return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());

  return VT.changeVectorElementTypeToInteger();
}

bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
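/// For example, with ShuffleKind 0 on a big-endian target, the expected mask
/// is <1,3,5,...,31>: the low-order (odd-numbered) byte of each halfword of
/// the two concatenated inputs.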
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
        return false;
  }
  return true;
}

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
/// current subtarget.
///
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
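/// For example, with ShuffleKind 0 on a big-endian target, the expected mask
/// is <4,5,6,7, 12,13,14,15, 20,21,22,23, 28,29,30,31>: the low-order word of
/// each doubleword of the two concatenated inputs.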
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  const PPCSubtarget& Subtarget =
      static_cast<const PPCSubtarget&>(DAG.getSubtarget());
  if (!Subtarget.hasP8Vector())
    return false;

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units.
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit.
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
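/// For example, a big-endian vmrglw merge of two different inputs
/// (ShuffleKind 0, UnitSize 4) expects the mask
/// <8,9,10,11, 24,25,26,27, 12,13,14,15, 28,29,30,31>.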
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}

/**
 * \brief Common function used to match vmrgew and vmrgow shuffles.
 *
 * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target machine.
 *   - Little Endian:
 *     - Use offset of 0 to check for odd elements
 *     - Use offset of 4 to check for even elements
 *   - Big Endian:
 *     - Use offset of 0 to check for even elements
 *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 * Targeting your applications - what little endian and big endian IBM XL C/C++
 * compiler differences mean to you
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements
 * are numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, thus each vector will contain 16
 * byte-sized elements. More info on the shuffle vector can be found in the
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
 * Language Reference.
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 *   - If the instruction uses the same vector for both inputs, the range of
 *     the indices will be 0 to 15. In this case, the RHSStart value passed
 *     should be 0.
 *   - If the instruction has two different vectors then the range of the
 *     indices will be 0 to 31. In this case, the RHSStart value passed should
 *     be 16 (indices 0-15 specify elements in the first vector while indices
 *     16 to 31 specify elements in the second vector).
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] IndexOffset Specifies whether to look for even or odd elements
 * \param[in] RHSStartValue Specifies the starting index for the right-hand
 *            input vector to the shuffle_vector instruction
 * \return true iff this shuffle vector represents an even or odd word merge
 */
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
      if (!isConstantOrUndef(N->getMaskElt(i*4+j),
                             i*RHSStartValue+j+IndexOffset) ||
          !isConstantOrUndef(N->getMaskElt(i*4+j+8),
                             i*RHSStartValue+j+IndexOffset+8))
        return false;
  return true;
}

/**
 * \brief Determine if the specified shuffle mask is suitable for the vmrgew
 * or vmrgow instructions.
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
 * \param[in] ShuffleKind Identify the type of merge:
 *   - 0 = big-endian merge with two different inputs;
 *   - 1 = either-endian merge with two identical inputs;
 *   - 2 = little-endian merge with two different inputs (inputs are swapped
 *     for little-endian merges).
 * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask is suitable for a vmrgew or vmrgow
 *         instruction
 */
bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                              unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    unsigned indexOffset = CheckEven ? 4 : 0;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  } else {
    unsigned indexOffset = CheckEven ? 0 : 4;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 0) // Normal
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  }
  return false;
}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
/// The ShuffleKind distinguishes between big-endian operations with two
/// different inputs (0), either-endian operations with two identical inputs
/// (1), and little-endian operations with two different inputs (2). For the
/// latter, the input operands are swapped (see PPCInstrAltivec.td).
int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                             SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::v16i8)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1; // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
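  // (e.g., the mask <3,4,5,...,18> has its first defined element, 3, at
  // index 0, so the computed shift amount below is 3.)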
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;

  ShiftAmt -= i;
  bool isLE = DAG.getDataLayout().isLittleEndian();

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else if (ShuffleKind == 1) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  } else
    return -1;

  if (ShuffleKind == 2 && isLE)
    ShiftAmt = 16 - ShiftAmt;

  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte
  // element splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
                                SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  if (DAG.getDataLayout().isLittleEndian())
    return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  else
    return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(nullptr, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
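    // For example, with ByteSize == 2 on a v16i8 build_vector (Multiple == 2),
    // every even operand must match UniquedVals[0] and every odd operand
    // UniquedVals[1] (undefs excepted) for the halfword splat to be viable.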
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.

      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16)                                 // 0,0,0,4 -> vspltisw(4)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }
    if (LeadingOnes) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (!OpVal.getNode())
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, check if it consists
  // of a repeated bit pattern of size ByteSize.
  if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
    return SDValue();

  // Properly sign extend the value.
  int MaskVal = SignExtend32(Value, ByteSize * 8);

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
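  // (All-zero vectors are matched elsewhere via ISD::isBuildVectorAllZeros
  // and materialized with vxor instead, as noted above.)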
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5 bit sext field, return it.
  if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
  return SDValue();
}

/// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isQVALIGNIShuffleMask(SDNode *N) {
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 4) return -1; // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  // Check the rest of the elements to see if they are consecutive.
  for (++i; i != 4; ++i)
    if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
      return -1;

  return ShiftAmt;
}

//===----------------------------------------------------------------------===//
// Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value.  If so, this returns true and the
/// immediate.
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
static bool isIntS16Immediate(SDValue Op, short &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}

/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index,
                                            SelectionDAG &DAG) const {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i: fold it if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
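    // e.g. (or FI, 12) where FI is known 16-byte aligned: the low bits of the
    // two operands cannot both be set, so the OR behaves exactly like an ADD
    // and the [r+r] form is safe.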
    APInt LHSKnownZero, LHSKnownOne;
    APInt RHSKnownZero, RHSKnownOne;
    DAG.computeKnownBits(N.getOperand(0),
                         LHSKnownZero, LHSKnownOne);

    if (LHSKnownZero.getBoolValue()) {
      DAG.computeKnownBits(N.getOperand(1),
                           RHSKnownZero, RHSKnownOne);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnownZero | RHSKnownZero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

// If we happen to be doing an i64 load or store into a stack slot that has
// less than a 4-byte alignment, then the frame-index elimination may need to
// use an indexed load or store instruction (because the offset may not be a
// multiple of 4). The extra register needed to hold the offset comes from the
// register scavenger, and it is possible that the scavenger will need to use
// an emergency spill slot. As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
  // FIXME: This does not handle the LWA case.
  if (VT != MVT::i64)
    return;

  // NOTE: We'll exclude negative FIs here, which come from argument
  // lowering, because there are no known test cases triggering this problem
  // using packed structures (or similar). We can remove this exclusion if
  // we find such a test case. The reason why this is so test-case driven is
  // because this entire 'fixup' is only to prevent crashes (from the
  // register scavenger) on not-really-valid inputs. For example, if we have:
  //   %a = alloca i1
  //   %b = bitcast i1* %a to i64*
  //   store i64 0, i64* %b
  // then the store should really be marked as 'align 1', but is not. If it
  // were marked as 'align 1' then the indexed form would have been
  // instruction-selected initially, and the problem this 'fixup' is preventing
  // won't happen regardless.
  if (FrameIdx < 0)
    return;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  unsigned Align = MFI->getObjectAlignment(FrameIdx);
  if (Align >= 4)
    return;

  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setHasNonRISpills();
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg.  If Aligned is true, only accept displacements
/// suitable for STD and friends, i.e. multiples of 4.
bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
                                            SDValue &Base,
                                            SelectionDAG &DAG,
                                            bool Aligned) const {
  // FIXME dl should come from parent load or store, not from address
  SDLoc dl(N);
  // If this can be more profitably realized as r+r, fail.
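  // ([r+imm] maps to the D-form encodings such as lwz/std; [r+r] maps to the
  // X-form encodings such as lwzx/stdx, which SelectAddressRegReg prefers
  // when no small immediate is available.)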
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!Aligned || (imm & 3) == 0)) {
      Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
        fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true; // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!Aligned || (imm & 3) == 0)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.computeKnownBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);

      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        if (FrameIndexSDNode *FI =
              dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
          Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
          fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
        } else {
          Base = N.getOperand(0);
        }
        Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0".
    short Imm;
    if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) {
      Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
      Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                             CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if ((CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
        (!Aligned || (CN->getZExtValue() & 3) == 0)) {
      int Addr = (int)CN->getZExtValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
                                   MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
    fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
  } else
    Base = N;
  return true; // [r+0]
}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address.  This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the operand is an addition, always emit this as [r+r], since this is
  // better (for code size, and execution, as the memop does the add for free)
  // than emitting an explicit add.
  if (N.getOpcode() == ISD::ADD) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Index = N;
  return true;
}

/// getPreIndexedAddressParts - Returns true if the node's address can be
/// legally represented as a pre-indexed load/store address, setting the base
/// pointer, offset pointer, and addressing mode by reference.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  unsigned Alignment;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlignment();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlignment();
    isLoad = false;
  } else
    return false;

  // PowerPC doesn't have preinc load/store instructions for vectors (except
  // for QPX, which does have preinc r+r forms).
  if (VT.isVector()) {
    if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
      return false;
    } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
      AM = ISD::PRE_INC;
      return true;
    }
  }

  if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {

    // Common code will reject creating a pre-inc form if the base pointer
    // is a frame index, or if N is a store and the base pointer is either
    // the same as or a predecessor of the value being stored.  Check for
    // those situations here, and try with swapped Base/Offset instead.
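    // (The update forms write the effective address back into the base
    // register, so the base must be a plain register that is neither a frame
    // index nor needed to compute the stored value.)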
    bool Swap = false;

    if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
      Swap = true;
    else if (!isLoad) {
      SDValue Val = cast<StoreSDNode>(N)->getValue();
      if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
        Swap = true;
    }

    if (Swap)
      std::swap(Base, Offset);

    AM = ISD::PRE_INC;
    return true;
  }

  // LDU/STU can only handle immediates that are a multiple of 4.
  if (VT != MVT::i64) {
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, false))
      return false;
  } else {
    // LDU/STU need an address with at least 4-byte alignment.
    if (Alignment < 4)
      return false;

    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, true))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

/// GetLabelAccessInfo - Return true if we should reference labels using a
/// PICBase, and set the HiOpFlags and LoOpFlags to the target MO flags.
static bool GetLabelAccessInfo(const TargetMachine &TM,
                               const PPCSubtarget &Subtarget,
                               unsigned &HiOpFlags, unsigned &LoOpFlags,
                               const GlobalValue *GV = nullptr) {
  HiOpFlags = PPCII::MO_HA;
  LoOpFlags = PPCII::MO_LO;

  // Don't use the pic base if not in PIC relocation model.
  bool isPIC = TM.getRelocationModel() == Reloc::PIC_;

  if (isPIC) {
    HiOpFlags |= PPCII::MO_PIC_FLAG;
    LoOpFlags |= PPCII::MO_PIC_FLAG;
  }

  // If this is a reference to a global value that requires a non-lazy-ptr,
  // make sure that instruction lowering adds it.
  if (GV && Subtarget.hasLazyResolverStub(GV)) {
    HiOpFlags |= PPCII::MO_NLP_FLAG;
    LoOpFlags |= PPCII::MO_NLP_FLAG;

    if (GV->hasHiddenVisibility()) {
      HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
      LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
    }
  }

  return isPIC;
}

static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
                             SelectionDAG &DAG) {
  SDLoc DL(HiPart);
  EVT PtrVT = HiPart.getValueType();
  SDValue Zero = DAG.getConstant(0, DL, PtrVT);

  SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);

  // With PIC, the first instruction is actually "GR+hi(&G)".
  if (isPIC)
    Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);

  // Generate non-pic code that has direct accesses to the constant pool.
  // The address of the global is just (hi(&g)+lo(&g)).
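  // (For the non-PIC case this typically materializes as a lis/addi pair
  // using the @ha and @l relocations implied by MO_HA and MO_LO.)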
  return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
}

static void setUsesTOCBasePtr(MachineFunction &MF) {
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setUsesTOCBasePtr();
}

static void setUsesTOCBasePtr(SelectionDAG &DAG) {
  setUsesTOCBasePtr(DAG.getMachineFunction());
}

static SDValue getTOCEntry(SelectionDAG &DAG, SDLoc dl, bool Is64Bit,
                           SDValue GA) {
  EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
  SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) :
                DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);

  SDValue Ops[] = { GA, Reg };
  return DAG.getMemIntrinsicNode(PPCISD::TOC_ENTRY, dl,
                                 DAG.getVTList(VT, MVT::Other), Ops, VT,
                                 MachinePointerInfo::getGOT(), 0, false, true,
                                 false, 0);
}

SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  const Constant *C = CP->getConstVal();

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
    return getTOCEntry(DAG, SDLoc(CP), true, GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC =
      GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag);

  if (isPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(),
                                           PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, SDLoc(CP), false, GA);
  }

  SDValue CPIHi =
      DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
  SDValue CPILo =
      DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
  return LowerLabelRef(CPIHi, CPILo, isPIC, DAG);
}

SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
    return getTOCEntry(DAG, SDLoc(JT), true, GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC =
      GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag);

  if (isPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
                                        PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, SDLoc(GA), false, GA);
  }

  SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
  SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
  return LowerLabelRef(JTIHi, JTILo, isPIC, DAG);
}

SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
  const BlockAddress *BA = BASDN->getBlockAddress();

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual BlockAddress is stored in the TOC.
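  // (As in the lowerings above, getTOCEntry loads the address out of the TOC,
  // anchored on X2 for 64-bit or the PIC base register for 32-bit.)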
  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
    return getTOCEntry(DAG, SDLoc(BASDN), true, GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC =
      GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag);
  SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
  SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
  return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG);
}

SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {

  // FIXME: TLS addresses currently use medium model code sequences,
  // which is the most useful form.  Eventually support for small and
  // large models could be added if users need it, at the cost of
  // additional complexity.
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  SDLoc dl(GA);
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  bool is64bit = Subtarget.isPPC64();
  const Module *M = DAG.getMachineFunction().getFunction()->getParent();
  PICLevel::Level picLevel = M->getPICLevel();

  TLSModel::Model Model = getTargetMachine().getTLSModel(GV);

  if (Model == TLSModel::LocalExec) {
    SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_TPREL_HA);
    SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_TPREL_LO);
    SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2,
                                     is64bit ? MVT::i64 : MVT::i32);
    SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
    return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
  }

  if (Model == TLSModel::InitialExec) {
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                                PPCII::MO_TLS);
    SDValue GOTPtr;
    if (is64bit) {
      setUsesTOCBasePtr(DAG);
      SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
      GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
                           PtrVT, GOTReg, TGA);
    } else
      GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
    SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl,
                                   PtrVT, TGA, GOTPtr);
    return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
  }

  if (Model == TLSModel::GeneralDynamic) {
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue GOTPtr;
    if (is64bit) {
      setUsesTOCBasePtr(DAG);
      SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
      GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
                           GOTReg, TGA);
    } else {
      if (picLevel == PICLevel::Small)
        GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
      else
        GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
    }
    return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
                       GOTPtr, TGA, TGA);
  }

  if (Model == TLSModel::LocalDynamic) {
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue GOTPtr;
    if (is64bit) {
      setUsesTOCBasePtr(DAG);
      SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
      GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
                           GOTReg, TGA);
    } else {
      if (picLevel == PICLevel::Small)
        GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
      else
        GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
    }
    SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
                                  PtrVT, GOTPtr, TGA, TGA);
    SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
                                      PtrVT, TLSAddr, TGA);
    return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
  }

  llvm_unreachable("Unknown TLS model!");
}

SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  SDLoc DL(GSDN);
  const GlobalValue *GV = GSDN->getGlobal();

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
    return getTOCEntry(DAG, DL, true, GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC =
      GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag, GV);

  if (isPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
                                            GSDN->getOffset(),
                                            PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, DL, false, GA);
  }

  SDValue GAHi =
    DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
  SDValue GALo =
    DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);

  SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG);

  // If the global reference is actually to a non-lazy-pointer, we have to do
  // an extra load to get the address of the global.
  if (MOHiFlag & PPCII::MO_NLP_FLAG)
    Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(),
                      false, false, false, 0);
  return Ptr;
}

SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc dl(Op);

  if (Op.getValueType() == MVT::v2i64) {
    // When the operands themselves are v2i64 values, we need to do something
    // special because VSX has no underlying comparison operations for these.
    if (Op.getOperand(0).getValueType() == MVT::v2i64) {
      // Equality can be handled by casting to the legal type for Altivec
      // comparisons, everything else needs to be expanded.
      if (CC == ISD::SETEQ || CC == ISD::SETNE) {
        return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
                 DAG.getSetCC(dl, MVT::v4i32,
                   DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
                   DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
                   CC));
      }

      return SDValue();
    }

    // We handle most of these in the usual way.
    return Op;
  }

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
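  // e.g. (i32 seteq X, 0) lowers to (srl (ctlz X), 5): ctlz returns the bit
  // width (32) only when X is zero, so the shifted result is exactly 1 or 0.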
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    if (C->isNullValue() && CC == ISD::SETEQ) {
      EVT VT = Op.getOperand(0).getValueType();
      SDValue Zext = Op.getOperand(0);
      if (VT.bitsLT(MVT::i32)) {
        VT = MVT::i32;
        Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
      }
      unsigned Log2b = Log2_32(VT.getSizeInBits());
      SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
      SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
                                DAG.getConstant(Log2b, dl, MVT::i32));
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
    }
    // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized.  FIXME: revisit this when we can custom lower all setcc
    // optimizations.
    if (C->isAllOnesValue() || C->isNullValue())
      return SDValue();
  }

  // If we have an integer seteq/setne, turn it into a compare against zero
  // by xor'ing the rhs with the lhs, which is faster than setting a
  // condition register, reading it back out, and masking the correct bit.  The
  // normal approach here uses sub to do this instead of xor.  Using xor exposes
  // the result to other bit-twiddling opportunities.
  EVT LHSVT = Op.getOperand(0).getValueType();
  if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    EVT VT = Op.getValueType();
    SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
                              Op.getOperand(1));
    return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
  }
  return SDValue();
}

SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
                                      const PPCSubtarget &Subtarget) const {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);

  assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");

  // gpr_index
  SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
                                    VAListPtr, MachinePointerInfo(SV), MVT::i8,
                                    false, false, false, 0);
  InChain = GprIndex.getValue(1);

  if (VT == MVT::i64) {
    // Check if GprIndex is even
    SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
                                 DAG.getConstant(1, dl, MVT::i32));
    SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
                                DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
    SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
                                          DAG.getConstant(1, dl, MVT::i32));
    // Align GprIndex to be even if it isn't
    GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
                           GprIndex);
  }

  // fpr index is 1 byte after gpr
  SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                               DAG.getConstant(1, dl, MVT::i32));

  // fpr
  SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
                                    FprPtr, MachinePointerInfo(SV), MVT::i8,
                                    false, false, false, 0);
  InChain = FprIndex.getValue(1);

  SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                                       DAG.getConstant(8, dl, MVT::i32));

  SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                                        DAG.getConstant(4, dl, MVT::i32));

  // areas
  SDValue OverflowArea = DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr,
                                     MachinePointerInfo(), false, false,
                                     false, 0);
  InChain = OverflowArea.getValue(1);

  SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr,
                                    MachinePointerInfo(), false, false,
                                    false, 0);
  InChain = RegSaveArea.getValue(1);

  // select overflow_area if index >= 8
  SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
                            DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);

  // adjustment constant gpr_index * 4/8
  SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
                                    VT.isInteger() ? GprIndex : FprIndex,
                                    DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
                                                    MVT::i32));

  // OurReg = RegSaveArea + RegConstant
  SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
                               RegConstant);

  // Floating types are 32 bytes into RegSaveArea
  if (VT.isFloatingPoint())
    OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
                         DAG.getConstant(32, dl, MVT::i32));

  // increase {f,g}pr_index by 1 (or 2 if VT is i64)
  SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
                                   VT.isInteger() ? GprIndex : FprIndex,
                                   DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
                                                   MVT::i32));

  InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
                              VT.isInteger() ? VAListPtr : FprPtr,
                              MachinePointerInfo(SV),
                              MVT::i8, false, false, 0);

  // determine if we should load from reg_save_area or overflow_area
  SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg,
                               OverflowArea);

  // increase overflow_area by 4/8 if gpr/fpr index >= 8
  SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
                                          DAG.getConstant(VT.isInteger() ? 4 : 8,
                                                          dl, MVT::i32));
  OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
                             OverflowAreaPlusN);

  InChain = DAG.getTruncStore(InChain, dl, OverflowArea,
                              OverflowAreaPtr,
                              MachinePointerInfo(),
                              MVT::i32, false, false, 0);

  return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(),
                     false, false, false, 0);
}

SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG,
                                       const PPCSubtarget &Subtarget) const {
  assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");

  // We have to copy the entire va_list struct:
  // 2*sizeof(char) + 2-byte alignment + 2*sizeof(char*) = 12 bytes
  return DAG.getMemcpy(Op.getOperand(0), Op,
                       Op.getOperand(1), Op.getOperand(2),
                       DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true,
                       false, MachinePointerInfo(), MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
                                                  SelectionDAG &DAG) const {
  return Op.getOperand(0);
}

SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  SDLoc dl(Op);

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  bool isPPC64 = (PtrVT == MVT::i64);
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = Trmp; Args.push_back(Entry);

  // TrampSize == (isPPC64 ? 48 : 40);
  Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
                               isPPC64 ? MVT::i64 : MVT::i32);
  Args.push_back(Entry);

  Entry.Node = FPtr; Args.push_back(Entry);
  Entry.Node = Nest; Args.push_back(Entry);

  // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain)
    .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
               DAG.getExternalSymbol("__trampoline_setup", PtrVT),
               std::move(Args), 0);

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
}

SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
                                        const PPCSubtarget &Subtarget) const {
  MachineFunction &MF = DAG.getMachineFunction();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  SDLoc dl(Op);

  if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
    SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
    const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
    return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                        MachinePointerInfo(SV),
                        false, false, 0);
  }

  // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
  // We suppose the given va_list is already allocated.
  //
  // typedef struct {
  //  char gpr;     /* index into the array of 8 GPRs
  //                 * stored in the register save area
  //                 * gpr=0 corresponds to r3,
  //                 * gpr=1 to r4, etc.
  //                 */
  //  char fpr;     /* index into the array of 8 FPRs
  //                 * stored in the register save area
  //                 * fpr=0 corresponds to f1,
  //                 * fpr=1 to f2, etc.
  //                 */
  //  char *overflow_arg_area;
  //                /* location on stack that holds
  //                 * the next overflow argument
  //                 */
  //  char *reg_save_area;
  //                /* where r3:r10 and f1:f8 (if saved)
  //                 * are stored
  //                 */
  // } va_list[1];

  SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
  SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());

  SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
                                            PtrVT);
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 PtrVT);

  uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
  SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);

  uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
  SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);

  uint64_t FPROffset = 1;
  SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  // Store first byte: number of int regs
  SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR,
                                         Op.getOperand(1),
                                         MachinePointerInfo(SV),
                                         MVT::i8, false, false, 0);
  uint64_t nextOffset = FPROffset;
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
                                ConstFPROffset);

  // Store second byte: number of float regs
  SDValue secondStore =
    DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
                      MachinePointerInfo(SV, nextOffset), MVT::i8,
                      false, false, 0);
  nextOffset += StackOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);

  // Store second word: arguments given on stack
  SDValue thirdStore =
    DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
                 MachinePointerInfo(SV, nextOffset),
                 false, false, 0);
  nextOffset += FrameOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);

  // Store third word: arguments given in registers
  return DAG.getStore(thirdStore, dl, FR, nextPtr,
                      MachinePointerInfo(SV, nextOffset),
                      false, false, 0);
}

#include "PPCGenCallingConv.inc"

// Function whose sole purpose is to kill compiler warnings
// stemming from unused functions included from PPCGenCallingConv.inc.
CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const {
  return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS;
}

bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                      CCValAssign::LocInfo &LocInfo,
                                      ISD::ArgFlagsTy &ArgFlags,
                                      CCState &State) {
  return true;
}

bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
                                             MVT &LocVT,
                                             CCValAssign::LocInfo &LocInfo,
                                             ISD::ArgFlagsTy &ArgFlags,
                                             CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // Skip one register if the first unallocated register has an even register
  // number and there are still argument registers available which have not
  // been allocated yet.  RegNum is actually an index into ArgRegs, which means
  // we need to skip a register if RegNum is odd.
  if (RegNum != NumArgRegs && RegNum % 2 == 1) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the first
  // unallocated register has an odd register number and does not actually
  // allocate a register for the current argument.
  return false;
}

bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
                                               MVT &LocVT,
                                               CCValAssign::LocInfo &LocInfo,
                                               ISD::ArgFlagsTy &ArgFlags,
                                               CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8
  };

  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // If there is only one floating-point register left we need to put both f64
  // values of a split ppc_fp128 value on the stack.
  if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the two
  // f64 values a ppc_fp128 value is split into are both passed in registers
  // or both passed on the stack and does not actually allocate a register
  // for the current argument.
  return false;
}

/// FPR - The set of FP registers that should be allocated for arguments,
/// on Darwin.
static const MCPhysReg FPR[] = {PPC::F1,  PPC::F2,  PPC::F3, PPC::F4, PPC::F5,
                                PPC::F6,  PPC::F7,  PPC::F8, PPC::F9, PPC::F10,
                                PPC::F11, PPC::F12, PPC::F13};

/// QFPR - The set of QPX registers that should be allocated for arguments.
static const MCPhysReg QFPR[] = {
    PPC::QF1, PPC::QF2, PPC::QF3,  PPC::QF4,  PPC::QF5,  PPC::QF6, PPC::QF7,
    PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};

/// CalculateStackSlotSize - Calculates the size reserved for this argument on
/// the stack.
static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
                                       unsigned PtrByteSize) {
  unsigned ArgSize = ArgVT.getStoreSize();
  if (Flags.isByVal())
    ArgSize = Flags.getByValSize();

  // Round up to multiples of the pointer size, except for array members,
  // which are always packed.
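  // e.g. a 6-byte by-value struct occupies a full 8-byte slot when
  // PtrByteSize is 8: (6 + 8 - 1) / 8 * 8 == 8.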
  if (!Flags.isInConsecutiveRegs())
    ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

  return ArgSize;
}

/// CalculateStackSlotAlignment - Calculates the alignment of this argument
/// on the stack.
static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
                                            ISD::ArgFlagsTy Flags,
                                            unsigned PtrByteSize) {
  unsigned Align = PtrByteSize;

  // Altivec parameters are padded to a 16 byte boundary.
  if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
      ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
      ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
      ArgVT == MVT::v1i128)
    Align = 16;
  // QPX vector types stored in double-precision are padded to a 32 byte
  // boundary.
  else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1)
    Align = 32;

  // ByVal parameters are aligned as requested.
  if (Flags.isByVal()) {
    unsigned BVAlign = Flags.getByValAlign();
    if (BVAlign > PtrByteSize) {
      if (BVAlign % PtrByteSize != 0)
        llvm_unreachable(
            "ByVal alignment is not a multiple of the pointer size");

      Align = BVAlign;
    }
  }

  // Array members are always packed to their original alignment.
  if (Flags.isInConsecutiveRegs()) {
    // If the array member was split into multiple registers, the first
    // needs to be aligned to the size of the full type.  (Except for
    // ppcf128, which is only aligned as its f64 components.)
    if (Flags.isSplit() && OrigVT != MVT::ppcf128)
      Align = OrigVT.getStoreSize();
    else
      Align = ArgVT.getStoreSize();
  }

  return Align;
}

/// CalculateStackSlotUsed - Return whether this argument will use its
/// stack slot (instead of being passed in registers).  ArgOffset,
/// AvailableFPRs, and AvailableVRs must hold the current argument
/// position, and will be updated to account for this argument.
static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
                                   ISD::ArgFlagsTy Flags,
                                   unsigned PtrByteSize,
                                   unsigned LinkageSize,
                                   unsigned ParamAreaSize,
                                   unsigned &ArgOffset,
                                   unsigned &AvailableFPRs,
                                   unsigned &AvailableVRs, bool HasQPX) {
  bool UseMemory = false;

  // Respect alignment of argument on the stack.
  unsigned Align =
    CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
  ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
  // If there's no space left in the argument save area, we must
  // use memory (this check also catches zero-sized arguments).
  if (ArgOffset >= LinkageSize + ParamAreaSize)
    UseMemory = true;

  // Allocate argument on the stack.
  ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
  if (Flags.isInConsecutiveRegsLast())
    ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
  // If we overran the argument save area, we must use memory
  // (this check catches arguments passed partially in memory)
  if (ArgOffset > LinkageSize + ParamAreaSize)
    UseMemory = true;

  // However, if the argument is actually passed in an FPR or a VR,
  // we don't use memory after all.
  if (!Flags.isByVal()) {
    if (ArgVT == MVT::f32 || ArgVT == MVT::f64 ||
        // QPX registers overlap with the scalar FP registers.
        (HasQPX && (ArgVT == MVT::v4f32 ||
                    ArgVT == MVT::v4f64 ||
                    ArgVT == MVT::v4i1)))
      if (AvailableFPRs > 0) {
        --AvailableFPRs;
        return false;
      }
    if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
        ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
        ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
        ArgVT == MVT::v1i128)
      if (AvailableVRs > 0) {
        --AvailableVRs;
        return false;
      }
  }

  return UseMemory;
}

/// EnsureStackAlignment - Round stack frame size up from NumBytes to
/// ensure minimum alignment required for target.
static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
                                     unsigned NumBytes) {
  unsigned TargetAlign = Lowering->getStackAlignment();
  unsigned AlignMask = TargetAlign - 1;
  NumBytes = (NumBytes + AlignMask) & ~AlignMask;
  return NumBytes;
}

SDValue
PPCTargetLowering::LowerFormalArguments(SDValue Chain,
                                        CallingConv::ID CallConv, bool isVarArg,
                                        const SmallVectorImpl<ISD::InputArg>
                                          &Ins,
                                        SDLoc dl, SelectionDAG &DAG,
                                        SmallVectorImpl<SDValue> &InVals)
                                          const {
  if (Subtarget.isSVR4ABI()) {
    if (Subtarget.isPPC64())
      return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins,
                                         dl, DAG, InVals);
    else
      return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins,
                                         dl, DAG, InVals);
  } else {
    return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
                                       dl, DAG, InVals);
  }
}

SDValue
PPCTargetLowering::LowerFormalArguments_32SVR4(
                                      SDValue Chain,
                                      CallingConv::ID CallConv, bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg>
                                        &Ins,
                                      SDLoc dl, SelectionDAG &DAG,
                                      SmallVectorImpl<SDValue> &InVals) const {

  // 32-bit SVR4 ABI Stack Frame Layout:
  //              +-----------------------------------+
  //        +-->  |            Back chain             |
  //        |     +-----------------------------------+
  //        |     | Floating-point register save area |
  //        |     +-----------------------------------+
  //        |     |    General register save area     |
  //        |     +-----------------------------------+
  //        |     |          CR save word             |
  //        |     +-----------------------------------+
  //        |     |         VRSAVE save word          |
  //        |     +-----------------------------------+
  //        |     |         Alignment padding         |
  //        |     +-----------------------------------+
  //        |     |     Vector register save area     |
  //        |     +-----------------------------------+
  //        |     |       Local variable space        |
  //        |     +-----------------------------------+
  //        |     |        Parameter list area        |
  //        |     +-----------------------------------+
  //        |     |           LR save word            |
  //        |     +-----------------------------------+
  // SP-->  +---  |            Back chain             |
  //              +-----------------------------------+
  //
  // Specifications:
  //   System V Application Binary Interface PowerPC Processor Supplement
  //   AltiVec Technology Programming Interface Manual

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = 4;

  // Assign locations to all of the incoming arguments.
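  // (CC_PPC32_SVR4 and the other calling-convention functions used below are
  // generated from PPCCallingConv.td via the included PPCGenCallingConv.inc.)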
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // Reserve space for the linkage area on the stack.
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  CCInfo.AllocateStack(LinkageSize, PtrByteSize);

  CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];

    // Arguments stored in registers.
    if (VA.isRegLoc()) {
      const TargetRegisterClass *RC;
      EVT ValVT = VA.getValVT();

      switch (ValVT.getSimpleVT().SimpleTy) {
        default:
          llvm_unreachable("ValVT not supported by formal arguments Lowering");
        case MVT::i1:
        case MVT::i32:
          RC = &PPC::GPRCRegClass;
          break;
        case MVT::f32:
          if (Subtarget.hasP8Vector())
            RC = &PPC::VSSRCRegClass;
          else
            RC = &PPC::F4RCRegClass;
          break;
        case MVT::f64:
          if (Subtarget.hasVSX())
            RC = &PPC::VSFRCRegClass;
          else
            RC = &PPC::F8RCRegClass;
          break;
        case MVT::v16i8:
        case MVT::v8i16:
        case MVT::v4i32:
          RC = &PPC::VRRCRegClass;
          break;
        case MVT::v4f32:
          RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass;
          break;
        case MVT::v2f64:
        case MVT::v2i64:
          RC = &PPC::VSHRCRegClass;
          break;
        case MVT::v4f64:
          RC = &PPC::QFRCRegClass;
          break;
        case MVT::v4i1:
          RC = &PPC::QBRCRegClass;
          break;
      }

      // Transform the arguments stored in physical registers into virtual ones.
      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
      SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
                                            ValVT == MVT::i1 ? MVT::i32 : ValVT);

      if (ValVT == MVT::i1)
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);

      InVals.push_back(ArgValue);
    } else {
      // Argument stored in memory.
      assert(VA.isMemLoc());

      unsigned ArgSize = VA.getLocVT().getStoreSize();
      int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(),
                                      isImmutable);

      // Create load nodes to retrieve arguments from the stack.
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
                                   MachinePointerInfo(),
                                   false, false, false, 0));
    }
  }

  // Assign locations to all of the incoming aggregate by value arguments.
  // Aggregates passed by value are stored in the local variable space of the
  // caller's stack frame, right above the parameter list area.
  SmallVector<CCValAssign, 16> ByValArgLocs;
  CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                      ByValArgLocs, *DAG.getContext());

  // Reserve stack space for the allocations in CCInfo.
  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);

  CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);

  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
  MinReservedArea = std::max(MinReservedArea, LinkageSize);

  // Set the size that is at least reserved in caller of this function.  Tail
  // call optimized function's reserved stack space needs to be aligned so that
  // taking the difference between two stack areas will result in an aligned
  // stack.
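  // e.g. with a 16-byte target stack alignment, a MinReservedArea of 20
  // bytes is rounded up to 32 by EnsureStackAlignment below.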
2908   MinReservedArea =
2909     EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
2910   FuncInfo->setMinReservedArea(MinReservedArea);
2911
2912   SmallVector<SDValue, 8> MemOps;
2913
2914   // If the function takes a variable number of arguments, make a frame index
2915   // for the start of the first vararg value... for expansion of llvm.va_start.
2916   if (isVarArg) {
2917     static const MCPhysReg GPArgRegs[] = {
2918       PPC::R3, PPC::R4, PPC::R5, PPC::R6,
2919       PPC::R7, PPC::R8, PPC::R9, PPC::R10,
2920     };
2921     const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
2922
2923     static const MCPhysReg FPArgRegs[] = {
2924       PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
2925       PPC::F8
2926     };
2927     unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
2928     if (DisablePPCFloatInVariadic)
2929       NumFPArgRegs = 0;
2930
2931     FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
2932     FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
2933
2934     // Make room for NumGPArgRegs and NumFPArgRegs.
2935     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
2936                 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
2937
2938     FuncInfo->setVarArgsStackOffset(
2939       MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
2940                              CCInfo.getNextStackOffset(), true));
2941
2942     FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false));
2943     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
2944
2945     // The fixed integer arguments of a variadic function are stored to the
2946     // VarArgsFrameIndex on the stack so that they may be loaded by
2947     // dereferencing the result of va_next.
2948     for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
2949       // Get an existing live-in vreg, or add a new one.
2950       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
2951       if (!VReg)
2952         VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
2953
2954       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
2955       SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
2956                                    MachinePointerInfo(), false, false, 0);
2957       MemOps.push_back(Store);
2958       // Increment the address by four for the next argument to store.
2959       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
2960       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
2961     }
2962
2963     // FIXME 32-bit SVR4: We only need to save FP argument registers if CR
2964     // bit 6 is set.
2965     // The double arguments are stored to the VarArgsFrameIndex
2966     // on the stack.
2967     for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
2968       // Get an existing live-in vreg, or add a new one.
2969       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
2970       if (!VReg)
2971         VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
2972
2973       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
2974       SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
2975                                    MachinePointerInfo(), false, false, 0);
2976       MemOps.push_back(Store);
2977       // Increment the address by eight for the next argument to store.
2978       SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
2979                                        PtrVT);
2980       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
2981     }
2982   }
2983
2984   if (!MemOps.empty())
2985     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
2986
2987   return Chain;
2988 }
2989
2990 // PPC64 passes i8, i16, and i32 values in i64 registers.  Promote
2991 // value to MVT::i64 and then truncate to the correct register size.
2992 SDValue
2993 PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
2994                                      SelectionDAG &DAG, SDValue ArgVal,
2995                                      SDLoc dl) const {
2996   if (Flags.isSExt())
2997     ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
2998                          DAG.getValueType(ObjectVT));
2999   else if (Flags.isZExt())
3000     ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
3001                          DAG.getValueType(ObjectVT));
3002
3003   return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
3004 }
3005
3006 SDValue
3007 PPCTargetLowering::LowerFormalArguments_64SVR4(
3008                                       SDValue Chain,
3009                                       CallingConv::ID CallConv, bool isVarArg,
3010                                       const SmallVectorImpl<ISD::InputArg>
3011                                         &Ins,
3012                                       SDLoc dl, SelectionDAG &DAG,
3013                                       SmallVectorImpl<SDValue> &InVals) const {
3014   // TODO: add description of PPC stack frame format, or at least some docs.
3015   //
3016   bool isELFv2ABI = Subtarget.isELFv2ABI();
3017   bool isLittleEndian = Subtarget.isLittleEndian();
3018   MachineFunction &MF = DAG.getMachineFunction();
3019   MachineFrameInfo *MFI = MF.getFrameInfo();
3020   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3021
3022   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
3023          "fastcc not supported on varargs functions");
3024
3025   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
3026   // Potential tail calls could cause overwriting of argument stack slots.
3027   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3028                        (CallConv == CallingConv::Fast));
3029   unsigned PtrByteSize = 8;
3030   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3031
3032   static const MCPhysReg GPR[] = {
3033     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3034     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3035   };
3036   static const MCPhysReg VR[] = {
3037     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3038     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3039   };
3040   static const MCPhysReg VSRH[] = {
3041     PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8,
3042     PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13
3043   };
3044
3045   const unsigned Num_GPR_Regs = array_lengthof(GPR);
3046   const unsigned Num_FPR_Regs = 13;
3047   const unsigned Num_VR_Regs  = array_lengthof(VR);
3048   const unsigned Num_QFPR_Regs = Num_FPR_Regs;
3049
3050   // Do a first pass over the arguments to determine whether the ABI
3051   // guarantees that our caller has allocated the parameter save area
3052   // on its stack frame. In the ELFv1 ABI, this is always the case;
3053   // in the ELFv2 ABI, it is true if this is a vararg function or if
3054   // any parameter is located in a stack slot.
3055
3056   bool HasParameterArea = !isELFv2ABI || isVarArg;
3057   unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
3058   unsigned NumBytes = LinkageSize;
3059   unsigned AvailableFPRs = Num_FPR_Regs;
3060   unsigned AvailableVRs = Num_VR_Regs;
3061   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3062     if (Ins[i].Flags.isNest())
3063       continue;
3064
3065     if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
3066                                PtrByteSize, LinkageSize, ParamAreaSize,
3067                                NumBytes, AvailableFPRs, AvailableVRs,
3068                                Subtarget.hasQPX()))
3069       HasParameterArea = true;
3070   }
3071
3072   // Add DAG nodes to load the arguments or copy them out of registers.  On
3073   // entry to a function on PPC, the arguments start after the linkage area,
3074   // although the first ones are often in registers.
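  // As a sketch (sizes are illustrative, not hard-coded): under ELFv1 the
  // linkage area is 48 bytes, so the first parameter doubleword lives at
  // SP+48 and corresponds to GPR X3; under ELFv2 it is 32 bytes.  The code
  // below only uses getLinkageSize(), never these literal values.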
3075
3076   unsigned ArgOffset = LinkageSize;
3077   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3078   unsigned &QFPR_idx = FPR_idx;
3079   SmallVector<SDValue, 8> MemOps;
3080   Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
3081   unsigned CurArgIdx = 0;
3082   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
3083     SDValue ArgVal;
3084     bool needsLoad = false;
3085     EVT ObjectVT = Ins[ArgNo].VT;
3086     EVT OrigVT = Ins[ArgNo].ArgVT;
3087     unsigned ObjSize = ObjectVT.getStoreSize();
3088     unsigned ArgSize = ObjSize;
3089     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
3090     if (Ins[ArgNo].isOrigArg()) {
3091       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
3092       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
3093     }
3094     // We re-align the argument offset for each argument, except when using
3095     // the fast calling convention, where we must do so only when the
3096     // argument will actually use a stack slot.
3097     unsigned CurArgOffset, Align;
3098     auto ComputeArgOffset = [&]() {
3099       /* Respect alignment of argument on the stack. */
3100       Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
3101       ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
3102       CurArgOffset = ArgOffset;
3103     };
3104
3105     if (CallConv != CallingConv::Fast) {
3106       ComputeArgOffset();
3107
3108       /* Compute GPR index associated with argument offset. */
3109       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
3110       GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
3111     }
3112
3113     // FIXME: the codegen can be much improved in some cases.
3114     // We do not have to keep everything in memory.
3115     if (Flags.isByVal()) {
3116       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
3117
3118       if (CallConv == CallingConv::Fast)
3119         ComputeArgOffset();
3120
3121       // ObjSize is the true size; ArgSize is that rounded up to a multiple of registers.
3122       ObjSize = Flags.getByValSize();
3123       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3124       // Empty aggregate parameters do not take up registers.  Examples:
3125       //   struct { } a;
3126       //   union  { } b;
3127       //   int c[0];
3128       // etc.  However, we have to provide a place-holder in InVals, so
3129       // pretend we have an 8-byte item at the current address for that
3130       // purpose.
3131       if (!ObjSize) {
3132         int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
3133         SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3134         InVals.push_back(FIN);
3135         continue;
3136       }
3137
3138       // Create a stack object covering all stack doublewords occupied
3139       // by the argument.  If the argument is (fully or partially) on
3140       // the stack, or if the argument is fully in registers but the
3141       // caller has allocated the parameter save area anyway, we can refer
3142       // directly to the caller's stack frame.  Otherwise, create a
3143       // local copy in our own frame.
3144       int FI;
3145       if (HasParameterArea ||
3146           ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
3147         FI = MFI->CreateFixedObject(ArgSize, ArgOffset, false, true);
3148       else
3149         FI = MFI->CreateStackObject(ArgSize, Align, false);
3150       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3151
3152       // Handle aggregates smaller than 8 bytes.
3153       if (ObjSize < PtrByteSize) {
3154         // The value of the object is its address, which differs from the
3155         // address of the enclosing doubleword on big-endian systems.
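        // Illustrative arithmetic: a 2-byte aggregate is right-justified in
        // its 8-byte slot on big-endian targets, so its address is
        // FIN + (PtrByteSize - ObjSize) = FIN + 6 -- exactly the adjustment
        // applied below.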
3156 SDValue Arg = FIN; 3157 if (!isLittleEndian) { 3158 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT); 3159 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff); 3160 } 3161 InVals.push_back(Arg); 3162 3163 if (GPR_idx != Num_GPR_Regs) { 3164 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3165 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3166 SDValue Store; 3167 3168 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 3169 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 3170 (ObjSize == 2 ? MVT::i16 : MVT::i32)); 3171 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg, 3172 MachinePointerInfo(FuncArg), 3173 ObjType, false, false, 0); 3174 } else { 3175 // For sizes that don't fit a truncating store (3, 5, 6, 7), 3176 // store the whole register as-is to the parameter save area 3177 // slot. 3178 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3179 MachinePointerInfo(FuncArg), 3180 false, false, 0); 3181 } 3182 3183 MemOps.push_back(Store); 3184 } 3185 // Whether we copied from a register or not, advance the offset 3186 // into the parameter save area by a full doubleword. 3187 ArgOffset += PtrByteSize; 3188 continue; 3189 } 3190 3191 // The value of the object is its address, which is the address of 3192 // its first stack doubleword. 3193 InVals.push_back(FIN); 3194 3195 // Store whatever pieces of the object are in registers to memory. 3196 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3197 if (GPR_idx == Num_GPR_Regs) 3198 break; 3199 3200 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3201 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3202 SDValue Addr = FIN; 3203 if (j) { 3204 SDValue Off = DAG.getConstant(j, dl, PtrVT); 3205 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off); 3206 } 3207 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr, 3208 MachinePointerInfo(FuncArg, j), 3209 false, false, 0); 3210 MemOps.push_back(Store); 3211 ++GPR_idx; 3212 } 3213 ArgOffset += ArgSize; 3214 continue; 3215 } 3216 3217 switch (ObjectVT.getSimpleVT().SimpleTy) { 3218 default: llvm_unreachable("Unhandled argument type!"); 3219 case MVT::i1: 3220 case MVT::i32: 3221 case MVT::i64: 3222 if (Flags.isNest()) { 3223 // The 'nest' parameter, if any, is passed in R11. 3224 unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass); 3225 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3226 3227 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3228 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3229 3230 break; 3231 } 3232 3233 // These can be scalar arguments or elements of an integer array type 3234 // passed directly. Clang may use those instead of "byval" aggregate 3235 // types to avoid forcing arguments to memory unnecessarily. 3236 if (GPR_idx != Num_GPR_Regs) { 3237 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3238 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3239 3240 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3241 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3242 // value to MVT::i64 and then truncate to the correct register size. 
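        // E.g. (sketch): a sign-extended i32 argument arrives in the low half
        // of an i64 GPR; extendArgForPPC64 wraps it in AssertSext(i64) and
        // truncates back to i32, letting later extensions of the value fold.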
3243         ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3244     } else {
3245       if (CallConv == CallingConv::Fast)
3246         ComputeArgOffset();
3247
3248       needsLoad = true;
3249       ArgSize = PtrByteSize;
3250     }
3251     if (CallConv != CallingConv::Fast || needsLoad)
3252       ArgOffset += 8;
3253     break;
3254
3255   case MVT::f32:
3256   case MVT::f64:
3257     // These can be scalar arguments or elements of a float array type
3258     // passed directly.  The latter are used to implement ELFv2 homogeneous
3259     // float aggregates.
3260     if (FPR_idx != Num_FPR_Regs) {
3261       unsigned VReg;
3262
3263       if (ObjectVT == MVT::f32)
3264         VReg = MF.addLiveIn(FPR[FPR_idx],
3265                             Subtarget.hasP8Vector()
3266                                 ? &PPC::VSSRCRegClass
3267                                 : &PPC::F4RCRegClass);
3268       else
3269         VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
3270                                               ? &PPC::VSFRCRegClass
3271                                               : &PPC::F8RCRegClass);
3272
3273       ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3274       ++FPR_idx;
3275     } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
3276       // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
3277       // once we support fp <-> gpr moves.
3278
3279       // This can only ever happen in the presence of f32 array types,
3280       // since otherwise we never run out of FPRs before running out
3281       // of GPRs.
3282       unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3283       ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3284
3285       if (ObjectVT == MVT::f32) {
3286         if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
3287           ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
3288                                DAG.getConstant(32, dl, MVT::i32));
3289         ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
3290       }
3291
3292       ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
3293     } else {
3294       if (CallConv == CallingConv::Fast)
3295         ComputeArgOffset();
3296
3297       needsLoad = true;
3298     }
3299
3300     // When passing an array of floats, the array occupies consecutive
3301     // space in the argument area; only round up to the next doubleword
3302     // at the end of the array.  Otherwise, each float takes 8 bytes.
3303     if (CallConv != CallingConv::Fast || needsLoad) {
3304       ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
3305       ArgOffset += ArgSize;
3306       if (Flags.isInConsecutiveRegsLast())
3307         ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3308     }
3309     break;
3310   case MVT::v4f32:
3311   case MVT::v4i32:
3312   case MVT::v8i16:
3313   case MVT::v16i8:
3314   case MVT::v2f64:
3315   case MVT::v2i64:
3316   case MVT::v1i128:
3317     if (!Subtarget.hasQPX()) {
3318       // These can be scalar arguments or elements of a vector array type
3319       // passed directly.  The latter are used to implement ELFv2 homogeneous
3320       // vector aggregates.
3321       if (VR_idx != Num_VR_Regs) {
3322         unsigned VReg = (ObjectVT == MVT::v2f64 || ObjectVT == MVT::v2i64) ?
3323                         MF.addLiveIn(VSRH[VR_idx], &PPC::VSHRCRegClass) :
3324                         MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
3325         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3326         ++VR_idx;
3327       } else {
3328         if (CallConv == CallingConv::Fast)
3329           ComputeArgOffset();
3330
3331         needsLoad = true;
3332       }
3333       if (CallConv != CallingConv::Fast || needsLoad)
3334         ArgOffset += 16;
3335       break;
3336     } // not QPX
3337
3338     assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 &&
3339            "Invalid QPX parameter type");
3340     /* fall through */
3341
3342   case MVT::v4f64:
3343   case MVT::v4i1:
3344     // QPX vectors are treated like their scalar floating-point subregisters
3345     // (except that they're larger).
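    // Concretely (illustrative): a v4f64 or v4i1 argument occupies 32 bytes
    // of parameter area, while v4f32 occupies 16 -- the Sz computed below.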
3346     unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32;
3347     if (QFPR_idx != Num_QFPR_Regs) {
3348       const TargetRegisterClass *RC;
3349       switch (ObjectVT.getSimpleVT().SimpleTy) {
3350       case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
3351       case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
3352       default:         RC = &PPC::QBRCRegClass; break;
3353       }
3354
3355       unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
3356       ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3357       ++QFPR_idx;
3358     } else {
3359       if (CallConv == CallingConv::Fast)
3360         ComputeArgOffset();
3361       needsLoad = true;
3362     }
3363     if (CallConv != CallingConv::Fast || needsLoad)
3364       ArgOffset += Sz;
3365     break;
3366   }
3367
3368   // We need to load the argument to a virtual register if we determined
3369   // above that we ran out of physical registers of the appropriate type.
3370   if (needsLoad) {
3371     if (ObjSize < ArgSize && !isLittleEndian)
3372       CurArgOffset += ArgSize - ObjSize;
3373     int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
3374     SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3375     ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
3376                          false, false, false, 0);
3377   }
3378
3379   InVals.push_back(ArgVal);
3380 }
3381
3382   // Area that is at least reserved in the caller of this function.
3383   unsigned MinReservedArea;
3384   if (HasParameterArea)
3385     MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
3386   else
3387     MinReservedArea = LinkageSize;
3388
3389   // Set the size that is at least reserved in the caller of this function.
3390   // Tail call optimized functions' reserved stack space needs to be aligned
3391   // so that taking the difference between two stack areas will result in an
3392   // aligned stack.
3393   MinReservedArea =
3394     EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3395   FuncInfo->setMinReservedArea(MinReservedArea);
3396
3397   // If the function takes a variable number of arguments, make a frame index
3398   // for the start of the first vararg value... for expansion of llvm.va_start.
3399   if (isVarArg) {
3400     int Depth = ArgOffset;
3401
3402     FuncInfo->setVarArgsFrameIndex(
3403       MFI->CreateFixedObject(PtrByteSize, Depth, true));
3404     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3405
3406     // If this function is vararg, store any remaining integer argument regs
3407     // to their spots on the stack so that they may be loaded by dereferencing
3408     // the result of va_next.
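    // For example (not normative): if the fixed arguments consumed X3-X5,
    // GPR_idx resumes at 3 and the loop below spills X6-X10 into five
    // consecutive doublewords starting at ArgOffset.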
3409     for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
3410          GPR_idx < Num_GPR_Regs; ++GPR_idx) {
3411       unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3412       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3413       SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
3414                                    MachinePointerInfo(), false, false, 0);
3415       MemOps.push_back(Store);
3416       // Increment the address by eight for the next argument to store.
3417       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
3418       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3419     }
3420   }
3421
3422   if (!MemOps.empty())
3423     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3424
3425   return Chain;
3426 }
3427
3428 SDValue
3429 PPCTargetLowering::LowerFormalArguments_Darwin(
3430                                       SDValue Chain,
3431                                       CallingConv::ID CallConv, bool isVarArg,
3432                                       const SmallVectorImpl<ISD::InputArg>
3433                                         &Ins,
3434                                       SDLoc dl, SelectionDAG &DAG,
3435                                       SmallVectorImpl<SDValue> &InVals) const {
3436   // TODO: add description of PPC stack frame format, or at least some docs.
3437   //
3438   MachineFunction &MF = DAG.getMachineFunction();
3439   MachineFrameInfo *MFI = MF.getFrameInfo();
3440   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3441
3442   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
3443   bool isPPC64 = PtrVT == MVT::i64;
3444   // Potential tail calls could cause overwriting of argument stack slots.
3445   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3446                        (CallConv == CallingConv::Fast));
3447   unsigned PtrByteSize = isPPC64 ? 8 : 4;
3448   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3449   unsigned ArgOffset = LinkageSize;
3450   // Area that is at least reserved in the caller of this function.
3451   unsigned MinReservedArea = ArgOffset;
3452
3453   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
3454     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3455     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3456   };
3457   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
3458     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3459     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3460   };
3461   static const MCPhysReg VR[] = {
3462     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3463     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3464   };
3465
3466   const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
3467   const unsigned Num_FPR_Regs = 13;
3468   const unsigned Num_VR_Regs  = array_lengthof(VR);
3469
3470   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3471
3472   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
3473
3474   // In 32-bit non-varargs functions, the stack space for vectors is after the
3475   // stack space for non-vectors.  We do not use this space unless we have
3476   // too many vectors to fit in registers, something that only occurs in
3477   // constructed examples, but we still have to walk the argument list to
3478   // figure that out... for the pathological case, compute VecArgOffset as
3479   // the start of the vector parameter area.  Computing VecArgOffset is the
3480   // entire point of the following loop.
3481   unsigned VecArgOffset = ArgOffset;
3482   if (!isVarArg && !isPPC64) {
3483     for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
3484          ++ArgNo) {
3485       EVT ObjectVT = Ins[ArgNo].VT;
3486       ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
3487
3488       if (Flags.isByVal()) {
3489         // ObjSize is the true size; ArgSize is that rounded up to a multiple of regs.
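        // Rounding example (illustrative): a 10-byte byval with 4-byte
        // registers gives ArgSize = ((10 + 3) / 4) * 4 = 12.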
3490         unsigned ObjSize = Flags.getByValSize();
3491         unsigned ArgSize =
3492           ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3493         VecArgOffset += ArgSize;
3494         continue;
3495       }
3496
3497       switch(ObjectVT.getSimpleVT().SimpleTy) {
3498       default: llvm_unreachable("Unhandled argument type!");
3499       case MVT::i1:
3500       case MVT::i32:
3501       case MVT::f32:
3502         VecArgOffset += 4;
3503         break;
3504       case MVT::i64:  // PPC64
3505       case MVT::f64:
3506         // FIXME: We are guaranteed to be !isPPC64 at this point.
3507         // Does MVT::i64 apply?
3508         VecArgOffset += 8;
3509         break;
3510       case MVT::v4f32:
3511       case MVT::v4i32:
3512       case MVT::v8i16:
3513       case MVT::v16i8:
3514         // Nothing to do, we're only looking at non-vector args here.
3515         break;
3516       }
3517     }
3518   }
3519   // We've found where the vector parameter area in memory is.  Skip the
3520   // first 12 parameters; these don't use that memory.
3521   VecArgOffset = ((VecArgOffset+15)/16)*16;
3522   VecArgOffset += 12*16;
3523
3524   // Add DAG nodes to load the arguments or copy them out of registers.  On
3525   // entry to a function on PPC, the arguments start after the linkage area,
3526   // although the first ones are often in registers.
3527
3528   SmallVector<SDValue, 8> MemOps;
3529   unsigned nAltivecParamsAtEnd = 0;
3530   Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
3531   unsigned CurArgIdx = 0;
3532   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
3533     SDValue ArgVal;
3534     bool needsLoad = false;
3535     EVT ObjectVT = Ins[ArgNo].VT;
3536     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
3537     unsigned ArgSize = ObjSize;
3538     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
3539     if (Ins[ArgNo].isOrigArg()) {
3540       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
3541       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
3542     }
3543     unsigned CurArgOffset = ArgOffset;
3544
3545     // Varargs or 64-bit Altivec parameters are padded to a 16-byte boundary.
3546     if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
3547         ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
3548       if (isVarArg || isPPC64) {
3549         MinReservedArea = ((MinReservedArea+15)/16)*16;
3550         MinReservedArea += CalculateStackSlotSize(ObjectVT,
3551                                                   Flags,
3552                                                   PtrByteSize);
3553       } else  nAltivecParamsAtEnd++;
3554     } else
3555       // Calculate min reserved area.
3556       MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
3557                                                 Flags,
3558                                                 PtrByteSize);
3559
3560     // FIXME: the codegen can be much improved in some cases.
3561     // We do not have to keep everything in memory.
3562     if (Flags.isByVal()) {
3563       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
3564
3565       // ObjSize is the true size; ArgSize is that rounded up to a multiple of regs.
3566       ObjSize = Flags.getByValSize();
3567       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3568       // Objects of size 1 and 2 are right-justified, everything else is
3569       // left-justified.  This means the memory address is adjusted forwards.
3570       if (ObjSize==1 || ObjSize==2) {
3571         CurArgOffset = CurArgOffset + (4 - ObjSize);
3572       }
3573       // The value of the object is its address.
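      // E.g. (sketch): a 2-byte byval at word offset 24 was adjusted above to
      // CurArgOffset = 24 + (4 - 2) = 26, so the address handed to the
      // function points at the data itself, right-justified in its word.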
3574 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, false, true); 3575 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3576 InVals.push_back(FIN); 3577 if (ObjSize==1 || ObjSize==2) { 3578 if (GPR_idx != Num_GPR_Regs) { 3579 unsigned VReg; 3580 if (isPPC64) 3581 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3582 else 3583 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3584 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3585 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16; 3586 SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 3587 MachinePointerInfo(FuncArg), 3588 ObjType, false, false, 0); 3589 MemOps.push_back(Store); 3590 ++GPR_idx; 3591 } 3592 3593 ArgOffset += PtrByteSize; 3594 3595 continue; 3596 } 3597 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3598 // Store whatever pieces of the object are in registers 3599 // to memory. ArgOffset will be the address of the beginning 3600 // of the object. 3601 if (GPR_idx != Num_GPR_Regs) { 3602 unsigned VReg; 3603 if (isPPC64) 3604 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3605 else 3606 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3607 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 3608 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3609 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3610 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3611 MachinePointerInfo(FuncArg, j), 3612 false, false, 0); 3613 MemOps.push_back(Store); 3614 ++GPR_idx; 3615 ArgOffset += PtrByteSize; 3616 } else { 3617 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 3618 break; 3619 } 3620 } 3621 continue; 3622 } 3623 3624 switch (ObjectVT.getSimpleVT().SimpleTy) { 3625 default: llvm_unreachable("Unhandled argument type!"); 3626 case MVT::i1: 3627 case MVT::i32: 3628 if (!isPPC64) { 3629 if (GPR_idx != Num_GPR_Regs) { 3630 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3631 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 3632 3633 if (ObjectVT == MVT::i1) 3634 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal); 3635 3636 ++GPR_idx; 3637 } else { 3638 needsLoad = true; 3639 ArgSize = PtrByteSize; 3640 } 3641 // All int arguments reserve stack space in the Darwin ABI. 3642 ArgOffset += PtrByteSize; 3643 break; 3644 } 3645 // FALLTHROUGH 3646 case MVT::i64: // PPC64 3647 if (GPR_idx != Num_GPR_Regs) { 3648 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3649 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3650 3651 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3652 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3653 // value to MVT::i64 and then truncate to the correct register size. 3654 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3655 3656 ++GPR_idx; 3657 } else { 3658 needsLoad = true; 3659 ArgSize = PtrByteSize; 3660 } 3661 // All int arguments reserve stack space in the Darwin ABI. 3662 ArgOffset += 8; 3663 break; 3664 3665 case MVT::f32: 3666 case MVT::f64: 3667 // Every 4 bytes of argument space consumes one of the GPRs available for 3668 // argument passing. 
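      // Concretely (illustrative): a 32-bit Darwin f64 argument lands in an
      // FPR such as F1 yet still consumes two GPRs (say R3 and R4), which is
      // what the double increment below implements.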
3669       if (GPR_idx != Num_GPR_Regs) {
3670         ++GPR_idx;
3671         if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
3672           ++GPR_idx;
3673       }
3674       if (FPR_idx != Num_FPR_Regs) {
3675         unsigned VReg;
3676
3677         if (ObjectVT == MVT::f32)
3678           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
3679         else
3680           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
3681
3682         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3683         ++FPR_idx;
3684       } else {
3685         needsLoad = true;
3686       }
3687
3688       // All FP arguments reserve stack space in the Darwin ABI.
3689       ArgOffset += isPPC64 ? 8 : ObjSize;
3690       break;
3691     case MVT::v4f32:
3692     case MVT::v4i32:
3693     case MVT::v8i16:
3694     case MVT::v16i8:
3695       // Note that vector arguments in registers don't reserve stack space,
3696       // except in varargs functions.
3697       if (VR_idx != Num_VR_Regs) {
3698         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
3699         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3700         if (isVarArg) {
3701           while ((ArgOffset % 16) != 0) {
3702             ArgOffset += PtrByteSize;
3703             if (GPR_idx != Num_GPR_Regs)
3704               GPR_idx++;
3705           }
3706           ArgOffset += 16;
3707           GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
3708         }
3709         ++VR_idx;
3710       } else {
3711         if (!isVarArg && !isPPC64) {
3712           // Vectors go after all the non-vectors.
3713           CurArgOffset = VecArgOffset;
3714           VecArgOffset += 16;
3715         } else {
3716           // Vectors are aligned.
3717           ArgOffset = ((ArgOffset+15)/16)*16;
3718           CurArgOffset = ArgOffset;
3719           ArgOffset += 16;
3720         }
3721         needsLoad = true;
3722       }
3723       break;
3724     }
3725
3726     // We need to load the argument to a virtual register if we determined
3727     // above that we ran out of physical registers of the appropriate type.
3728     if (needsLoad) {
3729       int FI = MFI->CreateFixedObject(ObjSize,
3730                                       CurArgOffset + (ArgSize - ObjSize),
3731                                       isImmutable);
3732       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3733       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
3734                            false, false, false, 0);
3735     }
3736
3737     InVals.push_back(ArgVal);
3738   }
3739
3740   // Allow for Altivec parameters at the end, if needed.
3741   if (nAltivecParamsAtEnd) {
3742     MinReservedArea = ((MinReservedArea+15)/16)*16;
3743     MinReservedArea += 16*nAltivecParamsAtEnd;
3744   }
3745
3746   // Area that is at least reserved in the caller of this function.
3747   MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
3748
3749   // Set the size that is at least reserved in the caller of this function.
3750   // Tail call optimized functions' reserved stack space needs to be aligned
3751   // so that taking the difference between two stack areas will result in an
3752   // aligned stack.
3753   MinReservedArea =
3754     EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3755   FuncInfo->setMinReservedArea(MinReservedArea);
3756
3757   // If the function takes a variable number of arguments, make a frame index
3758   // for the start of the first vararg value... for expansion of llvm.va_start.
3759   if (isVarArg) {
3760     int Depth = ArgOffset;
3761
3762     FuncInfo->setVarArgsFrameIndex(
3763       MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
3764                              Depth, true));
3765     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3766
3767     // If this function is vararg, store any remaining integer argument regs
3768     // to their spots on the stack so that they may be loaded by dereferencing
3769     // the result of va_next.
3770     for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
3771       unsigned VReg;
3772
3773       if (isPPC64)
3774         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3775       else
3776         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
3777
3778       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3779       SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
3780                                    MachinePointerInfo(), false, false, 0);
3781       MemOps.push_back(Store);
3782       // Increment the address by the pointer size for the next argument to store.
3783       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
3784       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3785     }
3786   }
3787
3788   if (!MemOps.empty())
3789     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3790
3791   return Chain;
3792 }
3793
3794 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
3795 /// adjusted to accommodate the arguments for the tail call.
3796 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
3797                                    unsigned ParamSize) {
3798
3799   if (!isTailCall) return 0;
3800
3801   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
3802   unsigned CallerMinReservedArea = FI->getMinReservedArea();
3803   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
3804   // Remember only if the new adjustment is bigger.
3805   if (SPDiff < FI->getTailCallSPDelta())
3806     FI->setTailCallSPDelta(SPDiff);
3807
3808   return SPDiff;
3809 }
3810
3811 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3812 /// for tail call optimization. Targets which want to do tail call
3813 /// optimization should implement this function.
3814 bool
3815 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
3816                                                      CallingConv::ID CalleeCC,
3817                                                      bool isVarArg,
3818                                       const SmallVectorImpl<ISD::InputArg> &Ins,
3819                                                      SelectionDAG& DAG) const {
3820   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
3821     return false;
3822
3823   // Variable argument functions are not supported.
3824   if (isVarArg)
3825     return false;
3826
3827   MachineFunction &MF = DAG.getMachineFunction();
3828   CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
3829   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
3830     // Functions containing byval parameters are not supported.
3831     for (unsigned i = 0; i != Ins.size(); i++) {
3832       ISD::ArgFlagsTy Flags = Ins[i].Flags;
3833       if (Flags.isByVal()) return false;
3834     }
3835
3836     // Non-PIC/GOT tail calls are supported.
3837     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
3838       return true;
3839
3840     // At the moment we can only do local tail calls (in the same module,
3841     // hidden or protected) if we are generating PIC.
3842     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
3843       return G->getGlobal()->hasHiddenVisibility()
3844           || G->getGlobal()->hasProtectedVisibility();
3845   }
3846
3847   return false;
3848 }
3849
3850 /// isBLACompatibleAddress - Return the immediate to use if the specified
3851 /// 32-bit value is representable in the immediate field of a BxA instruction.
3852 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
3853   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
3854   if (!C) return nullptr;
3855
3856   int Addr = C->getZExtValue();
3857   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
3858       SignExtend32<26>(Addr) != Addr)
3859     return nullptr;  // Top 6 bits have to be sext of immediate.
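  // Worked example (illustrative): Addr = 0x2000 has its low two bits clear
  // and survives SignExtend32<26>, so it is encodable; the node built below
  // carries 0x2000 >> 2 == 0x800, the word-scaled immediate field.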
3860
3861   return DAG.getConstant((int)C->getZExtValue() >> 2, SDLoc(Op),
3862                          DAG.getTargetLoweringInfo().getPointerTy(
3863                              DAG.getDataLayout())).getNode();
3864 }
3865
3866 namespace {
3867
3868 struct TailCallArgumentInfo {
3869   SDValue Arg;
3870   SDValue FrameIdxOp;
3871   int     FrameIdx;
3872
3873   TailCallArgumentInfo() : FrameIdx(0) {}
3874 };
3875
3876 }
3877
3878 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
3879 static void
3880 StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG,
3881                                   SDValue Chain,
3882                                   const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
3883                                   SmallVectorImpl<SDValue> &MemOpChains,
3884                                   SDLoc dl) {
3885   for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
3886     SDValue Arg = TailCallArgs[i].Arg;
3887     SDValue FIN = TailCallArgs[i].FrameIdxOp;
3888     int FI = TailCallArgs[i].FrameIdx;
3889     // Store relative to the frame pointer.
3890     MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, FIN,
3891                                        MachinePointerInfo::getFixedStack(FI),
3892                                        false, false, 0));
3893   }
3894 }
3895
3896 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
3897 /// the appropriate stack slot for the tail call optimized function call.
3898 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
3899                                              MachineFunction &MF,
3900                                              SDValue Chain,
3901                                              SDValue OldRetAddr,
3902                                              SDValue OldFP,
3903                                              int SPDiff,
3904                                              bool isPPC64,
3905                                              bool isDarwinABI,
3906                                              SDLoc dl) {
3907   if (SPDiff) {
3908     // Calculate the new stack slot for the return address.
3909     int SlotSize = isPPC64 ? 8 : 4;
3910     const PPCFrameLowering *FL =
3911         MF.getSubtarget<PPCSubtarget>().getFrameLowering();
3912     int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
3913     int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize,
3914                                                           NewRetAddrLoc, true);
3915     EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
3916     SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
3917     Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
3918                          MachinePointerInfo::getFixedStack(NewRetAddr),
3919                          false, false, 0);
3920
3921     // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack
3922     // slot as the FP is never overwritten.
3923     if (isDarwinABI) {
3924       int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset();
3925       int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc,
3926                                                           true);
3927       SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
3928       Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
3929                            MachinePointerInfo::getFixedStack(NewFPIdx),
3930                            false, false, 0);
3931     }
3932   }
3933   return Chain;
3934 }
3935
3936 /// CalculateTailCallArgDest - Remember the argument for later processing.
3937 /// Calculate the position of the argument.
3938 static void
3939 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
3940                          SDValue Arg, int SPDiff, unsigned ArgOffset,
3941                          SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
3942   int Offset = ArgOffset + SPDiff;
3943   uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8;
3944   int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
3945   EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
3946   SDValue FIN = DAG.getFrameIndex(FI, VT);
3947   TailCallArgumentInfo Info;
3948   Info.Arg = Arg;
3949   Info.FrameIdxOp = FIN;
3950   Info.FrameIdx = FI;
3951   TailCallArguments.push_back(Info);
3952 }
3953
3954 /// EmitTailCallLoadFPAndRetAddr - Emit loads from the return address and
3955 /// frame pointer stack slots. Returns the chain as result and the loaded
3956 /// values in LROpOut/FPOpOut. Used when tail calling.
3957 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
3958                                                         int SPDiff,
3959                                                         SDValue Chain,
3960                                                         SDValue &LROpOut,
3961                                                         SDValue &FPOpOut,
3962                                                         bool isDarwinABI,
3963                                                         SDLoc dl) const {
3964   if (SPDiff) {
3965     // Load the LR and FP stack slot for later adjusting.
3966     EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
3967     LROpOut = getReturnAddrFrameIndex(DAG);
3968     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(),
3969                           false, false, false, 0);
3970     Chain = SDValue(LROpOut.getNode(), 1);
3971
3972     // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
3973     // slot as the FP is never overwritten.
3974     if (isDarwinABI) {
3975       FPOpOut = getFramePointerFrameIndex(DAG);
3976       FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(),
3977                             false, false, false, 0);
3978       Chain = SDValue(FPOpOut.getNode(), 1);
3979     }
3980   }
3981   return Chain;
3982 }
3983
3984 /// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
3985 /// specified by "Src" to address "Dst" of size "Size".  Alignment information
3986 /// is specified by the specific parameter attribute. The copy will be passed
3987 /// as a byval function parameter.
3988 /// Sometimes what we are copying is the end of a larger object, the part that
3989 /// does not fit in registers.
3990 static SDValue
3991 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
3992                           ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
3993                           SDLoc dl) {
3994   SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
3995   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
3996                        false, false, false, MachinePointerInfo(),
3997                        MachinePointerInfo());
3998 }
3999
4000 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case
4001 /// of tail calls.
4002 static void
4003 LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain,
4004                  SDValue Arg, SDValue PtrOff, int SPDiff,
4005                  unsigned ArgOffset, bool isPPC64, bool isTailCall,
4006                  bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
4007                  SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments,
4008                  SDLoc dl) {
4009   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4010   if (!isTailCall) {
4011     if (isVector) {
4012       SDValue StackPtr;
4013       if (isPPC64)
4014         StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
4015       else
4016         StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
4017       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
4018                            DAG.getConstant(ArgOffset, dl, PtrVT));
4019     }
4020     MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
4021                                        MachinePointerInfo(), false, false, 0));
4022   // Calculate and remember argument location.
4023   } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
4024                                   TailCallArguments);
4025 }
4026
4027 static
4028 void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
4029                      SDLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes,
4030                      SDValue LROp, SDValue FPOp, bool isDarwinABI,
4031                      SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
4032   MachineFunction &MF = DAG.getMachineFunction();
4033
4034   // Emit a sequence of copyto/copyfrom virtual registers for arguments that
4035   // might overwrite each other in case of tail call optimization.
4036   SmallVector<SDValue, 8> MemOpChains2;
4037   // Do not flag preceding copytoreg stuff together with the following stuff.
4038 InFlag = SDValue(); 4039 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 4040 MemOpChains2, dl); 4041 if (!MemOpChains2.empty()) 4042 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); 4043 4044 // Store the return address to the appropriate stack slot. 4045 Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff, 4046 isPPC64, isDarwinABI, dl); 4047 4048 // Emit callseq_end just before tailcall node. 4049 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4050 DAG.getIntPtrConstant(0, dl, true), InFlag, dl); 4051 InFlag = Chain.getValue(1); 4052 } 4053 4054 // Is this global address that of a function that can be called by name? (as 4055 // opposed to something that must hold a descriptor for an indirect call). 4056 static bool isFunctionGlobalAddress(SDValue Callee) { 4057 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 4058 if (Callee.getOpcode() == ISD::GlobalTLSAddress || 4059 Callee.getOpcode() == ISD::TargetGlobalTLSAddress) 4060 return false; 4061 4062 return G->getGlobal()->getType()->getElementType()->isFunctionTy(); 4063 } 4064 4065 return false; 4066 } 4067 4068 static 4069 unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, 4070 SDValue &Chain, SDValue CallSeqStart, SDLoc dl, int SPDiff, 4071 bool isTailCall, bool IsPatchPoint, bool hasNest, 4072 SmallVectorImpl<std::pair<unsigned, SDValue> > &RegsToPass, 4073 SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys, 4074 ImmutableCallSite *CS, const PPCSubtarget &Subtarget) { 4075 4076 bool isPPC64 = Subtarget.isPPC64(); 4077 bool isSVR4ABI = Subtarget.isSVR4ABI(); 4078 bool isELFv2ABI = Subtarget.isELFv2ABI(); 4079 4080 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4081 NodeTys.push_back(MVT::Other); // Returns a chain 4082 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. 4083 4084 unsigned CallOpc = PPCISD::CALL; 4085 4086 bool needIndirectCall = true; 4087 if (!isSVR4ABI || !isPPC64) 4088 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { 4089 // If this is an absolute destination address, use the munged value. 4090 Callee = SDValue(Dest, 0); 4091 needIndirectCall = false; 4092 } 4093 4094 if (isFunctionGlobalAddress(Callee)) { 4095 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee); 4096 // A call to a TLS address is actually an indirect call to a 4097 // thread-specific pointer. 4098 unsigned OpFlags = 0; 4099 if ((DAG.getTarget().getRelocationModel() != Reloc::Static && 4100 (Subtarget.getTargetTriple().isMacOSX() && 4101 Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5)) && 4102 !G->getGlobal()->isStrongDefinitionForLinker()) || 4103 (Subtarget.isTargetELF() && !isPPC64 && 4104 !G->getGlobal()->hasLocalLinkage() && 4105 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) { 4106 // PC-relative references to external symbols should go through $stub, 4107 // unless we're building with the leopard linker or later, which 4108 // automatically synthesizes these stubs. 4109 OpFlags = PPCII::MO_PLT_OR_STUB; 4110 } 4111 4112 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, 4113 // every direct call is) turn it into a TargetGlobalAddress / 4114 // TargetExternalSymbol node so that legalize doesn't hack it. 
4115 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, 4116 Callee.getValueType(), 0, OpFlags); 4117 needIndirectCall = false; 4118 } 4119 4120 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 4121 unsigned char OpFlags = 0; 4122 4123 if ((DAG.getTarget().getRelocationModel() != Reloc::Static && 4124 (Subtarget.getTargetTriple().isMacOSX() && 4125 Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5))) || 4126 (Subtarget.isTargetELF() && !isPPC64 && 4127 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) { 4128 // PC-relative references to external symbols should go through $stub, 4129 // unless we're building with the leopard linker or later, which 4130 // automatically synthesizes these stubs. 4131 OpFlags = PPCII::MO_PLT_OR_STUB; 4132 } 4133 4134 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(), 4135 OpFlags); 4136 needIndirectCall = false; 4137 } 4138 4139 if (IsPatchPoint) { 4140 // We'll form an invalid direct call when lowering a patchpoint; the full 4141 // sequence for an indirect call is complicated, and many of the 4142 // instructions introduced might have side effects (and, thus, can't be 4143 // removed later). The call itself will be removed as soon as the 4144 // argument/return lowering is complete, so the fact that it has the wrong 4145 // kind of operands should not really matter. 4146 needIndirectCall = false; 4147 } 4148 4149 if (needIndirectCall) { 4150 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 4151 // to do the call, we can't use PPCISD::CALL. 4152 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 4153 4154 if (isSVR4ABI && isPPC64 && !isELFv2ABI) { 4155 // Function pointers in the 64-bit SVR4 ABI do not point to the function 4156 // entry point, but to the function descriptor (the function entry point 4157 // address is part of the function descriptor though). 4158 // The function descriptor is a three doubleword structure with the 4159 // following fields: function entry point, TOC base address and 4160 // environment pointer. 4161 // Thus for a call through a function pointer, the following actions need 4162 // to be performed: 4163 // 1. Save the TOC of the caller in the TOC save area of its stack 4164 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). 4165 // 2. Load the address of the function entry point from the function 4166 // descriptor. 4167 // 3. Load the TOC of the callee from the function descriptor into r2. 4168 // 4. Load the environment pointer from the function descriptor into 4169 // r11. 4170 // 5. Branch to the function entry point address. 4171 // 6. On return of the callee, the TOC of the caller needs to be 4172 // restored (this is done in FinishCall()). 4173 // 4174 // The loads are scheduled at the beginning of the call sequence, and the 4175 // register copies are flagged together to ensure that no other 4176 // operations can be scheduled in between. E.g. without flagging the 4177 // copies together, a TOC access in the caller could be scheduled between 4178 // the assignment of the callee TOC and the branch to the callee, which 4179 // results in the TOC access going through the TOC of the callee instead 4180 // of going through the TOC of the caller, which leads to incorrect code. 4181 4182 // Load the address of the function entry point from the function 4183 // descriptor. 
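      // An illustrative (non-normative) picture of the ELFv1 descriptor,
      // matching the offsets 0, 8 and 16 used below:
      //   struct FunctionDescriptor {
      //     uint64_t EntryPoint; // +0,  moved to CTR
      //     uint64_t TOCBase;    // +8,  copied into r2
      //     uint64_t EnvPtr;     // +16, copied into r11
      //   };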
4184 SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1); 4185 if (LDChain.getValueType() == MVT::Glue) 4186 LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2); 4187 4188 bool LoadsInv = Subtarget.hasInvariantFunctionDescriptors(); 4189 4190 MachinePointerInfo MPI(CS ? CS->getCalledValue() : nullptr); 4191 SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI, 4192 false, false, LoadsInv, 8); 4193 4194 // Load environment pointer into r11. 4195 SDValue PtrOff = DAG.getIntPtrConstant(16, dl); 4196 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 4197 SDValue LoadEnvPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, 4198 MPI.getWithOffset(16), false, false, 4199 LoadsInv, 8); 4200 4201 SDValue TOCOff = DAG.getIntPtrConstant(8, dl); 4202 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff); 4203 SDValue TOCPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, 4204 MPI.getWithOffset(8), false, false, 4205 LoadsInv, 8); 4206 4207 setUsesTOCBasePtr(DAG); 4208 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr, 4209 InFlag); 4210 Chain = TOCVal.getValue(0); 4211 InFlag = TOCVal.getValue(1); 4212 4213 // If the function call has an explicit 'nest' parameter, it takes the 4214 // place of the environment pointer. 4215 if (!hasNest) { 4216 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 4217 InFlag); 4218 4219 Chain = EnvVal.getValue(0); 4220 InFlag = EnvVal.getValue(1); 4221 } 4222 4223 MTCTROps[0] = Chain; 4224 MTCTROps[1] = LoadFuncPtr; 4225 MTCTROps[2] = InFlag; 4226 } 4227 4228 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, 4229 makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2)); 4230 InFlag = Chain.getValue(1); 4231 4232 NodeTys.clear(); 4233 NodeTys.push_back(MVT::Other); 4234 NodeTys.push_back(MVT::Glue); 4235 Ops.push_back(Chain); 4236 CallOpc = PPCISD::BCTRL; 4237 Callee.setNode(nullptr); 4238 // Add use of X11 (holding environment pointer) 4239 if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest) 4240 Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); 4241 // Add CTR register as callee so a bctr can be emitted later. 4242 if (isTailCall) 4243 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 4244 } 4245 4246 // If this is a direct call, pass the chain and the callee. 4247 if (Callee.getNode()) { 4248 Ops.push_back(Chain); 4249 Ops.push_back(Callee); 4250 } 4251 // If this is a tail call add stack pointer delta. 4252 if (isTailCall) 4253 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32)); 4254 4255 // Add argument registers to the end of the list so that they are known live 4256 // into the call. 4257 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 4258 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 4259 RegsToPass[i].second.getValueType())); 4260 4261 // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live 4262 // into the call. 
4263 if (isSVR4ABI && isPPC64 && !IsPatchPoint) { 4264 setUsesTOCBasePtr(DAG); 4265 Ops.push_back(DAG.getRegister(PPC::X2, PtrVT)); 4266 } 4267 4268 return CallOpc; 4269 } 4270 4271 static 4272 bool isLocalCall(const SDValue &Callee) 4273 { 4274 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 4275 return G->getGlobal()->isStrongDefinitionForLinker(); 4276 return false; 4277 } 4278 4279 SDValue 4280 PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 4281 CallingConv::ID CallConv, bool isVarArg, 4282 const SmallVectorImpl<ISD::InputArg> &Ins, 4283 SDLoc dl, SelectionDAG &DAG, 4284 SmallVectorImpl<SDValue> &InVals) const { 4285 4286 SmallVector<CCValAssign, 16> RVLocs; 4287 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 4288 *DAG.getContext()); 4289 CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC); 4290 4291 // Copy all of the result registers out of their specified physreg. 4292 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 4293 CCValAssign &VA = RVLocs[i]; 4294 assert(VA.isRegLoc() && "Can only return in registers!"); 4295 4296 SDValue Val = DAG.getCopyFromReg(Chain, dl, 4297 VA.getLocReg(), VA.getLocVT(), InFlag); 4298 Chain = Val.getValue(1); 4299 InFlag = Val.getValue(2); 4300 4301 switch (VA.getLocInfo()) { 4302 default: llvm_unreachable("Unknown loc info!"); 4303 case CCValAssign::Full: break; 4304 case CCValAssign::AExt: 4305 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4306 break; 4307 case CCValAssign::ZExt: 4308 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val, 4309 DAG.getValueType(VA.getValVT())); 4310 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4311 break; 4312 case CCValAssign::SExt: 4313 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val, 4314 DAG.getValueType(VA.getValVT())); 4315 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4316 break; 4317 } 4318 4319 InVals.push_back(Val); 4320 } 4321 4322 return Chain; 4323 } 4324 4325 SDValue 4326 PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl, 4327 bool isTailCall, bool isVarArg, bool IsPatchPoint, 4328 bool hasNest, SelectionDAG &DAG, 4329 SmallVector<std::pair<unsigned, SDValue>, 8> 4330 &RegsToPass, 4331 SDValue InFlag, SDValue Chain, 4332 SDValue CallSeqStart, SDValue &Callee, 4333 int SPDiff, unsigned NumBytes, 4334 const SmallVectorImpl<ISD::InputArg> &Ins, 4335 SmallVectorImpl<SDValue> &InVals, 4336 ImmutableCallSite *CS) const { 4337 4338 std::vector<EVT> NodeTys; 4339 SmallVector<SDValue, 8> Ops; 4340 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl, 4341 SPDiff, isTailCall, IsPatchPoint, hasNest, 4342 RegsToPass, Ops, NodeTys, CS, Subtarget); 4343 4344 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls 4345 if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64()) 4346 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); 4347 4348 // When performing tail call optimization the callee pops its arguments off 4349 // the stack. Account for this here so these bytes can be pushed back on in 4350 // PPCFrameLowering::eliminateCallFramePseudoInstr. 4351 int BytesCalleePops = 4352 (CallConv == CallingConv::Fast && 4353 getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0; 4354 4355 // Add a register mask operand representing the call-preserved registers. 
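  // The mask marks which physical registers survive the call (for instance
  // the non-volatile r14-r31 under the SVR4 ABIs -- illustrative; the
  // authoritative set comes from getCallPreservedMask() below).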
4356   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
4357   const uint32_t *Mask =
4358       TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
4359   assert(Mask && "Missing call preserved mask for calling convention");
4360   Ops.push_back(DAG.getRegisterMask(Mask));
4361
4362   if (InFlag.getNode())
4363     Ops.push_back(InFlag);
4364
4365   // Emit tail call.
4366   if (isTailCall) {
4367     assert(((Callee.getOpcode() == ISD::Register &&
4368              cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
4369             Callee.getOpcode() == ISD::TargetExternalSymbol ||
4370             Callee.getOpcode() == ISD::TargetGlobalAddress ||
4371             isa<ConstantSDNode>(Callee)) &&
4372            "Expecting a global address, external symbol, absolute value or register");
4373
4374     DAG.getMachineFunction().getFrameInfo()->setHasTailCall();
4375     return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
4376   }
4377
4378   // Add a NOP immediately after the branch instruction when using the 64-bit
4379   // SVR4 ABI. At link time, if caller and callee are in a different module and
4380   // thus have a different TOC, the call will be replaced with a call to a stub
4381   // function which saves the current TOC, loads the TOC of the callee and
4382   // branches to the callee. The NOP will be replaced with a load instruction
4383   // which restores the TOC of the caller from the TOC save slot of the current
4384   // stack frame. If caller and callee belong to the same module (and have the
4385   // same TOC), the NOP will remain unchanged.
4386
4387   if (!isTailCall && Subtarget.isSVR4ABI() && Subtarget.isPPC64() &&
4388       !IsPatchPoint) {
4389     if (CallOpc == PPCISD::BCTRL) {
4390       // This is a call through a function pointer.
4391       // Restore the caller TOC from the save area into R2.
4392       // See PrepareCall() for more information about calls through function
4393       // pointers in the 64-bit SVR4 ABI.
4394       // We are using a target-specific load with r2 hard coded, because the
4395       // result of a target-independent load would never go directly into r2,
4396       // since r2 is a reserved register (which prevents the register allocator
4397       // from allocating it), resulting in an additional register being
4398       // allocated and an unnecessary move instruction being generated.
4399       CallOpc = PPCISD::BCTRL_LOAD_TOC;
4400
4401       EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4402       SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
4403       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
4404       SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
4405       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);
4406
4407       // The address needs to go after the chain input but before the flag (or
4408       // any other variadic arguments).
4409       Ops.insert(std::next(Ops.begin()), AddTOC);
4410     } else if ((CallOpc == PPCISD::CALL) &&
4411                (!isLocalCall(Callee) ||
4412                 DAG.getTarget().getRelocationModel() == Reloc::PIC_))
4413       // Otherwise insert NOP for non-local calls.
4414       CallOpc = PPCISD::CALL_NOP;
4415   }
4416
4417   Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
4418   InFlag = Chain.getValue(1);
4419
4420   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
4421                              DAG.getIntPtrConstant(BytesCalleePops, dl, true),
4422                              InFlag, dl);
4423   if (!Ins.empty())
4424     InFlag = Chain.getValue(1);
4425
4426   return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
4427                          Ins, dl, DAG, InVals);
4428 }
4429
4430 SDValue
4431 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
4432                              SmallVectorImpl<SDValue> &InVals) const {
4433   SelectionDAG &DAG = CLI.DAG;
4434   SDLoc &dl = CLI.DL;
4435   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
4436   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
4437   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
4438   SDValue Chain = CLI.Chain;
4439   SDValue Callee = CLI.Callee;
4440   bool &isTailCall = CLI.IsTailCall;
4441   CallingConv::ID CallConv = CLI.CallConv;
4442   bool isVarArg = CLI.IsVarArg;
4443   bool IsPatchPoint = CLI.IsPatchPoint;
4444   ImmutableCallSite *CS = CLI.CS;
4445
4446   if (isTailCall)
4447     isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
4448                                                    Ins, DAG);
4449
4450   if (!isTailCall && CS && CS->isMustTailCall())
4451     report_fatal_error("failed to perform tail call elimination on a call "
4452                        "site marked musttail");
4453
4454   if (Subtarget.isSVR4ABI()) {
4455     if (Subtarget.isPPC64())
4456       return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
4457                               isTailCall, IsPatchPoint, Outs, OutVals, Ins,
4458                               dl, DAG, InVals, CS);
4459     else
4460       return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
4461                               isTailCall, IsPatchPoint, Outs, OutVals, Ins,
4462                               dl, DAG, InVals, CS);
4463   }
4464
4465   return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
4466                           isTailCall, IsPatchPoint, Outs, OutVals, Ins,
4467                           dl, DAG, InVals, CS);
4468 }
4469
4470 SDValue
4471 PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
4472                                     CallingConv::ID CallConv, bool isVarArg,
4473                                     bool isTailCall, bool IsPatchPoint,
4474                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
4475                                     const SmallVectorImpl<SDValue> &OutVals,
4476                                     const SmallVectorImpl<ISD::InputArg> &Ins,
4477                                     SDLoc dl, SelectionDAG &DAG,
4478                                     SmallVectorImpl<SDValue> &InVals,
4479                                     ImmutableCallSite *CS) const {
4480   // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
4481   // of the 32-bit SVR4 ABI stack frame layout.
4482
4483   assert((CallConv == CallingConv::C ||
4484           CallConv == CallingConv::Fast) && "Unknown calling convention!");
4485
4486   unsigned PtrByteSize = 4;
4487
4488   MachineFunction &MF = DAG.getMachineFunction();
4489
4490   // Mark this function as potentially containing a function that contains a
4491   // tail call. As a consequence, the frame pointer will be used for dynamic
4492   // stack allocation and for restoring the caller's stack pointer in this
4493   // function's epilogue. This is done because the tail-called function might
4494   // overwrite the value in this function's (MF) stack pointer save slot, 0(SP).
4495   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
4496       CallConv == CallingConv::Fast)
4497     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
4498
4499   // Count how many bytes are to be pushed on the stack, including the linkage
4500   // area, parameter list area and the part of the local variable space which
4501   // contains copies of aggregates which are passed by value.
4502
4503   // Assign locations to all of the outgoing arguments.
4504   SmallVector<CCValAssign, 16> ArgLocs;
4505   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
4506                  *DAG.getContext());
4507
4508   // Reserve space for the linkage area on the stack.
4509   CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
4510                        PtrByteSize);
4511
4512   if (isVarArg) {
4513     // Handle fixed and variable vector arguments differently.
4514     // Fixed vector arguments go into registers as long as registers are
4515     // available. Variable vector arguments always go into memory.
4516     unsigned NumArgs = Outs.size();
4517
4518     for (unsigned i = 0; i != NumArgs; ++i) {
4519       MVT ArgVT = Outs[i].VT;
4520       ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
4521       bool Result;
4522
4523       if (Outs[i].IsFixed) {
4524         Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
4525                                CCInfo);
4526       } else {
4527         Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
4528                                       ArgFlags, CCInfo);
4529       }
4530
4531       if (Result) {
4532 #ifndef NDEBUG
4533         errs() << "Call operand #" << i << " has unhandled type "
4534                << EVT(ArgVT).getEVTString() << "\n";
4535 #endif
4536         llvm_unreachable(nullptr);
4537       }
4538     }
4539   } else {
4540     // All arguments are treated the same.
4541     CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
4542   }
4543
4544   // Assign locations to all of the outgoing aggregate by value arguments.
4545   SmallVector<CCValAssign, 16> ByValArgLocs;
4546   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
4547                       ByValArgLocs, *DAG.getContext());
4548
4549   // Reserve stack space for the allocations in CCInfo.
4550   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
4551
4552   CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
4553
4554   // Size of the linkage area, parameter list area and the part of the local
4555   // variable space where copies of aggregates which are passed by value are
4556   // stored.
4557   unsigned NumBytes = CCByValInfo.getNextStackOffset();
4558
4559   // Calculate by how many bytes the stack has to be adjusted in case of tail
4560   // call optimization.
4561   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
4562
4563   // Adjust the stack pointer for the new arguments...
4564   // These operations are automatically eliminated by the prolog/epilog pass.
4565   Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
4566                                dl);
4567   SDValue CallSeqStart = Chain;
4568
4569   // Load the return address and frame pointer so they can be moved somewhere
4570   // else later.
4571   SDValue LROp, FPOp;
4572   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false,
4573                                        dl);
4574
4575   // Set up a copy of the stack pointer for use loading and storing any
4576   // arguments that may not fit in the registers available for argument
4577   // passing.
4578   SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
4579
4580   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
4581   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
4582   SmallVector<SDValue, 8> MemOpChains;
4583
4584   bool seenFloatArg = false;
4585   // Walk the register/memloc assignments, inserting copies/loads.
4586   for (unsigned i = 0, j = 0, e = ArgLocs.size();
4587        i != e;
4588        ++i) {
4589     CCValAssign &VA = ArgLocs[i];
4590     SDValue Arg = OutVals[i];
4591     ISD::ArgFlagsTy Flags = Outs[i].Flags;
4592
4593     if (Flags.isByVal()) {
4594       // Argument is an aggregate which is passed by value, thus we need to
4595       // create a copy of it in the local variable space of the current stack
4596       // frame (which is the stack frame of the caller) and pass the address of
4597       // this copy to the callee.
4598       assert((j < ByValArgLocs.size()) && "Index out of bounds!");
4599       CCValAssign &ByValVA = ByValArgLocs[j++];
4600       assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
4601
4602       // Memory reserved in the local variable space of the caller's stack frame.
4603       unsigned LocMemOffset = ByValVA.getLocMemOffset();
4604
4605       SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
4606       PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
4607                            StackPtr, PtrOff);
4608
4609       // Create a copy of the argument in the local area of the current
4610       // stack frame.
4611       SDValue MemcpyCall =
4612           CreateCopyOfByValArgument(Arg, PtrOff,
4613                                     CallSeqStart.getNode()->getOperand(0),
4614                                     Flags, DAG, dl);
4615
4616       // This must go outside the CALLSEQ_START..END.
4617       SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
4618                                     CallSeqStart.getNode()->getOperand(1),
4619                                     SDLoc(MemcpyCall));
4620       DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
4621                              NewCallSeqStart.getNode());
4622       Chain = CallSeqStart = NewCallSeqStart;
4623
4624       // Pass the address of the aggregate copy on the stack either in a
4625       // physical register or in the parameter list area of the current stack
4626       // frame to the callee.
4627       Arg = PtrOff;
4628     }
4629
4630     if (VA.isRegLoc()) {
4631       if (Arg.getValueType() == MVT::i1)
4632         Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg);
4633
4634       seenFloatArg |= VA.getLocVT().isFloatingPoint();
4635       // Put argument in a physical register.
4636       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
4637     } else {
4638       // Put argument in the parameter list area of the current stack frame.
4639       assert(VA.isMemLoc());
4640       unsigned LocMemOffset = VA.getLocMemOffset();
4641
4642       if (!isTailCall) {
4643         SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
4644         PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
4645                              StackPtr, PtrOff);
4646
4647         MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
4648                                            MachinePointerInfo(),
4649                                            false, false, 0));
4650       } else {
4651         // Calculate and remember argument location.
4652         CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
4653                                  TailCallArguments);
4654       }
4655     }
4656   }
4657
4658   if (!MemOpChains.empty())
4659     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
4660
4661   // Build a sequence of copy-to-reg nodes chained together with token chain
4662   // and flag operands which copy the outgoing args into the appropriate regs.
4663   SDValue InFlag;
4664   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
4665     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
4666                              RegsToPass[i].second, InFlag);
4667     InFlag = Chain.getValue(1);
4668   }
4669
4670   // Set CR bit 6 to true if this is a vararg call with floating args passed in
4671   // registers.
4672   if (isVarArg) {
4673     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
4674     SDValue Ops[] = { Chain, InFlag };
4675
4676     Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
4677                         dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
4678
4679     InFlag = Chain.getValue(1);
4680   }
4681
4682   if (isTailCall)
4683     PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp,
4684                     false, TailCallArguments);
4685
4686   return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint,
4687                     /* unused except on PPC64 ELFv1 */ false, DAG,
4688                     RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
4689                     NumBytes, Ins, InVals, CS);
4690 }
4691
4692 // Copy an argument into memory, being careful to do this outside the
4693 // call sequence for the call to which the argument belongs.
4694 SDValue
4695 PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
4696                                               SDValue CallSeqStart,
4697                                               ISD::ArgFlagsTy Flags,
4698                                               SelectionDAG &DAG,
4699                                               SDLoc dl) const {
4700   SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
4701                             CallSeqStart.getNode()->getOperand(0),
4702                             Flags, DAG, dl);
4703   // The MEMCPY must go outside the CALLSEQ_START..END.
4704   SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
4705                                 CallSeqStart.getNode()->getOperand(1),
4706                                 SDLoc(MemcpyCall));
4707   DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
4708                          NewCallSeqStart.getNode());
4709   return NewCallSeqStart;
4710 }
4711
4712 SDValue
4713 PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
4714                                     CallingConv::ID CallConv, bool isVarArg,
4715                                     bool isTailCall, bool IsPatchPoint,
4716                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
4717                                     const SmallVectorImpl<SDValue> &OutVals,
4718                                     const SmallVectorImpl<ISD::InputArg> &Ins,
4719                                     SDLoc dl, SelectionDAG &DAG,
4720                                     SmallVectorImpl<SDValue> &InVals,
4721                                     ImmutableCallSite *CS) const {
4722
4723   bool isELFv2ABI = Subtarget.isELFv2ABI();
4724   bool isLittleEndian = Subtarget.isLittleEndian();
4725   unsigned NumOps = Outs.size();
4726   bool hasNest = false;
4727
4728   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4729   unsigned PtrByteSize = 8;
4730
4731   MachineFunction &MF = DAG.getMachineFunction();
4732
4733   // Mark this function as potentially containing a function that contains a
4734   // tail call. As a consequence, the frame pointer will be used for dynamic
4735   // stack allocation and for restoring the caller's stack pointer in this
4736   // function's epilogue. This is done because the tail-called function might
4737   // overwrite the value in this function's (MF) stack pointer save slot, 0(SP).
4738   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
4739       CallConv == CallingConv::Fast)
4740     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
4741
4742   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
4743          "fastcc not supported on varargs functions");
4744
4745   // Count how many bytes are to be pushed on the stack, including the linkage
4746   // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes
4747   // of reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
4748   // area is 32 bytes of reserved space for [SP][CR][LR][TOC].
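  // As a rough illustration only (slot layout as commonly described for the
  // 64-bit ELF ABIs; nothing below depends on these exact numbers):
  //   ELFv1 (48 bytes): 0(SP) back chain, 8(SP) CR save, 16(SP) LR save,
  //                     24(SP) and 32(SP) reserved, 40(SP) TOC save
  //   ELFv2 (32 bytes): 0(SP) back chain, 8(SP) CR save, 16(SP) LR save,
  //                     24(SP) TOC save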
4749   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4750   unsigned NumBytes = LinkageSize;
4751   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4752   unsigned &QFPR_idx = FPR_idx;
4753
4754   static const MCPhysReg GPR[] = {
4755     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4756     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4757   };
4758   static const MCPhysReg VR[] = {
4759     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4760     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4761   };
4762   static const MCPhysReg VSRH[] = {
4763     PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8,
4764     PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13
4765   };
4766
4767   const unsigned NumGPRs = array_lengthof(GPR);
4768   const unsigned NumFPRs = 13;
4769   const unsigned NumVRs = array_lengthof(VR);
4770   const unsigned NumQFPRs = NumFPRs;
4771
4772   // When using the fast calling convention, we don't provide backing for
4773   // arguments that will be in registers.
4774   unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
4775
4776   // Add up all the space actually used.
4777   for (unsigned i = 0; i != NumOps; ++i) {
4778     ISD::ArgFlagsTy Flags = Outs[i].Flags;
4779     EVT ArgVT = Outs[i].VT;
4780     EVT OrigVT = Outs[i].ArgVT;
4781
4782     if (Flags.isNest())
4783       continue;
4784
4785     if (CallConv == CallingConv::Fast) {
4786       if (Flags.isByVal())
4787         NumGPRsUsed += (Flags.getByValSize()+7)/8;
4788       else
4789         switch (ArgVT.getSimpleVT().SimpleTy) {
4790         default: llvm_unreachable("Unexpected ValueType for argument!");
4791         case MVT::i1:
4792         case MVT::i32:
4793         case MVT::i64:
4794           if (++NumGPRsUsed <= NumGPRs)
4795             continue;
4796           break;
4797         case MVT::v4i32:
4798         case MVT::v8i16:
4799         case MVT::v16i8:
4800         case MVT::v2f64:
4801         case MVT::v2i64:
4802         case MVT::v1i128:
4803           if (++NumVRsUsed <= NumVRs)
4804             continue;
4805           break;
4806         case MVT::v4f32:
4807           // When using QPX, this is handled like an FP register; otherwise,
4808           // it is an Altivec register.
4809           if (Subtarget.hasQPX()) {
4810             if (++NumFPRsUsed <= NumFPRs)
4811               continue;
4812           } else {
4813             if (++NumVRsUsed <= NumVRs)
4814               continue;
4815           }
4816           break;
4817         case MVT::f32:
4818         case MVT::f64:
4819         case MVT::v4f64: // QPX
4820         case MVT::v4i1:  // QPX
4821           if (++NumFPRsUsed <= NumFPRs)
4822             continue;
4823           break;
4824         }
4825     }
4826
4827     /* Respect alignment of argument on the stack. */
4828     unsigned Align =
4829         CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
4830     NumBytes = ((NumBytes + Align - 1) / Align) * Align;
4831
4832     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
4833     if (Flags.isInConsecutiveRegsLast())
4834       NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4835   }
4836
4837   unsigned NumBytesActuallyUsed = NumBytes;
4838
4839   // The prolog code of the callee may store up to 8 GPR argument registers to
4840   // the stack, allowing va_start to index over them in memory if it is varargs.
4841   // Because we cannot tell if this is needed on the caller side, we have to
4842   // conservatively assume that it is needed. As such, make sure we have at
4843   // least enough stack space for the caller to store the 8 GPRs.
4844   // FIXME: On ELFv2, it may be unnecessary to allocate the parameter area.
4845   NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
4846
4847   // Tail call needs the stack to be aligned.
4848   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
4849       CallConv == CallingConv::Fast)
4850     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
4851
4852   // Calculate by how many bytes the stack has to be adjusted in case of tail
4853   // call optimization.
4854   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
4855
4856   // To protect arguments on the stack from being clobbered in a tail call,
4857   // force all the loads to happen before doing any other lowering.
4858   if (isTailCall)
4859     Chain = DAG.getStackArgumentTokenFactor(Chain);
4860
4861   // Adjust the stack pointer for the new arguments...
4862   // These operations are automatically eliminated by the prolog/epilog pass.
4863   Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
4864                                dl);
4865   SDValue CallSeqStart = Chain;
4866
4867   // Load the return address and frame pointer so they can be moved somewhere
4868   // else later.
4869   SDValue LROp, FPOp;
4870   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
4871                                        dl);
4872
4873   // Set up a copy of the stack pointer for use loading and storing any
4874   // arguments that may not fit in the registers available for argument
4875   // passing.
4876   SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
4877
4878   // Figure out which arguments are going to go in registers, and which in
4879   // memory. Also, if this is a vararg function, floating point operations
4880   // must be stored to our stack, and loaded into integer regs as well, if
4881   // any integer regs are available for argument passing.
4882   unsigned ArgOffset = LinkageSize;
4883
4884   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
4885   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
4886
4887   SmallVector<SDValue, 8> MemOpChains;
4888   for (unsigned i = 0; i != NumOps; ++i) {
4889     SDValue Arg = OutVals[i];
4890     ISD::ArgFlagsTy Flags = Outs[i].Flags;
4891     EVT ArgVT = Outs[i].VT;
4892     EVT OrigVT = Outs[i].ArgVT;
4893
4894     // PtrOff will be used to store the current argument to the stack if a
4895     // register cannot be found for it.
4896     SDValue PtrOff;
4897
4898     // We re-align the argument offset for each argument, except when using the
4899     // fast calling convention, in which case we do so only when the argument
4900     // will actually use a stack slot.
4901     auto ComputePtrOff = [&]() {
4902       /* Respect alignment of argument on the stack. */
4903       unsigned Align =
4904           CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
4905       ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
4906
4907       PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
4908
4909       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
4910     };
4911
4912     if (CallConv != CallingConv::Fast) {
4913       ComputePtrOff();
4914
4915       /* Compute GPR index associated with argument offset. */
4916       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4917       GPR_idx = std::min(GPR_idx, NumGPRs);
4918     }
4919
4920     // Promote integers to 64-bit values.
4921     if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
4922       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
4923       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
4924       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
4925     }
4926
4927     // FIXME memcpy is used way more than necessary. Correctness first.
4928     // Note: "by value" is code for passing a structure by value, not
4929     // basic types.
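    // For illustration (a hypothetical C-level example, not taken from this
    // file): given
    //   struct S { char c[3]; };
    //   void callee(struct S s);   // the struct itself is passed, not a pointer
    // the argument to callee reaches the isByVal() path below, while scalar
    // arguments such as int or double never do.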
4930 if (Flags.isByVal()) { 4931 // Note: Size includes alignment padding, so 4932 // struct x { short a; char b; } 4933 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 4934 // These are the proper values we need for right-justifying the 4935 // aggregate in a parameter register. 4936 unsigned Size = Flags.getByValSize(); 4937 4938 // An empty aggregate parameter takes up no storage and no 4939 // registers. 4940 if (Size == 0) 4941 continue; 4942 4943 if (CallConv == CallingConv::Fast) 4944 ComputePtrOff(); 4945 4946 // All aggregates smaller than 8 bytes must be passed right-justified. 4947 if (Size==1 || Size==2 || Size==4) { 4948 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 4949 if (GPR_idx != NumGPRs) { 4950 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 4951 MachinePointerInfo(), VT, 4952 false, false, false, 0); 4953 MemOpChains.push_back(Load.getValue(1)); 4954 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4955 4956 ArgOffset += PtrByteSize; 4957 continue; 4958 } 4959 } 4960 4961 if (GPR_idx == NumGPRs && Size < 8) { 4962 SDValue AddPtr = PtrOff; 4963 if (!isLittleEndian) { 4964 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 4965 PtrOff.getValueType()); 4966 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 4967 } 4968 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 4969 CallSeqStart, 4970 Flags, DAG, dl); 4971 ArgOffset += PtrByteSize; 4972 continue; 4973 } 4974 // Copy entire object into memory. There are cases where gcc-generated 4975 // code assumes it is there, even if it could be put entirely into 4976 // registers. (This is not what the doc says.) 4977 4978 // FIXME: The above statement is likely due to a misunderstanding of the 4979 // documents. All arguments must be copied into the parameter area BY 4980 // THE CALLEE in the event that the callee takes the address of any 4981 // formal argument. That has not yet been implemented. However, it is 4982 // reasonable to use the stack area as a staging area for the register 4983 // load. 4984 4985 // Skip this for small aggregates, as we will use the same slot for a 4986 // right-justified copy, below. 4987 if (Size >= 8) 4988 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 4989 CallSeqStart, 4990 Flags, DAG, dl); 4991 4992 // When a register is available, pass a small aggregate right-justified. 4993 if (Size < 8 && GPR_idx != NumGPRs) { 4994 // The easiest way to get this right-justified in a register 4995 // is to copy the structure into the rightmost portion of a 4996 // local variable slot, then load the whole slot into the 4997 // register. 4998 // FIXME: The memcpy seems to produce pretty awful code for 4999 // small aggregates, particularly for packed ones. 5000 // FIXME: It would be preferable to use the slot in the 5001 // parameter save area instead of a new local variable. 5002 SDValue AddPtr = PtrOff; 5003 if (!isLittleEndian) { 5004 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType()); 5005 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5006 } 5007 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5008 CallSeqStart, 5009 Flags, DAG, dl); 5010 5011 // Load the slot into the register. 5012 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, PtrOff, 5013 MachinePointerInfo(), 5014 false, false, false, 0); 5015 MemOpChains.push_back(Load.getValue(1)); 5016 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5017 5018 // Done with this argument. 
5019       ArgOffset += PtrByteSize;
5020       continue;
5021     }
5022
5023     // For aggregates larger than PtrByteSize, copy the pieces of the
5024     // object that fit into registers from the parameter save area.
5025     for (unsigned j=0; j<Size; j+=PtrByteSize) {
5026       SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
5027       SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
5028       if (GPR_idx != NumGPRs) {
5029         SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
5030                                    MachinePointerInfo(),
5031                                    false, false, false, 0);
5032         MemOpChains.push_back(Load.getValue(1));
5033         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5034         ArgOffset += PtrByteSize;
5035       } else {
5036         ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
5037         break;
5038       }
5039     }
5040     continue;
5041   }
5042
5043   switch (Arg.getSimpleValueType().SimpleTy) {
5044   default: llvm_unreachable("Unexpected ValueType for argument!");
5045   case MVT::i1:
5046   case MVT::i32:
5047   case MVT::i64:
5048     if (Flags.isNest()) {
5049       // The 'nest' parameter, if any, is passed in R11.
5050       RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
5051       hasNest = true;
5052       break;
5053     }
5054
5055     // These can be scalar arguments or elements of an integer array type
5056     // passed directly. Clang may use those instead of "byval" aggregate
5057     // types to avoid forcing arguments to memory unnecessarily.
5058     if (GPR_idx != NumGPRs) {
5059       RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
5060     } else {
5061       if (CallConv == CallingConv::Fast)
5062         ComputePtrOff();
5063
5064       LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5065                        true, isTailCall, false, MemOpChains,
5066                        TailCallArguments, dl);
5067       if (CallConv == CallingConv::Fast)
5068         ArgOffset += PtrByteSize;
5069     }
5070     if (CallConv != CallingConv::Fast)
5071       ArgOffset += PtrByteSize;
5072     break;
5073   case MVT::f32:
5074   case MVT::f64: {
5075     // These can be scalar arguments or elements of a float array type
5076     // passed directly. The latter are used to implement ELFv2 homogeneous
5077     // float aggregates.
5078
5079     // Named arguments go into FPRs first, and once they overflow, the
5080     // remaining arguments go into GPRs and then the parameter save area.
5081     // Unnamed arguments for vararg functions always go to GPRs and
5082     // then the parameter save area. For now, always put arguments to vararg
5083     // routines in both locations (FPR *and* GPR or stack slot).
5084     bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs;
5085     bool NeededLoad = false;
5086
5087     // First load the argument into the next available FPR.
5088     if (FPR_idx != NumFPRs)
5089       RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
5090
5091     // Next, load the argument into GPR or stack slot if needed.
5092     if (!NeedGPROrStack)
5093       ;
5094     else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) {
5095       // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
5096       // once we support fp <-> gpr moves.
5097
5098       // In the non-vararg case, this can only ever happen in the
5099       // presence of f32 array types, since otherwise we never run
5100       // out of FPRs before running out of GPRs.
5101       SDValue ArgVal;
5102
5103       // Double values are always passed in a single GPR.
5104       if (Arg.getValueType() != MVT::f32) {
5105         ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
5106
5107       // Non-array float values are extended and passed in a GPR.
5108       } else if (!Flags.isInConsecutiveRegs()) {
5109         ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5110         ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
5111
5112       // If we have an array of floats, we collect every odd element
5113       // together with its predecessor into one GPR.
5114       } else if (ArgOffset % PtrByteSize != 0) {
5115         SDValue Lo, Hi;
5116         Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
5117         Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5118         if (!isLittleEndian)
5119           std::swap(Lo, Hi);
5120         ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
5121
5122       // The final element, if even, goes into the first half of a GPR.
5123       } else if (Flags.isInConsecutiveRegsLast()) {
5124         ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5125         ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
5126         if (!isLittleEndian)
5127           ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
5128                                DAG.getConstant(32, dl, MVT::i32));
5129
5130       // Non-final even elements are skipped; they will be handled
5131       // together with the subsequent argument on the next go-around.
5132       } else
5133         ArgVal = SDValue();
5134
5135       if (ArgVal.getNode())
5136         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
5137     } else {
5138       if (CallConv == CallingConv::Fast)
5139         ComputePtrOff();
5140
5141       // Single-precision floating-point values are mapped to the
5142       // second (rightmost) word of the stack doubleword.
5143       if (Arg.getValueType() == MVT::f32 &&
5144           !isLittleEndian && !Flags.isInConsecutiveRegs()) {
5145         SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
5146         PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
5147       }
5148
5149       LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5150                        true, isTailCall, false, MemOpChains,
5151                        TailCallArguments, dl);
5152
5153       NeededLoad = true;
5154     }
5155     // When passing an array of floats, the array occupies consecutive
5156     // space in the argument area; only round up to the next doubleword
5157     // at the end of the array. Otherwise, each float takes 8 bytes.
5158     if (CallConv != CallingConv::Fast || NeededLoad) {
5159       ArgOffset += (Arg.getValueType() == MVT::f32 &&
5160                     Flags.isInConsecutiveRegs()) ? 4 : 8;
5161       if (Flags.isInConsecutiveRegsLast())
5162         ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5163     }
5164     break;
5165   }
5166   case MVT::v4f32:
5167   case MVT::v4i32:
5168   case MVT::v8i16:
5169   case MVT::v16i8:
5170   case MVT::v2f64:
5171   case MVT::v2i64:
5172   case MVT::v1i128:
5173     if (!Subtarget.hasQPX()) {
5174       // These can be scalar arguments or elements of a vector array type
5175       // passed directly. The latter are used to implement ELFv2 homogeneous
5176       // vector aggregates.
5177
5178       // For a varargs call, named arguments go into VRs or on the stack as
5179       // usual; unnamed arguments always go to the stack or the corresponding
5180       // GPRs when within range. For now, we always put the value in both
5181       // locations (or even all three).
5182       if (isVarArg) {
5183         // We could elide this store in the case where the object fits
5184         // entirely in R registers. Maybe later.
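        // The store/reload dance below keeps the value valid in every
        // location a va_arg reader might consult: the vector is stored once
        // to its argument slot, then reloaded into a VR (when one is still
        // free) and into however many GPRs cover the same 16 bytes.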
5185 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5186 MachinePointerInfo(), false, false, 0); 5187 MemOpChains.push_back(Store); 5188 if (VR_idx != NumVRs) { 5189 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 5190 MachinePointerInfo(), 5191 false, false, false, 0); 5192 MemOpChains.push_back(Load.getValue(1)); 5193 5194 unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 || 5195 Arg.getSimpleValueType() == MVT::v2i64) ? 5196 VSRH[VR_idx] : VR[VR_idx]; 5197 ++VR_idx; 5198 5199 RegsToPass.push_back(std::make_pair(VReg, Load)); 5200 } 5201 ArgOffset += 16; 5202 for (unsigned i=0; i<16; i+=PtrByteSize) { 5203 if (GPR_idx == NumGPRs) 5204 break; 5205 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5206 DAG.getConstant(i, dl, PtrVT)); 5207 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 5208 false, false, false, 0); 5209 MemOpChains.push_back(Load.getValue(1)); 5210 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5211 } 5212 break; 5213 } 5214 5215 // Non-varargs Altivec params go into VRs or on the stack. 5216 if (VR_idx != NumVRs) { 5217 unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 || 5218 Arg.getSimpleValueType() == MVT::v2i64) ? 5219 VSRH[VR_idx] : VR[VR_idx]; 5220 ++VR_idx; 5221 5222 RegsToPass.push_back(std::make_pair(VReg, Arg)); 5223 } else { 5224 if (CallConv == CallingConv::Fast) 5225 ComputePtrOff(); 5226 5227 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5228 true, isTailCall, true, MemOpChains, 5229 TailCallArguments, dl); 5230 if (CallConv == CallingConv::Fast) 5231 ArgOffset += 16; 5232 } 5233 5234 if (CallConv != CallingConv::Fast) 5235 ArgOffset += 16; 5236 break; 5237 } // not QPX 5238 5239 assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 && 5240 "Invalid QPX parameter type"); 5241 5242 /* fall through */ 5243 case MVT::v4f64: 5244 case MVT::v4i1: { 5245 bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32; 5246 if (isVarArg) { 5247 // We could elide this store in the case where the object fits 5248 // entirely in R registers. Maybe later. 5249 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5250 MachinePointerInfo(), false, false, 0); 5251 MemOpChains.push_back(Store); 5252 if (QFPR_idx != NumQFPRs) { 5253 SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, 5254 Store, PtrOff, MachinePointerInfo(), 5255 false, false, false, 0); 5256 MemOpChains.push_back(Load.getValue(1)); 5257 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load)); 5258 } 5259 ArgOffset += (IsF32 ? 16 : 32); 5260 for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) { 5261 if (GPR_idx == NumGPRs) 5262 break; 5263 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5264 DAG.getConstant(i, dl, PtrVT)); 5265 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 5266 false, false, false, 0); 5267 MemOpChains.push_back(Load.getValue(1)); 5268 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5269 } 5270 break; 5271 } 5272 5273 // Non-varargs QPX params go into registers or on the stack. 5274 if (QFPR_idx != NumQFPRs) { 5275 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg)); 5276 } else { 5277 if (CallConv == CallingConv::Fast) 5278 ComputePtrOff(); 5279 5280 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5281 true, isTailCall, true, MemOpChains, 5282 TailCallArguments, dl); 5283 if (CallConv == CallingConv::Fast) 5284 ArgOffset += (IsF32 ? 
16 : 32);
5285     }
5286
5287     if (CallConv != CallingConv::Fast)
5288       ArgOffset += (IsF32 ? 16 : 32);
5289     break;
5290     }
5291   }
5292   }
5293
5294   assert(NumBytesActuallyUsed == ArgOffset);
5295   (void)NumBytesActuallyUsed;
5296
5297   if (!MemOpChains.empty())
5298     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5299
5300   // Check if this is an indirect call (MTCTR/BCTRL).
5301   // See PrepareCall() for more information about calls through function
5302   // pointers in the 64-bit SVR4 ABI.
5303   if (!isTailCall && !IsPatchPoint &&
5304       !isFunctionGlobalAddress(Callee) &&
5305       !isa<ExternalSymbolSDNode>(Callee)) {
5306     // Load r2 into a virtual register and store it to the TOC save area.
5307     setUsesTOCBasePtr(DAG);
5308     SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
5309     // TOC save area offset.
5310     unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5311     SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5312     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
5313     Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
5314                          MachinePointerInfo::getStack(TOCSaveOffset),
5315                          false, false, 0);
5316     // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
5317     // This does not mean the MTCTR instruction must use R12; it's easier
5318     // to model this as an extra parameter, so do that.
5319     if (isELFv2ABI && !IsPatchPoint)
5320       RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
5321   }
5322
5323   // Build a sequence of copy-to-reg nodes chained together with token chain
5324   // and flag operands which copy the outgoing args into the appropriate regs.
5325   SDValue InFlag;
5326   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5327     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5328                              RegsToPass[i].second, InFlag);
5329     InFlag = Chain.getValue(1);
5330   }
5331
5332   if (isTailCall)
5333     PrepareTailCall(DAG, InFlag, Chain, dl, true, SPDiff, NumBytes, LROp,
5334                     FPOp, true, TailCallArguments);
5335
5336   return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint,
5337                     hasNest, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
5338                     Callee, SPDiff, NumBytes, Ins, InVals, CS);
5339 }
5340
5341 SDValue
5342 PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
5343                                     CallingConv::ID CallConv, bool isVarArg,
5344                                     bool isTailCall, bool IsPatchPoint,
5345                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
5346                                     const SmallVectorImpl<SDValue> &OutVals,
5347                                     const SmallVectorImpl<ISD::InputArg> &Ins,
5348                                     SDLoc dl, SelectionDAG &DAG,
5349                                     SmallVectorImpl<SDValue> &InVals,
5350                                     ImmutableCallSite *CS) const {
5351
5352   unsigned NumOps = Outs.size();
5353
5354   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5355   bool isPPC64 = PtrVT == MVT::i64;
5356   unsigned PtrByteSize = isPPC64 ? 8 : 4;
5357
5358   MachineFunction &MF = DAG.getMachineFunction();
5359
5360   // Mark this function as potentially containing a function that contains a
5361   // tail call. As a consequence, the frame pointer will be used for dynamic
5362   // stack allocation and for restoring the caller's stack pointer in this
5363   // function's epilogue. This is done because the tail-called function might
5364   // overwrite the value in this function's (MF) stack pointer save slot, 0(SP).
5365   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5366       CallConv == CallingConv::Fast)
5367     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5368
5369   // Count how many bytes are to be pushed on the stack, including the linkage
5370   // area, and parameter passing area. We start with 24/48 bytes, which is
5371   // pre-reserved space for [SP][CR][LR][3 x unused].
5372   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5373   unsigned NumBytes = LinkageSize;
5374
5375   // Add up all the space actually used.
5376   // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
5377   // they all go in registers, but we must reserve stack space for them for
5378   // possible use by the caller. In varargs or 64-bit calls, parameters are
5379   // assigned stack space in order, with padding so Altivec parameters are
5380   // 16-byte aligned.
5381   unsigned nAltivecParamsAtEnd = 0;
5382   for (unsigned i = 0; i != NumOps; ++i) {
5383     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5384     EVT ArgVT = Outs[i].VT;
5385     // Varargs Altivec parameters are padded to a 16-byte boundary.
5386     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
5387         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
5388         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
5389       if (!isVarArg && !isPPC64) {
5390         // Non-varargs Altivec parameters go after all the non-Altivec
5391         // parameters; handle those later so we know how much padding we need.
5392         nAltivecParamsAtEnd++;
5393         continue;
5394       }
5395       // Varargs and 64-bit Altivec parameters are padded to a 16-byte boundary.
5396       NumBytes = ((NumBytes+15)/16)*16;
5397     }
5398     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5399   }
5400
5401   // Allow for Altivec parameters at the end, if needed.
5402   if (nAltivecParamsAtEnd) {
5403     NumBytes = ((NumBytes+15)/16)*16;
5404     NumBytes += 16*nAltivecParamsAtEnd;
5405   }
5406
5407   // The prolog code of the callee may store up to 8 GPR argument registers to
5408   // the stack, allowing va_start to index over them in memory if it is varargs.
5409   // Because we cannot tell if this is needed on the caller side, we have to
5410   // conservatively assume that it is needed. As such, make sure we have at
5411   // least enough stack space for the caller to store the 8 GPRs.
5412   NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5413
5414   // Tail call needs the stack to be aligned.
5415   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5416       CallConv == CallingConv::Fast)
5417     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
5418
5419   // Calculate by how many bytes the stack has to be adjusted in case of tail
5420   // call optimization.
5421   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
5422
5423   // To protect arguments on the stack from being clobbered in a tail call,
5424   // force all the loads to happen before doing any other lowering.
5425   if (isTailCall)
5426     Chain = DAG.getStackArgumentTokenFactor(Chain);
5427
5428   // Adjust the stack pointer for the new arguments...
5429   // These operations are automatically eliminated by the prolog/epilog pass.
5430   Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5431                                dl);
5432   SDValue CallSeqStart = Chain;
5433
5434   // Load the return address and frame pointer so they can be moved somewhere
5435   // else later.
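  // (If this ends up being lowered as a tail call, the caller's frame may be
  // resized by SPDiff bytes, so the return address and frame pointer loaded
  // here must stay live until PrepareTailCall, further below, stores them at
  // their new locations.)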
5436 SDValue LROp, FPOp; 5437 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true, 5438 dl); 5439 5440 // Set up a copy of the stack pointer for use loading and storing any 5441 // arguments that may not fit in the registers available for argument 5442 // passing. 5443 SDValue StackPtr; 5444 if (isPPC64) 5445 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 5446 else 5447 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 5448 5449 // Figure out which arguments are going to go in registers, and which in 5450 // memory. Also, if this is a vararg function, floating point operations 5451 // must be stored to our stack, and loaded into integer regs as well, if 5452 // any integer regs are available for argument passing. 5453 unsigned ArgOffset = LinkageSize; 5454 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 5455 5456 static const MCPhysReg GPR_32[] = { // 32-bit registers. 5457 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 5458 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 5459 }; 5460 static const MCPhysReg GPR_64[] = { // 64-bit registers. 5461 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 5462 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 5463 }; 5464 static const MCPhysReg VR[] = { 5465 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 5466 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 5467 }; 5468 const unsigned NumGPRs = array_lengthof(GPR_32); 5469 const unsigned NumFPRs = 13; 5470 const unsigned NumVRs = array_lengthof(VR); 5471 5472 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 5473 5474 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 5475 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 5476 5477 SmallVector<SDValue, 8> MemOpChains; 5478 for (unsigned i = 0; i != NumOps; ++i) { 5479 SDValue Arg = OutVals[i]; 5480 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5481 5482 // PtrOff will be used to store the current argument to the stack if a 5483 // register cannot be found for it. 5484 SDValue PtrOff; 5485 5486 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 5487 5488 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 5489 5490 // On PPC64, promote integers to 64-bit values. 5491 if (isPPC64 && Arg.getValueType() == MVT::i32) { 5492 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 5493 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 5494 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 5495 } 5496 5497 // FIXME memcpy is used way more than necessary. Correctness first. 5498 // Note: "by value" is code for passing a structure by value, not 5499 // basic types. 5500 if (Flags.isByVal()) { 5501 unsigned Size = Flags.getByValSize(); 5502 // Very small objects are passed right-justified. Everything else is 5503 // passed left-justified. 5504 if (Size==1 || Size==2) { 5505 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 5506 if (GPR_idx != NumGPRs) { 5507 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 5508 MachinePointerInfo(), VT, 5509 false, false, false, 0); 5510 MemOpChains.push_back(Load.getValue(1)); 5511 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5512 5513 ArgOffset += PtrByteSize; 5514 } else { 5515 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 5516 PtrOff.getValueType()); 5517 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5518 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5519 CallSeqStart, 5520 Flags, DAG, dl); 5521 ArgOffset += PtrByteSize; 5522 } 5523 continue; 5524 } 5525 // Copy entire object into memory. 
There are cases where gcc-generated
5526     // code assumes it is there, even if it could be put entirely into
5527     // registers. (This is not what the doc says.)
5528     Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
5529                                                       CallSeqStart,
5530                                                       Flags, DAG, dl);
5531
5532     // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
5533     // copy the pieces of the object that fit into registers from the
5534     // parameter save area.
5535     for (unsigned j=0; j<Size; j+=PtrByteSize) {
5536       SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
5537       SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
5538       if (GPR_idx != NumGPRs) {
5539         SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
5540                                    MachinePointerInfo(),
5541                                    false, false, false, 0);
5542         MemOpChains.push_back(Load.getValue(1));
5543         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5544         ArgOffset += PtrByteSize;
5545       } else {
5546         ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
5547         break;
5548       }
5549     }
5550     continue;
5551   }
5552
5553   switch (Arg.getSimpleValueType().SimpleTy) {
5554   default: llvm_unreachable("Unexpected ValueType for argument!");
5555   case MVT::i1:
5556   case MVT::i32:
5557   case MVT::i64:
5558     if (GPR_idx != NumGPRs) {
5559       if (Arg.getValueType() == MVT::i1)
5560         Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);
5561
5562       RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
5563     } else {
5564       LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5565                        isPPC64, isTailCall, false, MemOpChains,
5566                        TailCallArguments, dl);
5567     }
5568     ArgOffset += PtrByteSize;
5569     break;
5570   case MVT::f32:
5571   case MVT::f64:
5572     if (FPR_idx != NumFPRs) {
5573       RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
5574
5575       if (isVarArg) {
5576         SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
5577                                      MachinePointerInfo(), false, false, 0);
5578         MemOpChains.push_back(Store);
5579
5580         // Float varargs are always shadowed in available integer registers.
5581         if (GPR_idx != NumGPRs) {
5582           SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
5583                                      MachinePointerInfo(), false, false,
5584                                      false, 0);
5585           MemOpChains.push_back(Load.getValue(1));
5586           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5587         }
5588         if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
5589           SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
5590           PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
5591           SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
5592                                      MachinePointerInfo(),
5593                                      false, false, false, 0);
5594           MemOpChains.push_back(Load.getValue(1));
5595           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5596         }
5597       } else {
5598         // If we have any FPRs remaining, we may also have GPRs remaining.
5599         // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
5600         // GPRs.
5601         if (GPR_idx != NumGPRs)
5602           ++GPR_idx;
5603         if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
5604             !isPPC64) // PPC64 has 64-bit GPRs, obviously. :)
5605           ++GPR_idx;
5606       }
5607     } else
5608       LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5609                        isPPC64, isTailCall, false, MemOpChains,
5610                        TailCallArguments, dl);
5611     if (isPPC64)
5612       ArgOffset += 8;
5613     else
5614       ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
5615     break;
5616   case MVT::v4f32:
5617   case MVT::v4i32:
5618   case MVT::v8i16:
5619   case MVT::v16i8:
5620     if (isVarArg) {
5621       // These go aligned on the stack, or in the corresponding R registers
5622       // when within range. The Darwin PPC ABI doc claims they also go in
5623       // V registers; in fact gcc does this only for arguments that are
5624       // prototyped, not for those that match the "..."; we do it for all
5625       // arguments, which seems to work.
5626       while (ArgOffset % 16 != 0) {
5627         ArgOffset += PtrByteSize;
5628         if (GPR_idx != NumGPRs)
5629           GPR_idx++;
5630       }
5631       // We could elide this store in the case where the object fits
5632       // entirely in R registers. Maybe later.
5633       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
5634                            DAG.getConstant(ArgOffset, dl, PtrVT));
5635       SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
5636                                    MachinePointerInfo(), false, false, 0);
5637       MemOpChains.push_back(Store);
5638       if (VR_idx != NumVRs) {
5639         SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff,
5640                                    MachinePointerInfo(),
5641                                    false, false, false, 0);
5642         MemOpChains.push_back(Load.getValue(1));
5643         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
5644       }
5645       ArgOffset += 16;
5646       for (unsigned i=0; i<16; i+=PtrByteSize) {
5647         if (GPR_idx == NumGPRs)
5648           break;
5649         SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
5650                                  DAG.getConstant(i, dl, PtrVT));
5651         SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(),
5652                                    false, false, false, 0);
5653         MemOpChains.push_back(Load.getValue(1));
5654         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5655       }
5656       break;
5657     }
5658
5659     // Non-varargs Altivec params generally go in registers, but have
5660     // stack space allocated at the end.
5661     if (VR_idx != NumVRs) {
5662       // Doesn't have GPR space allocated.
5663       RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
5664     } else if (nAltivecParamsAtEnd==0) {
5665       // We are emitting Altivec params in order.
5666       LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5667                        isPPC64, isTailCall, true, MemOpChains,
5668                        TailCallArguments, dl);
5669       ArgOffset += 16;
5670     }
5671     break;
5672   }
5673   }
5674   // If all Altivec parameters fit in registers, as they usually do,
5675   // they get stack space following the non-Altivec parameters. We
5676   // don't track this here because nobody below needs it.
5677   // If there are more Altivec parameters than fit in registers, emit
5678   // the stores here.
5679   if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
5680     unsigned j = 0;
5681     // Offset is aligned; skip the first 12 params, which go in V registers.
5682     ArgOffset = ((ArgOffset+15)/16)*16;
5683     ArgOffset += 12*16;
5684     for (unsigned i = 0; i != NumOps; ++i) {
5685       SDValue Arg = OutVals[i];
5686       EVT ArgType = Outs[i].VT;
5687       if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
5688           ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
5689         if (++j > NumVRs) {
5690           SDValue PtrOff;
5691           // We are emitting Altivec params in order.
5692           LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5693                            isPPC64, isTailCall, true, MemOpChains,
5694                            TailCallArguments, dl);
5695           ArgOffset += 16;
5696         }
5697       }
5698     }
5699   }
5700
5701   if (!MemOpChains.empty())
5702     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5703
5704   // On Darwin, R12 must contain the address of an indirect callee. This does
5705   // not mean the MTCTR instruction must use R12; it's easier to model this as
5706   // an extra parameter, so do that.
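  // A rough sketch of the intended indirect-call sequence (exact instructions
  // are chosen by later passes; shown here only to motivate the extra
  // operand):
  //   mtctr rN        ; callee address into the count register
  //   mr    r12, rN   ; r12 carries the callee address as well
  //   bctrl
  // Modeling r12 as one more register argument lets the generic copy-to-reg
  // loop below materialize the "mr r12" without special-casing the call node.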
5707   if (!isTailCall &&
5708       !isFunctionGlobalAddress(Callee) &&
5709       !isa<ExternalSymbolSDNode>(Callee) &&
5710       !isBLACompatibleAddress(Callee, DAG))
5711     RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
5712                                                    PPC::R12), Callee));
5713
5714   // Build a sequence of copy-to-reg nodes chained together with token chain
5715   // and flag operands which copy the outgoing args into the appropriate regs.
5716   SDValue InFlag;
5717   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5718     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5719                              RegsToPass[i].second, InFlag);
5720     InFlag = Chain.getValue(1);
5721   }
5722
5723   if (isTailCall)
5724     PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp,
5725                     FPOp, true, TailCallArguments);
5726
5727   return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint,
5728                     /* unused except on PPC64 ELFv1 */ false, DAG,
5729                     RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
5730                     NumBytes, Ins, InVals, CS);
5731 }
5732
5733 bool
5734 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
5735                                   MachineFunction &MF, bool isVarArg,
5736                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
5737                                   LLVMContext &Context) const {
5738   SmallVector<CCValAssign, 16> RVLocs;
5739   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
5740   return CCInfo.CheckReturn(Outs, RetCC_PPC);
5741 }
5742
5743 SDValue
5744 PPCTargetLowering::LowerReturn(SDValue Chain,
5745                                CallingConv::ID CallConv, bool isVarArg,
5746                                const SmallVectorImpl<ISD::OutputArg> &Outs,
5747                                const SmallVectorImpl<SDValue> &OutVals,
5748                                SDLoc dl, SelectionDAG &DAG) const {
5749
5750   SmallVector<CCValAssign, 16> RVLocs;
5751   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
5752                  *DAG.getContext());
5753   CCInfo.AnalyzeReturn(Outs, RetCC_PPC);
5754
5755   SDValue Flag;
5756   SmallVector<SDValue, 4> RetOps(1, Chain);
5757
5758   // Copy the result values into the output registers.
5759   for (unsigned i = 0; i != RVLocs.size(); ++i) {
5760     CCValAssign &VA = RVLocs[i];
5761     assert(VA.isRegLoc() && "Can only return in registers!");
5762
5763     SDValue Arg = OutVals[i];
5764
5765     switch (VA.getLocInfo()) {
5766     default: llvm_unreachable("Unknown loc info!");
5767     case CCValAssign::Full: break;
5768     case CCValAssign::AExt:
5769       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
5770       break;
5771     case CCValAssign::ZExt:
5772       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
5773       break;
5774     case CCValAssign::SExt:
5775       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
5776       break;
5777     }
5778
5779     Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
5780     Flag = Chain.getValue(1);
5781     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
5782   }
5783
5784   RetOps[0] = Chain;  // Update chain.
5785
5786   // Add the flag if we have it.
5787   if (Flag.getNode())
5788     RetOps.push_back(Flag);
5789
5790   return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
5791 }
5792
5793 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
5794                                              const PPCSubtarget &Subtarget) const {
5795   // When we pop the dynamic allocation we need to restore the SP link.
5796   SDLoc dl(Op);
5797
5798   // Get the correct type for pointers.
5799   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5800
5801   // Construct the stack pointer operand.
5802   bool isPPC64 = Subtarget.isPPC64();
5803   unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
5804   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
5805
5806   // Get the operands for the STACKRESTORE.
5807   SDValue Chain = Op.getOperand(0);
5808   SDValue SaveSP = Op.getOperand(1);
5809
5810   // Load the old link SP.
5811   SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr,
5812                                    MachinePointerInfo(),
5813                                    false, false, false, 0);
5814
5815   // Restore the stack pointer.
5816   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
5817
5818   // Store the old link SP.
5819   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo(),
5820                       false, false, 0);
5821 }
5822
5823
5824
5825 SDValue
5826 PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
5827   MachineFunction &MF = DAG.getMachineFunction();
5828   bool isPPC64 = Subtarget.isPPC64();
5829   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
5830
5831   // Get the current return address save index. The users of this index will be
5832   // primarily LowerRETURNADDR.
5833   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
5834   int RASI = FI->getReturnAddrSaveIndex();
5835
5836   // If the return address save index hasn't been defined yet.
5837   if (!RASI) {
5838     // Find out what the fixed offset of the return address save area is.
5839     int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
5840     // Allocate the frame index for the return address save area.
5841     RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64 ? 8 : 4, LROffset, false);
5842     // Save the result.
5843     FI->setReturnAddrSaveIndex(RASI);
5844   }
5845   return DAG.getFrameIndex(RASI, PtrVT);
5846 }
5847
5848 SDValue
5849 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG &DAG) const {
5850   MachineFunction &MF = DAG.getMachineFunction();
5851   bool isPPC64 = Subtarget.isPPC64();
5852   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
5853
5854   // Get the current frame pointer save index. The users of this index will be
5855   // primarily DYNALLOC instructions.
5856   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
5857   int FPSI = FI->getFramePointerSaveIndex();
5858
5859   // If the frame pointer save index hasn't been defined yet.
5860   if (!FPSI) {
5861     // Find out what the fixed offset of the frame pointer save area is.
5862     int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
5863     // Allocate the frame index for the frame pointer save area.
5864     FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64 ? 8 : 4, FPOffset, true);
5865     // Save the result.
5866     FI->setFramePointerSaveIndex(FPSI);
5867   }
5868   return DAG.getFrameIndex(FPSI, PtrVT);
5869 }
5870
5871 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
5872                                                    SelectionDAG &DAG,
5873                                                    const PPCSubtarget &Subtarget) const {
5874   // Get the inputs.
5875   SDValue Chain = Op.getOperand(0);
5876   SDValue Size = Op.getOperand(1);
5877   SDLoc dl(Op);
5878
5879   // Get the correct type for pointers.
5880   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5881   // Negate the size.
5882   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
5883                                 DAG.getConstant(0, dl, PtrVT), Size);
5884   // Construct a node for the frame pointer save index.
5885   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
5886   // Build a DYNALLOC node.
  SDValue Ops[3] = { Chain, NegSize, FPSIdx };
  SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
  return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
}

SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
                     DAG.getVTList(MVT::i32, MVT::Other),
                     Op.getOperand(0), Op.getOperand(1));
}

SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
                     Op.getOperand(0), Op.getOperand(1));
}

SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getValueType().isVector())
    return LowerVectorLoad(Op, DAG);

  assert(Op.getValueType() == MVT::i1 &&
         "Custom lowering only for i1 loads");

  // First, load 8 bits into 32 bits, then truncate to 1 bit.

  SDLoc dl(Op);
  LoadSDNode *LD = cast<LoadSDNode>(Op);

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  MachineMemOperand *MMO = LD->getMemOperand();

  SDValue NewLD =
      DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
                     BasePtr, MVT::i8, MMO);
  SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);

  SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
  return DAG.getMergeValues(Ops, dl);
}

SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getOperand(1).getValueType().isVector())
    return LowerVectorStore(Op, DAG);

  assert(Op.getOperand(1).getValueType() == MVT::i1 &&
         "Custom lowering only for i1 stores");

  // First, zero extend to 32 bits, then use a truncating store to 8 bits.

  SDLoc dl(Op);
  StoreSDNode *ST = cast<StoreSDNode>(Op);

  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  MachineMemOperand *MMO = ST->getMemOperand();

  Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
                      Value);
  return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
}

// FIXME: Remove this once the ANDI glue bug is fixed:
SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::i1 &&
         "Custom lowering only for i1 results");

  SDLoc DL(Op);
  return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1,
                     Op.getOperand(0));
}

/// LowerSELECT_CC - Lower a floating-point select_cc into the fsel instruction
/// when possible.
SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  // Not FP? Not an fsel.
  if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
      !Op.getOperand(2).getValueType().isFloatingPoint())
    return Op;

  // We might be able to do better than this under some circumstances, but in
  // general, fsel-based lowering of select is a finite-math-only optimization.
  // For more information, see section F.3 of the 2.06 ISA specification.
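  //
  // As a reference for the transformations below (a summary, not normative):
  // the hardware instruction
  //   fsel FRT, FRA, FRC, FRB    ; FRT = (FRA >= 0.0) ? FRC : FRB
  // means PPCISD::FSEL(Cmp, TV, FV) selects TV when Cmp >= 0.0. A setge
  // select_cc therefore maps directly:
  //   (select_cc LHS, RHS, TV, FV, setge) -> FSEL(LHS - RHS, TV, FV)
  // and the remaining orderings are derived by negating the comparison
  // value and/or swapping TV and FV, e.g.:
  //   (select_cc LHS, RHS, TV, FV, setle) -> FSEL(RHS - LHS, TV, FV)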
  if (!DAG.getTarget().Options.NoInfsFPMath ||
      !DAG.getTarget().Options.NoNaNsFPMath)
    return Op;

  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();

  EVT ResVT = Op.getValueType();
  EVT CmpVT = Op.getOperand(0).getValueType();
  SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
  SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
  SDLoc dl(Op);

  // If the RHS of the comparison is a 0.0, we don't need to do the
  // subtraction at all.
  SDValue Sel1;
  if (isFloatingPointZero(RHS))
    switch (CC) {
    default: break;       // SETUO etc aren't handled by fsel.
    case ISD::SETNE:
      std::swap(TV, FV);
    case ISD::SETEQ:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
      if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
        Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
    case ISD::SETULT:
    case ISD::SETLT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
    case ISD::SETOGE:
    case ISD::SETGE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
    case ISD::SETUGT:
    case ISD::SETGT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setgt
    case ISD::SETOLE:
    case ISD::SETLE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
    }

  SDValue Cmp;
  switch (CC) {
  default: break;       // SETUO etc aren't handled by fsel.
6025 case ISD::SETNE: 6026 std::swap(TV, FV); 6027 case ISD::SETEQ: 6028 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 6029 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6030 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6031 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6032 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 6033 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 6034 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6035 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 6036 case ISD::SETULT: 6037 case ISD::SETLT: 6038 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 6039 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6040 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6041 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6042 case ISD::SETOGE: 6043 case ISD::SETGE: 6044 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 6045 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6046 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6047 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6048 case ISD::SETUGT: 6049 case ISD::SETGT: 6050 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 6051 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6052 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6053 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6054 case ISD::SETOLE: 6055 case ISD::SETLE: 6056 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 6057 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6058 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6059 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6060 } 6061 return Op; 6062 } 6063 6064 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI, 6065 SelectionDAG &DAG, 6066 SDLoc dl) const { 6067 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6068 SDValue Src = Op.getOperand(0); 6069 if (Src.getValueType() == MVT::f32) 6070 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6071 6072 SDValue Tmp; 6073 switch (Op.getSimpleValueType().SimpleTy) { 6074 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6075 case MVT::i32: 6076 Tmp = DAG.getNode( 6077 Op.getOpcode() == ISD::FP_TO_SINT 6078 ? PPCISD::FCTIWZ 6079 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6080 dl, MVT::f64, Src); 6081 break; 6082 case MVT::i64: 6083 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6084 "i64 FP_TO_UINT is supported only with FPCVT"); 6085 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 6086 PPCISD::FCTIDUZ, 6087 dl, MVT::f64, Src); 6088 break; 6089 } 6090 6091 // Convert the FP value to an int value through memory. 6092 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 6093 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()); 6094 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 6095 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 6096 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(FI); 6097 6098 // Emit a store to the stack slot. 
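  // (The converted value lives in a floating-point register, and without
  // direct moves the only FPR -> GPR path is through memory, so the code
  // below emits roughly the following -- an illustrative sketch, not the
  // exact output:
  //   fctiwz f0, f1       ; convert towards zero in the FPR domain
  //   stfd   f0, X(r1)    ; spill to a stack temporary (or stfiwx)
  //   lwz    r3, X+4(r1)  ; reload the low word as an integer
  // The +4 bias for the 8-byte slot case is applied by the caller.)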
6099 SDValue Chain; 6100 if (i32Stack) { 6101 MachineFunction &MF = DAG.getMachineFunction(); 6102 MachineMemOperand *MMO = 6103 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 6104 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 6105 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 6106 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 6107 } else 6108 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, 6109 MPI, false, false, 0); 6110 6111 // Result is a load from the stack slot. If loading 4 bytes, make sure to 6112 // add in a bias. 6113 if (Op.getValueType() == MVT::i32 && !i32Stack) { 6114 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 6115 DAG.getConstant(4, dl, FIPtr.getValueType())); 6116 MPI = MPI.getWithOffset(4); 6117 } 6118 6119 RLI.Chain = Chain; 6120 RLI.Ptr = FIPtr; 6121 RLI.MPI = MPI; 6122 } 6123 6124 /// \brief Custom lowers floating point to integer conversions to use 6125 /// the direct move instructions available in ISA 2.07 to avoid the 6126 /// need for load/store combinations. 6127 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 6128 SelectionDAG &DAG, 6129 SDLoc dl) const { 6130 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6131 SDValue Src = Op.getOperand(0); 6132 6133 if (Src.getValueType() == MVT::f32) 6134 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6135 6136 SDValue Tmp; 6137 switch (Op.getSimpleValueType().SimpleTy) { 6138 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6139 case MVT::i32: 6140 Tmp = DAG.getNode( 6141 Op.getOpcode() == ISD::FP_TO_SINT 6142 ? PPCISD::FCTIWZ 6143 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6144 dl, MVT::f64, Src); 6145 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp); 6146 break; 6147 case MVT::i64: 6148 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6149 "i64 FP_TO_UINT is supported only with FPCVT"); 6150 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 6151 PPCISD::FCTIDUZ, 6152 dl, MVT::f64, Src); 6153 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp); 6154 break; 6155 } 6156 return Tmp; 6157 } 6158 6159 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 6160 SDLoc dl) const { 6161 if (Subtarget.hasDirectMove() && Subtarget.isPPC64()) 6162 return LowerFP_TO_INTDirectMove(Op, DAG, dl); 6163 6164 ReuseLoadInfo RLI; 6165 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6166 6167 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, false, 6168 false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo, 6169 RLI.Ranges); 6170 } 6171 6172 // We're trying to insert a regular store, S, and then a load, L. If the 6173 // incoming value, O, is a load, we might just be able to have our load use the 6174 // address used by O. However, we don't know if anything else will store to 6175 // that address before we can load from it. To prevent this situation, we need 6176 // to insert our load, L, into the chain as a peer of O. To do this, we give L 6177 // the same chain operand as O, we create a token factor from the chain results 6178 // of O and L, and we replace all uses of O's chain result with that token 6179 // factor (see spliceIntoChain below for this last part). 
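//
// Schematically (an informal sketch of the transformation just described):
//
//   before:  Ch -> O -> (users of O's chain result)
//   after:   Ch -> O --+
//            Ch -> L --+-> TokenFactor -> (users of O's chain result)
//
// so no store can be scheduled between O or L and their common chain input.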
6180 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT, 6181 ReuseLoadInfo &RLI, 6182 SelectionDAG &DAG, 6183 ISD::LoadExtType ET) const { 6184 SDLoc dl(Op); 6185 if (ET == ISD::NON_EXTLOAD && 6186 (Op.getOpcode() == ISD::FP_TO_UINT || 6187 Op.getOpcode() == ISD::FP_TO_SINT) && 6188 isOperationLegalOrCustom(Op.getOpcode(), 6189 Op.getOperand(0).getValueType())) { 6190 6191 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6192 return true; 6193 } 6194 6195 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op); 6196 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() || 6197 LD->isNonTemporal()) 6198 return false; 6199 if (LD->getMemoryVT() != MemVT) 6200 return false; 6201 6202 RLI.Ptr = LD->getBasePtr(); 6203 if (LD->isIndexed() && LD->getOffset().getOpcode() != ISD::UNDEF) { 6204 assert(LD->getAddressingMode() == ISD::PRE_INC && 6205 "Non-pre-inc AM on PPC?"); 6206 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr, 6207 LD->getOffset()); 6208 } 6209 6210 RLI.Chain = LD->getChain(); 6211 RLI.MPI = LD->getPointerInfo(); 6212 RLI.IsInvariant = LD->isInvariant(); 6213 RLI.Alignment = LD->getAlignment(); 6214 RLI.AAInfo = LD->getAAInfo(); 6215 RLI.Ranges = LD->getRanges(); 6216 6217 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1); 6218 return true; 6219 } 6220 6221 // Given the head of the old chain, ResChain, insert a token factor containing 6222 // it and NewResChain, and make users of ResChain now be users of that token 6223 // factor. 6224 void PPCTargetLowering::spliceIntoChain(SDValue ResChain, 6225 SDValue NewResChain, 6226 SelectionDAG &DAG) const { 6227 if (!ResChain) 6228 return; 6229 6230 SDLoc dl(NewResChain); 6231 6232 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 6233 NewResChain, DAG.getUNDEF(MVT::Other)); 6234 assert(TF.getNode() != NewResChain.getNode() && 6235 "A new TF really is required here"); 6236 6237 DAG.ReplaceAllUsesOfValueWith(ResChain, TF); 6238 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain); 6239 } 6240 6241 /// \brief Custom lowers integer to floating point conversions to use 6242 /// the direct move instructions available in ISA 2.07 to avoid the 6243 /// need for load/store combinations. 6244 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op, 6245 SelectionDAG &DAG, 6246 SDLoc dl) const { 6247 assert((Op.getValueType() == MVT::f32 || 6248 Op.getValueType() == MVT::f64) && 6249 "Invalid floating point type as target of conversion"); 6250 assert(Subtarget.hasFPCVT() && 6251 "Int to FP conversions with direct moves require FPCVT"); 6252 SDValue FP; 6253 SDValue Src = Op.getOperand(0); 6254 bool SinglePrec = Op.getValueType() == MVT::f32; 6255 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32; 6256 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP; 6257 unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) : 6258 (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU); 6259 6260 if (WordInt) { 6261 FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ, 6262 dl, MVT::f64, Src); 6263 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 6264 } 6265 else { 6266 FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src); 6267 FP = DAG.getNode(ConvOp, dl, SinglePrec ? 
                     MVT::f32 : MVT::f64, FP);
  }

  return FP;
}

SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);

  if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) {
    if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64)
      return SDValue();

    SDValue Value = Op.getOperand(0);
    // The values are now known to be -1 (false) or 1 (true). To convert this
    // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
    // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
    Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

    SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::f64);
    FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64,
                          FPHalfs, FPHalfs, FPHalfs, FPHalfs);

    Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

    if (Op.getValueType() != MVT::v4f64)
      Value = DAG.getNode(ISD::FP_ROUND, dl,
                          Op.getValueType(), Value,
                          DAG.getIntPtrConstant(1, dl));
    return Value;
  }

  // Don't handle ppc_fp128 here; let it be lowered to a libcall.
  if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
    return SDValue();

  if (Op.getOperand(0).getValueType() == MVT::i1)
    return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
                       DAG.getConstantFP(1.0, dl, Op.getValueType()),
                       DAG.getConstantFP(0.0, dl, Op.getValueType()));

  // If we have direct moves, we can do the whole conversion and skip the
  // store/load. However, without FPCVT we can't do most conversions.
  if (Subtarget.hasDirectMove() && Subtarget.isPPC64() && Subtarget.hasFPCVT())
    return LowerINT_TO_FPDirectMove(Op, DAG, dl);

  assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
         "UINT_TO_FP is supported only with FPCVT");

  // If we have FCFIDS, then use it when converting to single-precision.
  // Otherwise, convert to double-precision and then round.
  unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                       ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
                                                            : PPCISD::FCFIDS)
                       : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
                                                            : PPCISD::FCFID);
  MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                  ? MVT::f32
                  : MVT::f64;

  if (Op.getOperand(0).getValueType() == MVT::i64) {
    SDValue SINT = Op.getOperand(0);
    // When converting to single-precision, we actually need to convert
    // to double-precision first and then round to single-precision.
    // To avoid double-rounding effects during that operation, we have
    // to prepare the input operand. Bits that might be truncated when
    // converting to double-precision are replaced by a bit that won't
    // be lost at this stage, but is below the single-precision rounding
    // position.
    //
    // However, if -enable-unsafe-fp-math is in effect, accept double
    // rounding to avoid the extra overhead.
    if (Op.getValueType() == MVT::f32 &&
        !Subtarget.hasFPCVT() &&
        !DAG.getTarget().Options.UnsafeFPMath) {

      // Twiddle input to make sure the low 11 bits are zero. (If this
      // is the case, we are guaranteed the value will fit into the 53 bit
      // mantissa of an IEEE double-precision value without rounding.)
6347 // If any of those low 11 bits were not zero originally, make sure 6348 // bit 12 (value 2048) is set instead, so that the final rounding 6349 // to single-precision gets the correct result. 6350 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 6351 SINT, DAG.getConstant(2047, dl, MVT::i64)); 6352 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 6353 Round, DAG.getConstant(2047, dl, MVT::i64)); 6354 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 6355 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 6356 Round, DAG.getConstant(-2048, dl, MVT::i64)); 6357 6358 // However, we cannot use that value unconditionally: if the magnitude 6359 // of the input value is small, the bit-twiddling we did above might 6360 // end up visibly changing the output. Fortunately, in that case, we 6361 // don't need to twiddle bits since the original input will convert 6362 // exactly to double-precision floating-point already. Therefore, 6363 // construct a conditional to use the original value if the top 11 6364 // bits are all sign-bit copies, and use the rounded value computed 6365 // above otherwise. 6366 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 6367 SINT, DAG.getConstant(53, dl, MVT::i32)); 6368 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 6369 Cond, DAG.getConstant(1, dl, MVT::i64)); 6370 Cond = DAG.getSetCC(dl, MVT::i32, 6371 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT); 6372 6373 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 6374 } 6375 6376 ReuseLoadInfo RLI; 6377 SDValue Bits; 6378 6379 MachineFunction &MF = DAG.getMachineFunction(); 6380 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 6381 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, false, 6382 false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo, 6383 RLI.Ranges); 6384 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6385 } else if (Subtarget.hasLFIWAX() && 6386 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) { 6387 MachineMemOperand *MMO = 6388 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6389 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6390 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6391 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl, 6392 DAG.getVTList(MVT::f64, MVT::Other), 6393 Ops, MVT::i32, MMO); 6394 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6395 } else if (Subtarget.hasFPCVT() && 6396 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) { 6397 MachineMemOperand *MMO = 6398 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6399 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6400 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6401 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl, 6402 DAG.getVTList(MVT::f64, MVT::Other), 6403 Ops, MVT::i32, MMO); 6404 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6405 } else if (((Subtarget.hasLFIWAX() && 6406 SINT.getOpcode() == ISD::SIGN_EXTEND) || 6407 (Subtarget.hasFPCVT() && 6408 SINT.getOpcode() == ISD::ZERO_EXTEND)) && 6409 SINT.getOperand(0).getValueType() == MVT::i32) { 6410 MachineFrameInfo *FrameInfo = MF.getFrameInfo(); 6411 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 6412 6413 int FrameIdx = FrameInfo->CreateStackObject(4, 4, false); 6414 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6415 6416 SDValue Store = 6417 DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx, 6418 MachinePointerInfo::getFixedStack(FrameIdx), 6419 false, false, 0); 6420 6421 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 6422 "Expected an i32 
store"); 6423 6424 RLI.Ptr = FIdx; 6425 RLI.Chain = Store; 6426 RLI.MPI = MachinePointerInfo::getFixedStack(FrameIdx); 6427 RLI.Alignment = 4; 6428 6429 MachineMemOperand *MMO = 6430 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6431 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6432 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6433 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ? 6434 PPCISD::LFIWZX : PPCISD::LFIWAX, 6435 dl, DAG.getVTList(MVT::f64, MVT::Other), 6436 Ops, MVT::i32, MMO); 6437 } else 6438 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 6439 6440 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 6441 6442 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 6443 FP = DAG.getNode(ISD::FP_ROUND, dl, 6444 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 6445 return FP; 6446 } 6447 6448 assert(Op.getOperand(0).getValueType() == MVT::i32 && 6449 "Unhandled INT_TO_FP type in custom expander!"); 6450 // Since we only generate this in 64-bit mode, we can take advantage of 6451 // 64-bit registers. In particular, sign extend the input value into the 6452 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 6453 // then lfd it and fcfid it. 6454 MachineFunction &MF = DAG.getMachineFunction(); 6455 MachineFrameInfo *FrameInfo = MF.getFrameInfo(); 6456 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 6457 6458 SDValue Ld; 6459 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 6460 ReuseLoadInfo RLI; 6461 bool ReusingLoad; 6462 if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI, 6463 DAG))) { 6464 int FrameIdx = FrameInfo->CreateStackObject(4, 4, false); 6465 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6466 6467 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 6468 MachinePointerInfo::getFixedStack(FrameIdx), 6469 false, false, 0); 6470 6471 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 6472 "Expected an i32 store"); 6473 6474 RLI.Ptr = FIdx; 6475 RLI.Chain = Store; 6476 RLI.MPI = MachinePointerInfo::getFixedStack(FrameIdx); 6477 RLI.Alignment = 4; 6478 } 6479 6480 MachineMemOperand *MMO = 6481 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6482 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6483 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6484 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 6485 PPCISD::LFIWZX : PPCISD::LFIWAX, 6486 dl, DAG.getVTList(MVT::f64, MVT::Other), 6487 Ops, MVT::i32, MMO); 6488 if (ReusingLoad) 6489 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG); 6490 } else { 6491 assert(Subtarget.isPPC64() && 6492 "i32->FP without LFIWAX supported only on PPC64"); 6493 6494 int FrameIdx = FrameInfo->CreateStackObject(8, 8, false); 6495 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6496 6497 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, 6498 Op.getOperand(0)); 6499 6500 // STD the extended value into the stack slot. 6501 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Ext64, FIdx, 6502 MachinePointerInfo::getFixedStack(FrameIdx), 6503 false, false, 0); 6504 6505 // Load the value as a double. 6506 Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx, 6507 MachinePointerInfo::getFixedStack(FrameIdx), 6508 false, false, false, 0); 6509 } 6510 6511 // FCFID it and return it. 
  SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
  if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
    FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
                     DAG.getIntPtrConstant(0, dl));
  return FP;
}

SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  /*
   The rounding mode is in bits 30:31 of FPSCR, and has the following
   settings:
     00 Round to nearest
     01 Round to 0
     10 Round to +inf
     11 Round to -inf

   FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

   To perform the conversion, we do:
     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
   which maps 00 -> 1, 01 -> 0, 10 -> 2, and 11 -> 3 as required.
  */

  MachineFunction &MF = DAG.getMachineFunction();
  EVT VT = Op.getValueType();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());

  // Save the FP control word to a register.
  EVT NodeTys[] = {
    MVT::f64,    // return register
    MVT::Glue    // unused in this context
  };
  SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);

  // Save the FP register to a stack slot.
  int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain,
                               StackSlot, MachinePointerInfo(), false, false, 0);

  // Load the FP control word from the low 32 bits of the stack slot.
  SDValue Four = DAG.getConstant(4, dl, PtrVT);
  SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
  SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(),
                            false, false, false, 0);

  // Transform as necessary.
  SDValue CWD1 =
    DAG.getNode(ISD::AND, dl, MVT::i32,
                CWD, DAG.getConstant(3, dl, MVT::i32));
  SDValue CWD2 =
    DAG.getNode(ISD::SRL, dl, MVT::i32,
                DAG.getNode(ISD::AND, dl, MVT::i32,
                            DAG.getNode(ISD::XOR, dl, MVT::i32,
                                        CWD, DAG.getConstant(3, dl, MVT::i32)),
                            DAG.getConstant(3, dl, MVT::i32)),
                DAG.getConstant(1, dl, MVT::i32));

  SDValue RetVal =
    DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);

  return DAG.getNode((VT.getSizeInBits() < 16 ?
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
}

SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  SDLoc dl(Op);
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SHL!");

  // Expand into a bunch of logical ops. Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
6593 SDValue Lo = Op.getOperand(0); 6594 SDValue Hi = Op.getOperand(1); 6595 SDValue Amt = Op.getOperand(2); 6596 EVT AmtVT = Amt.getValueType(); 6597 6598 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6599 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6600 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 6601 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 6602 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 6603 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6604 DAG.getConstant(-BitWidth, dl, AmtVT)); 6605 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 6606 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 6607 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 6608 SDValue OutOps[] = { OutLo, OutHi }; 6609 return DAG.getMergeValues(OutOps, dl); 6610 } 6611 6612 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 6613 EVT VT = Op.getValueType(); 6614 SDLoc dl(Op); 6615 unsigned BitWidth = VT.getSizeInBits(); 6616 assert(Op.getNumOperands() == 3 && 6617 VT == Op.getOperand(1).getValueType() && 6618 "Unexpected SRL!"); 6619 6620 // Expand into a bunch of logical ops. Note that these ops 6621 // depend on the PPC behavior for oversized shift amounts. 6622 SDValue Lo = Op.getOperand(0); 6623 SDValue Hi = Op.getOperand(1); 6624 SDValue Amt = Op.getOperand(2); 6625 EVT AmtVT = Amt.getValueType(); 6626 6627 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6628 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6629 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 6630 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 6631 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 6632 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6633 DAG.getConstant(-BitWidth, dl, AmtVT)); 6634 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 6635 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 6636 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 6637 SDValue OutOps[] = { OutLo, OutHi }; 6638 return DAG.getMergeValues(OutOps, dl); 6639 } 6640 6641 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 6642 SDLoc dl(Op); 6643 EVT VT = Op.getValueType(); 6644 unsigned BitWidth = VT.getSizeInBits(); 6645 assert(Op.getNumOperands() == 3 && 6646 VT == Op.getOperand(1).getValueType() && 6647 "Unexpected SRA!"); 6648 6649 // Expand into a bunch of logical ops, followed by a select_cc. 6650 SDValue Lo = Op.getOperand(0); 6651 SDValue Hi = Op.getOperand(1); 6652 SDValue Amt = Op.getOperand(2); 6653 EVT AmtVT = Amt.getValueType(); 6654 6655 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6656 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6657 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 6658 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 6659 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 6660 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6661 DAG.getConstant(-BitWidth, dl, AmtVT)); 6662 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 6663 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 6664 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT), 6665 Tmp4, Tmp6, ISD::SETLE); 6666 SDValue OutOps[] = { OutLo, OutHi }; 6667 return DAG.getMergeValues(OutOps, dl); 6668 } 6669 6670 //===----------------------------------------------------------------------===// 6671 // Vector related lowering. 
6672 // 6673 6674 /// BuildSplatI - Build a canonical splati of Val with an element size of 6675 /// SplatSize. Cast the result to VT. 6676 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 6677 SelectionDAG &DAG, SDLoc dl) { 6678 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 6679 6680 static const MVT VTys[] = { // canonical VT to use for each size. 6681 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 6682 }; 6683 6684 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 6685 6686 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 6687 if (Val == -1) 6688 SplatSize = 1; 6689 6690 EVT CanonicalVT = VTys[SplatSize-1]; 6691 6692 // Build a canonical splat for this value. 6693 SDValue Elt = DAG.getConstant(Val, dl, MVT::i32); 6694 SmallVector<SDValue, 8> Ops; 6695 Ops.assign(CanonicalVT.getVectorNumElements(), Elt); 6696 SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, Ops); 6697 return DAG.getNode(ISD::BITCAST, dl, ReqVT, Res); 6698 } 6699 6700 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 6701 /// specified intrinsic ID. 6702 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, 6703 SelectionDAG &DAG, SDLoc dl, 6704 EVT DestVT = MVT::Other) { 6705 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 6706 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 6707 DAG.getConstant(IID, dl, MVT::i32), Op); 6708 } 6709 6710 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 6711 /// specified intrinsic ID. 6712 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 6713 SelectionDAG &DAG, SDLoc dl, 6714 EVT DestVT = MVT::Other) { 6715 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 6716 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 6717 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS); 6718 } 6719 6720 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 6721 /// specified intrinsic ID. 6722 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 6723 SDValue Op2, SelectionDAG &DAG, 6724 SDLoc dl, EVT DestVT = MVT::Other) { 6725 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 6726 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 6727 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2); 6728 } 6729 6730 6731 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 6732 /// amount. The result has the specified value type. 6733 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, 6734 EVT VT, SelectionDAG &DAG, SDLoc dl) { 6735 // Force LHS/RHS to be the right type. 6736 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 6737 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 6738 6739 int Ops[16]; 6740 for (unsigned i = 0; i != 16; ++i) 6741 Ops[i] = i + Amt; 6742 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 6743 return DAG.getNode(ISD::BITCAST, dl, VT, T); 6744 } 6745 6746 // If this is a case we can't handle, return null and let the default 6747 // expansion code take care of it. If we CAN select this case, and if it 6748 // selects to a single instruction, return Op. Otherwise, if we can codegen 6749 // this case more efficiently than a constant pool load, lower it to the 6750 // sequence of ops that should be used. 
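// For example (on the AltiVec path below): splat(2) in v8i16 is a single
// vspltish; splat(18) is materialized in two instructions via the VADD_SPLAT
// pseudo (vspltish 9 added to itself, per the even-value rule documented
// further down); and a splat matching none of the patterns falls back to a
// constant-pool load.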
6751 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 6752 SelectionDAG &DAG) const { 6753 SDLoc dl(Op); 6754 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 6755 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 6756 6757 if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) { 6758 // We first build an i32 vector, load it into a QPX register, 6759 // then convert it to a floating-point vector and compare it 6760 // to a zero vector to get the boolean result. 6761 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 6762 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 6763 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FrameIdx); 6764 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6765 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6766 6767 assert(BVN->getNumOperands() == 4 && 6768 "BUILD_VECTOR for v4i1 does not have 4 operands"); 6769 6770 bool IsConst = true; 6771 for (unsigned i = 0; i < 4; ++i) { 6772 if (BVN->getOperand(i).getOpcode() == ISD::UNDEF) continue; 6773 if (!isa<ConstantSDNode>(BVN->getOperand(i))) { 6774 IsConst = false; 6775 break; 6776 } 6777 } 6778 6779 if (IsConst) { 6780 Constant *One = 6781 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0); 6782 Constant *NegOne = 6783 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0); 6784 6785 SmallVector<Constant*, 4> CV(4, NegOne); 6786 for (unsigned i = 0; i < 4; ++i) { 6787 if (BVN->getOperand(i).getOpcode() == ISD::UNDEF) 6788 CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext())); 6789 else if (cast<ConstantSDNode>(BVN->getOperand(i))-> 6790 getConstantIntValue()->isZero()) 6791 continue; 6792 else 6793 CV[i] = One; 6794 } 6795 6796 Constant *CP = ConstantVector::get(CV); 6797 SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()), 6798 16 /* alignment */); 6799 6800 SmallVector<SDValue, 2> Ops; 6801 Ops.push_back(DAG.getEntryNode()); 6802 Ops.push_back(CPIdx); 6803 6804 SmallVector<EVT, 2> ValueVTs; 6805 ValueVTs.push_back(MVT::v4i1); 6806 ValueVTs.push_back(MVT::Other); // chain 6807 SDVTList VTs = DAG.getVTList(ValueVTs); 6808 6809 return DAG.getMemIntrinsicNode(PPCISD::QVLFSb, 6810 dl, VTs, Ops, MVT::v4f32, 6811 MachinePointerInfo::getConstantPool()); 6812 } 6813 6814 SmallVector<SDValue, 4> Stores; 6815 for (unsigned i = 0; i < 4; ++i) { 6816 if (BVN->getOperand(i).getOpcode() == ISD::UNDEF) continue; 6817 6818 unsigned Offset = 4*i; 6819 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 6820 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 6821 6822 unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize(); 6823 if (StoreSize > 4) { 6824 Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl, 6825 BVN->getOperand(i), Idx, 6826 PtrInfo.getWithOffset(Offset), 6827 MVT::i32, false, false, 0)); 6828 } else { 6829 SDValue StoreValue = BVN->getOperand(i); 6830 if (StoreSize < 4) 6831 StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue); 6832 6833 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, 6834 StoreValue, Idx, 6835 PtrInfo.getWithOffset(Offset), 6836 false, false, 0)); 6837 } 6838 } 6839 6840 SDValue StoreChain; 6841 if (!Stores.empty()) 6842 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 6843 else 6844 StoreChain = DAG.getEntryNode(); 6845 6846 // Now load from v4i32 into the QPX register; this will extend it to 6847 // v4i64 but not yet convert it to a floating point. 
Nevertheless, this 6848 // is typed as v4f64 because the QPX register integer states are not 6849 // explicitly represented. 6850 6851 SmallVector<SDValue, 2> Ops; 6852 Ops.push_back(StoreChain); 6853 Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32)); 6854 Ops.push_back(FIdx); 6855 6856 SmallVector<EVT, 2> ValueVTs; 6857 ValueVTs.push_back(MVT::v4f64); 6858 ValueVTs.push_back(MVT::Other); // chain 6859 SDVTList VTs = DAG.getVTList(ValueVTs); 6860 6861 SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, 6862 dl, VTs, Ops, MVT::v4i32, PtrInfo); 6863 LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 6864 DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32), 6865 LoadedVect); 6866 6867 SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::f64); 6868 FPZeros = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64, 6869 FPZeros, FPZeros, FPZeros, FPZeros); 6870 6871 return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ); 6872 } 6873 6874 // All other QPX vectors are handled by generic code. 6875 if (Subtarget.hasQPX()) 6876 return SDValue(); 6877 6878 // Check if this is a splat of a constant value. 6879 APInt APSplatBits, APSplatUndef; 6880 unsigned SplatBitSize; 6881 bool HasAnyUndefs; 6882 if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize, 6883 HasAnyUndefs, 0, !Subtarget.isLittleEndian()) || 6884 SplatBitSize > 32) 6885 return SDValue(); 6886 6887 unsigned SplatBits = APSplatBits.getZExtValue(); 6888 unsigned SplatUndef = APSplatUndef.getZExtValue(); 6889 unsigned SplatSize = SplatBitSize / 8; 6890 6891 // First, handle single instruction cases. 6892 6893 // All zeros? 6894 if (SplatBits == 0) { 6895 // Canonicalize all zero vectors to be v4i32. 6896 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 6897 SDValue Z = DAG.getConstant(0, dl, MVT::i32); 6898 Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z); 6899 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z); 6900 } 6901 return Op; 6902 } 6903 6904 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 6905 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >> 6906 (32-SplatBitSize)); 6907 if (SextVal >= -16 && SextVal <= 15) 6908 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl); 6909 6910 6911 // Two instruction sequences. 6912 6913 // If this value is in the range [-32,30] and is even, use: 6914 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2) 6915 // If this value is in the range [17,31] and is odd, use: 6916 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16) 6917 // If this value is in the range [-31,-17] and is odd, use: 6918 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16) 6919 // Note the last two are three-instruction sequences. 6920 if (SextVal >= -32 && SextVal <= 31) { 6921 // To avoid having these optimizations undone by constant folding, 6922 // we convert to a pseudo that will be expanded later into one of 6923 // the above forms. 6924 SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32); 6925 EVT VT = (SplatSize == 1 ? MVT::v16i8 : 6926 (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32)); 6927 SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32); 6928 SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize); 6929 if (VT == Op.getValueType()) 6930 return RetVal; 6931 else 6932 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal); 6933 } 6934 6935 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is 6936 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). 
This is important
  // for fneg/fabs.
  if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
    // Make a splat of -1 with vspltisw -1:
    SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);

    // Make the VSLW intrinsic, computing 0x8000_0000.
    SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                   OnesV, DAG, dl);

    // xor by OnesV to invert it.
    Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
  }

  // Check to see if this matches one of a wide variety of 'vsplti + binop
  // self' cases.
  static const signed char SplatCsts[] = {
    -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
    -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
  };

  for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous (e.g. formation of 0x8000_0000).
    int i = SplatCsts[idx];

    // Figure out what shift amount will be used by altivec if shifted by i in
    // this splat size.
    unsigned TypeShiftAmt = i & (SplatBitSize-1);

    // vsplti + shl self.
    if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
        Intrinsic::ppc_altivec_vslw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + srl self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
        Intrinsic::ppc_altivec_vsrw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + sra self. (Note: unlike the srl case above, this must use an
    // arithmetic shift; with the unsigned shift it would merely duplicate the
    // srl condition and never fire.)
    if (SextVal == (i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
        Intrinsic::ppc_altivec_vsraw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + rol self.
    if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                         ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
        Intrinsic::ppc_altivec_vrlw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // t = vsplti c, result = vsldoi t, t, 1
    if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 2
    if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ?
0xFFFF : 0))) { 7018 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 7019 return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG, dl); 7020 } 7021 // t = vsplti c, result = vsldoi t, t, 3 7022 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) { 7023 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 7024 return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl); 7025 } 7026 } 7027 7028 return SDValue(); 7029 } 7030 7031 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 7032 /// the specified operations to build the shuffle. 7033 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 7034 SDValue RHS, SelectionDAG &DAG, 7035 SDLoc dl) { 7036 unsigned OpNum = (PFEntry >> 26) & 0x0F; 7037 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 7038 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 7039 7040 enum { 7041 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 7042 OP_VMRGHW, 7043 OP_VMRGLW, 7044 OP_VSPLTISW0, 7045 OP_VSPLTISW1, 7046 OP_VSPLTISW2, 7047 OP_VSPLTISW3, 7048 OP_VSLDOI4, 7049 OP_VSLDOI8, 7050 OP_VSLDOI12 7051 }; 7052 7053 if (OpNum == OP_COPY) { 7054 if (LHSID == (1*9+2)*9+3) return LHS; 7055 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 7056 return RHS; 7057 } 7058 7059 SDValue OpLHS, OpRHS; 7060 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 7061 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 7062 7063 int ShufIdxs[16]; 7064 switch (OpNum) { 7065 default: llvm_unreachable("Unknown i32 permute!"); 7066 case OP_VMRGHW: 7067 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 7068 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 7069 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 7070 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 7071 break; 7072 case OP_VMRGLW: 7073 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 7074 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 7075 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 7076 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 7077 break; 7078 case OP_VSPLTISW0: 7079 for (unsigned i = 0; i != 16; ++i) 7080 ShufIdxs[i] = (i&3)+0; 7081 break; 7082 case OP_VSPLTISW1: 7083 for (unsigned i = 0; i != 16; ++i) 7084 ShufIdxs[i] = (i&3)+4; 7085 break; 7086 case OP_VSPLTISW2: 7087 for (unsigned i = 0; i != 16; ++i) 7088 ShufIdxs[i] = (i&3)+8; 7089 break; 7090 case OP_VSPLTISW3: 7091 for (unsigned i = 0; i != 16; ++i) 7092 ShufIdxs[i] = (i&3)+12; 7093 break; 7094 case OP_VSLDOI4: 7095 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl); 7096 case OP_VSLDOI8: 7097 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl); 7098 case OP_VSLDOI12: 7099 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl); 7100 } 7101 EVT VT = OpLHS.getValueType(); 7102 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS); 7103 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS); 7104 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs); 7105 return DAG.getNode(ISD::BITCAST, dl, VT, T); 7106 } 7107 7108 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 7109 /// is a shuffle we can handle in a single instruction, return it. Otherwise, 7110 /// return the code it can be lowered into. 
Worst case, it can always be 7111 /// lowered into a vperm. 7112 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 7113 SelectionDAG &DAG) const { 7114 SDLoc dl(Op); 7115 SDValue V1 = Op.getOperand(0); 7116 SDValue V2 = Op.getOperand(1); 7117 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 7118 EVT VT = Op.getValueType(); 7119 bool isLittleEndian = Subtarget.isLittleEndian(); 7120 7121 if (Subtarget.hasQPX()) { 7122 if (VT.getVectorNumElements() != 4) 7123 return SDValue(); 7124 7125 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 7126 7127 int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp); 7128 if (AlignIdx != -1) { 7129 return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2, 7130 DAG.getConstant(AlignIdx, dl, MVT::i32)); 7131 } else if (SVOp->isSplat()) { 7132 int SplatIdx = SVOp->getSplatIndex(); 7133 if (SplatIdx >= 4) { 7134 std::swap(V1, V2); 7135 SplatIdx -= 4; 7136 } 7137 7138 // FIXME: If SplatIdx == 0 and the input came from a load, then there is 7139 // nothing to do. 7140 7141 return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1, 7142 DAG.getConstant(SplatIdx, dl, MVT::i32)); 7143 } 7144 7145 // Lower this into a qvgpci/qvfperm pair. 7146 7147 // Compute the qvgpci literal 7148 unsigned idx = 0; 7149 for (unsigned i = 0; i < 4; ++i) { 7150 int m = SVOp->getMaskElt(i); 7151 unsigned mm = m >= 0 ? (unsigned) m : i; 7152 idx |= mm << (3-i)*3; 7153 } 7154 7155 SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64, 7156 DAG.getConstant(idx, dl, MVT::i32)); 7157 return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3); 7158 } 7159 7160 // Cases that are handled by instructions that take permute immediates 7161 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 7162 // selected by the instruction selector. 7163 if (V2.getOpcode() == ISD::UNDEF) { 7164 if (PPC::isSplatShuffleMask(SVOp, 1) || 7165 PPC::isSplatShuffleMask(SVOp, 2) || 7166 PPC::isSplatShuffleMask(SVOp, 4) || 7167 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) || 7168 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) || 7169 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) || 7170 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 || 7171 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) || 7172 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) || 7173 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) || 7174 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) || 7175 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) || 7176 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) || 7177 PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) || 7178 PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)) { 7179 return Op; 7180 } 7181 } 7182 7183 // Altivec has a variety of "shuffle immediates" that take two vector inputs 7184 // and produce a fixed permutation. If any of these match, do not lower to 7185 // VPERM. 7186 unsigned int ShuffleKind = isLittleEndian ? 
2 : 0;
  if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
      PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
      PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
      PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
      PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
      PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
      PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
      PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
      PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))
    return Op;

  // Check to see if this is a shuffle of 4-byte values. If so, we can use our
  // perfect shuffle table to emit an optimal matching sequence.
  ArrayRef<int> PermMask = SVOp->getMask();

  unsigned PFIndexes[4];
  bool isFourElementShuffle = true;
  for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
    unsigned EltNo = 8;   // Start out undef.
    for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
      if (PermMask[i*4+j] < 0)
        continue;   // Undef, ignore it.

      unsigned ByteSource = PermMask[i*4+j];
      if ((ByteSource & 3) != j) {
        isFourElementShuffle = false;
        break;
      }

      if (EltNo == 8) {
        EltNo = ByteSource/4;
      } else if (EltNo != ByteSource/4) {
        isFourElementShuffle = false;
        break;
      }
    }
    PFIndexes[i] = EltNo;
  }

  // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
  // perfect shuffle vector to determine if it is cost effective to do this as
  // discrete instructions, or whether we should use a vperm.
  // For now, we skip this for little endian until such time as we have a
  // little-endian perfect shuffle table.
  if (isFourElementShuffle && !isLittleEndian) {
    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];

    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    // Determining when to avoid vperm is tricky. Many things affect the cost
    // of vperm, particularly how many times the perm mask needs to be computed.
    // For example, if the perm mask can be hoisted out of a loop or is already
    // used (perhaps because there are multiple permutes with the same shuffle
    // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of
    // the loop requires an extra register.
    //
    // As a compromise, we only emit discrete instructions if the shuffle can be
    // generated in 3 or fewer operations. When we have loop information
    // available, if this block is within a loop, we should avoid using vperm
    // for 3-operation perms and use a constant pool load instead.
    if (Cost < 3)
      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
  }

  // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
  // vector that will get spilled to the constant pool.
  if (V2.getOpcode() == ISD::UNDEF) V2 = V1;

  // The VECTOR_SHUFFLE mask is almost exactly what we want for vperm, except
  // that it is in input element units, not in bytes. Convert now.
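  // For example, with v4i32 inputs an element mask of <4, 0, 1, 2> expands
  // to the byte mask <16,17,18,19, 0,1,2,3, 4,5,6,7, 8,9,10,11> on big
  // endian; on little endian each entry e becomes 31 - e and the two input
  // vectors are swapped (an illustrative walk-through of the loop below).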

  // For little endian, the order of the input vectors is reversed, and
  // the permutation mask is complemented with respect to 31. This is
  // necessary to produce proper semantics with the big-endian-biased vperm
  // instruction.
  EVT EltVT = V1.getValueType().getVectorElementType();
  unsigned BytesPerElement = EltVT.getSizeInBits()/8;

  SmallVector<SDValue, 16> ResultMask;
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
    unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];

    for (unsigned j = 0; j != BytesPerElement; ++j)
      if (isLittleEndian)
        ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
                                             dl, MVT::i32));
      else
        ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
                                             MVT::i32));
  }

  SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
                                  ResultMask);
  if (isLittleEndian)
    return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
                       V2, V1, VPermMask);
  else
    return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
                       V1, V2, VPermMask);
}

/// getAltivecCompareInfo - Given an intrinsic, return false if it is not an
/// altivec comparison. If it is, return true and fill in CompareOpc/isDot
/// with information about the intrinsic.
static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc,
                                  bool &isDot, const PPCSubtarget &Subtarget) {
  unsigned IntrinsicID =
    cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
  CompareOpc = -1;
  isDot = false;
  switch (IntrinsicID) {
  default: return false;
  // Comparison predicates.
  case Intrinsic::ppc_altivec_vcmpbfp_p:  CompareOpc = 966; isDot = true; break;
  case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = true; break;
  case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc =   6; isDot = true; break;
  case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc =  70; isDot = true; break;
  case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = true; break;
  case Intrinsic::ppc_altivec_vcmpequd_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 199;
      isDot = true;
    } else
      return false;

    break;
  case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = true; break;
  case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = true; break;
  case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = true; break;
  case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = true; break;
  case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = true; break;
  case Intrinsic::ppc_altivec_vcmpgtsd_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 967;
      isDot = true;
    } else
      return false;

    break;
  case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = true; break;
  case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = true; break;
  case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = true; break;
  case Intrinsic::ppc_altivec_vcmpgtud_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 711;
      isDot = true;
    } else
      return false;

    break;

  // Normal Comparisons.
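  // (The CompareOpc values in this switch are the extended-opcode fields of
  // the corresponding AltiVec vcmp* instructions, e.g. 6 for vcmpequb and
  // 198 for vcmpeqfp; the predicate ("dot") forms above reuse the same
  // opcodes with the record bit set.)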
7348 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 7349 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 7350 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 7351 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 7352 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 7353 case Intrinsic::ppc_altivec_vcmpequd: 7354 if (Subtarget.hasP8Altivec()) { 7355 CompareOpc = 199; 7356 isDot = 0; 7357 } 7358 else 7359 return false; 7360 7361 break; 7362 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 7363 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 7364 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 7365 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 7366 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 7367 case Intrinsic::ppc_altivec_vcmpgtsd: 7368 if (Subtarget.hasP8Altivec()) { 7369 CompareOpc = 967; 7370 isDot = 0; 7371 } 7372 else 7373 return false; 7374 7375 break; 7376 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 7377 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 7378 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 7379 case Intrinsic::ppc_altivec_vcmpgtud: 7380 if (Subtarget.hasP8Altivec()) { 7381 CompareOpc = 711; 7382 isDot = 0; 7383 } 7384 else 7385 return false; 7386 7387 break; 7388 } 7389 return true; 7390 } 7391 7392 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 7393 /// lower, do it, otherwise return null. 7394 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 7395 SelectionDAG &DAG) const { 7396 // If this is a lowered altivec predicate compare, CompareOpc is set to the 7397 // opcode number of the comparison. 7398 SDLoc dl(Op); 7399 int CompareOpc; 7400 bool isDot; 7401 if (!getAltivecCompareInfo(Op, CompareOpc, isDot, Subtarget)) 7402 return SDValue(); // Don't custom lower most intrinsics. 7403 7404 // If this is a non-dot comparison, make the VCMP node and we are done. 7405 if (!isDot) { 7406 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 7407 Op.getOperand(1), Op.getOperand(2), 7408 DAG.getConstant(CompareOpc, dl, MVT::i32)); 7409 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 7410 } 7411 7412 // Create the PPCISD altivec 'dot' comparison node. 7413 SDValue Ops[] = { 7414 Op.getOperand(2), // LHS 7415 Op.getOperand(3), // RHS 7416 DAG.getConstant(CompareOpc, dl, MVT::i32) 7417 }; 7418 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue }; 7419 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 7420 7421 // Now that we have the comparison, emit a copy from the CR to a GPR. 7422 // This is flagged to the above dot comparison. 7423 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32, 7424 DAG.getRegister(PPC::CR6, MVT::i32), 7425 CompNode.getValue(1)); 7426 7427 // Unpack the result based on how the target uses it. 7428 unsigned BitNo; // Bit # of CR6. 7429 bool InvertBit; // Invert result? 7430 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 7431 default: // Can't happen, don't crash on invalid number though. 7432 case 0: // Return the value of the EQ bit of CR6. 7433 BitNo = 0; InvertBit = false; 7434 break; 7435 case 1: // Return the inverted value of the EQ bit of CR6. 
    BitNo = 0; InvertBit = true;
    break;
  case 2:   // Return the value of the LT bit of CR6.
    BitNo = 2; InvertBit = false;
    break;
  case 3:   // Return the inverted value of the LT bit of CR6.
    BitNo = 2; InvertBit = true;
    break;
  }

  // Shift the bit into the low position.
  Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
                      DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
  // Isolate the bit.
  Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
                      DAG.getConstant(1, dl, MVT::i32));

  // If we are supposed to, toggle the bit.
  if (InvertBit)
    Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
                        DAG.getConstant(1, dl, MVT::i32));
  return Flags;
}

SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // For v2i64 (VSX), we can pattern match the v2i32 case (using fp <-> int
  // instructions), but for smaller types, we need to first extend up to v2i32
  // before going any further.
  if (Op.getValueType() == MVT::v2i64) {
    EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    if (ExtVT != MVT::v2i32) {
      Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0));
      Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op,
                       DAG.getValueType(EVT::getVectorVT(*DAG.getContext(),
                                        ExtVT.getVectorElementType(), 4)));
      Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op);
      Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op,
                       DAG.getValueType(MVT::v2i32));
    }

    return Op;
  }

  return SDValue();
}

SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // Create a stack slot that is 16-byte aligned.
  MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(16, 16, false);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  // Store the input value into Value#0 of the stack slot.
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
                               Op.getOperand(0), FIdx, MachinePointerInfo(),
                               false, false, 0);
  // Load it out.
  return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo(),
                     false, false, false, 0);
}

SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDNode *N = Op.getNode();

  assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
         "Unknown extract_vector_elt type");

  SDValue Value = N->getOperand(0);

  // The first part of this is like the store lowering, except that we don't
  // need to track the chain.

  // The values are now known to be -1 (false) or 1 (true). To convert this
  // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
  // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
  Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

  // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
  // understand how to form the extending load.
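  // Concretely, a 'false' lane of -1.0 maps to 0.5 * -1.0 + 0.5 = 0.0 and a
  // 'true' lane of 1.0 maps to 0.5 * 1.0 + 0.5 = 1.0, so the unsigned
  // conversion below produces exactly 0 or 1 in each lane.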
7522 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::f64); 7523 FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64, 7524 FPHalfs, FPHalfs, FPHalfs, FPHalfs); 7525 7526 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 7527 7528 // Now convert to an integer and store. 7529 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 7530 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 7531 Value); 7532 7533 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 7534 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 7535 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FrameIdx); 7536 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7537 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7538 7539 SDValue StoreChain = DAG.getEntryNode(); 7540 SmallVector<SDValue, 2> Ops; 7541 Ops.push_back(StoreChain); 7542 Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32)); 7543 Ops.push_back(Value); 7544 Ops.push_back(FIdx); 7545 7546 SmallVector<EVT, 2> ValueVTs; 7547 ValueVTs.push_back(MVT::Other); // chain 7548 SDVTList VTs = DAG.getVTList(ValueVTs); 7549 7550 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 7551 dl, VTs, Ops, MVT::v4i32, PtrInfo); 7552 7553 // Extract the value requested. 7554 unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 7555 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 7556 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 7557 7558 SDValue IntVal = DAG.getLoad(MVT::i32, dl, StoreChain, Idx, 7559 PtrInfo.getWithOffset(Offset), 7560 false, false, false, 0); 7561 7562 if (!Subtarget.useCRBits()) 7563 return IntVal; 7564 7565 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal); 7566 } 7567 7568 /// Lowering for QPX v4i1 loads 7569 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op, 7570 SelectionDAG &DAG) const { 7571 SDLoc dl(Op); 7572 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode()); 7573 SDValue LoadChain = LN->getChain(); 7574 SDValue BasePtr = LN->getBasePtr(); 7575 7576 if (Op.getValueType() == MVT::v4f64 || 7577 Op.getValueType() == MVT::v4f32) { 7578 EVT MemVT = LN->getMemoryVT(); 7579 unsigned Alignment = LN->getAlignment(); 7580 7581 // If this load is properly aligned, then it is legal. 
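    // Otherwise, scalarize it: split the under-aligned vector load into four
    // scalar loads (extending loads when the memory type is narrower than the
    // value type) and recombine the elements with a BUILD_VECTOR below.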
7582 if (Alignment >= MemVT.getStoreSize()) 7583 return Op; 7584 7585 EVT ScalarVT = Op.getValueType().getScalarType(), 7586 ScalarMemVT = MemVT.getScalarType(); 7587 unsigned Stride = ScalarMemVT.getStoreSize(); 7588 7589 SmallVector<SDValue, 8> Vals, LoadChains; 7590 for (unsigned Idx = 0; Idx < 4; ++Idx) { 7591 SDValue Load; 7592 if (ScalarVT != ScalarMemVT) 7593 Load = 7594 DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain, 7595 BasePtr, 7596 LN->getPointerInfo().getWithOffset(Idx*Stride), 7597 ScalarMemVT, LN->isVolatile(), LN->isNonTemporal(), 7598 LN->isInvariant(), MinAlign(Alignment, Idx*Stride), 7599 LN->getAAInfo()); 7600 else 7601 Load = 7602 DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr, 7603 LN->getPointerInfo().getWithOffset(Idx*Stride), 7604 LN->isVolatile(), LN->isNonTemporal(), 7605 LN->isInvariant(), MinAlign(Alignment, Idx*Stride), 7606 LN->getAAInfo()); 7607 7608 if (Idx == 0 && LN->isIndexed()) { 7609 assert(LN->getAddressingMode() == ISD::PRE_INC && 7610 "Unknown addressing mode on vector load"); 7611 Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(), 7612 LN->getAddressingMode()); 7613 } 7614 7615 Vals.push_back(Load); 7616 LoadChains.push_back(Load.getValue(1)); 7617 7618 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 7619 DAG.getConstant(Stride, dl, 7620 BasePtr.getValueType())); 7621 } 7622 7623 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 7624 SDValue Value = DAG.getNode(ISD::BUILD_VECTOR, dl, 7625 Op.getValueType(), Vals); 7626 7627 if (LN->isIndexed()) { 7628 SDValue RetOps[] = { Value, Vals[0].getValue(1), TF }; 7629 return DAG.getMergeValues(RetOps, dl); 7630 } 7631 7632 SDValue RetOps[] = { Value, TF }; 7633 return DAG.getMergeValues(RetOps, dl); 7634 } 7635 7636 assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower"); 7637 assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported"); 7638 7639 // To lower v4i1 from a byte array, we load the byte elements of the 7640 // vector and then reuse the BUILD_VECTOR logic. 7641 7642 SmallVector<SDValue, 4> VectElmts, VectElmtChains; 7643 for (unsigned i = 0; i < 4; ++i) { 7644 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 7645 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 7646 7647 VectElmts.push_back(DAG.getExtLoad(ISD::EXTLOAD, 7648 dl, MVT::i32, LoadChain, Idx, 7649 LN->getPointerInfo().getWithOffset(i), 7650 MVT::i8 /* memory type */, 7651 LN->isVolatile(), LN->isNonTemporal(), 7652 LN->isInvariant(), 7653 1 /* alignment */, LN->getAAInfo())); 7654 VectElmtChains.push_back(VectElmts[i].getValue(1)); 7655 } 7656 7657 LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains); 7658 SDValue Value = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i1, VectElmts); 7659 7660 SDValue RVals[] = { Value, LoadChain }; 7661 return DAG.getMergeValues(RVals, dl); 7662 } 7663 7664 /// Lowering for QPX v4i1 stores 7665 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op, 7666 SelectionDAG &DAG) const { 7667 SDLoc dl(Op); 7668 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode()); 7669 SDValue StoreChain = SN->getChain(); 7670 SDValue BasePtr = SN->getBasePtr(); 7671 SDValue Value = SN->getValue(); 7672 7673 if (Value.getValueType() == MVT::v4f64 || 7674 Value.getValueType() == MVT::v4f32) { 7675 EVT MemVT = SN->getMemoryVT(); 7676 unsigned Alignment = SN->getAlignment(); 7677 7678 // If this store is properly aligned, then it is legal. 
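    // Otherwise, scalarize it just like the under-aligned load case above:
    // extract each element and emit four scalar (possibly truncating) stores.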
7679 if (Alignment >= MemVT.getStoreSize()) 7680 return Op; 7681 7682 EVT ScalarVT = Value.getValueType().getScalarType(), 7683 ScalarMemVT = MemVT.getScalarType(); 7684 unsigned Stride = ScalarMemVT.getStoreSize(); 7685 7686 SmallVector<SDValue, 8> Stores; 7687 for (unsigned Idx = 0; Idx < 4; ++Idx) { 7688 SDValue Ex = DAG.getNode( 7689 ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value, 7690 DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout()))); 7691 SDValue Store; 7692 if (ScalarVT != ScalarMemVT) 7693 Store = 7694 DAG.getTruncStore(StoreChain, dl, Ex, BasePtr, 7695 SN->getPointerInfo().getWithOffset(Idx*Stride), 7696 ScalarMemVT, SN->isVolatile(), SN->isNonTemporal(), 7697 MinAlign(Alignment, Idx*Stride), SN->getAAInfo()); 7698 else 7699 Store = 7700 DAG.getStore(StoreChain, dl, Ex, BasePtr, 7701 SN->getPointerInfo().getWithOffset(Idx*Stride), 7702 SN->isVolatile(), SN->isNonTemporal(), 7703 MinAlign(Alignment, Idx*Stride), SN->getAAInfo()); 7704 7705 if (Idx == 0 && SN->isIndexed()) { 7706 assert(SN->getAddressingMode() == ISD::PRE_INC && 7707 "Unknown addressing mode on vector store"); 7708 Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(), 7709 SN->getAddressingMode()); 7710 } 7711 7712 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 7713 DAG.getConstant(Stride, dl, 7714 BasePtr.getValueType())); 7715 Stores.push_back(Store); 7716 } 7717 7718 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 7719 7720 if (SN->isIndexed()) { 7721 SDValue RetOps[] = { TF, Stores[0].getValue(1) }; 7722 return DAG.getMergeValues(RetOps, dl); 7723 } 7724 7725 return TF; 7726 } 7727 7728 assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported"); 7729 assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower"); 7730 7731 // The values are now known to be -1 (false) or 1 (true). To convert this 7732 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 7733 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 7734 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 7735 7736 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 7737 // understand how to form the extending load. 7738 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::f64); 7739 FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64, 7740 FPHalfs, FPHalfs, FPHalfs, FPHalfs); 7741 7742 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 7743 7744 // Now convert to an integer and store. 7745 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 7746 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 7747 Value); 7748 7749 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 7750 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 7751 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FrameIdx); 7752 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7753 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7754 7755 SmallVector<SDValue, 2> Ops; 7756 Ops.push_back(StoreChain); 7757 Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32)); 7758 Ops.push_back(Value); 7759 Ops.push_back(FIdx); 7760 7761 SmallVector<EVT, 2> ValueVTs; 7762 ValueVTs.push_back(MVT::Other); // chain 7763 SDVTList VTs = DAG.getVTList(ValueVTs); 7764 7765 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 7766 dl, VTs, Ops, MVT::v4i32, PtrInfo); 7767 7768 // Move data into the byte array. 
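  // The qvstfiw above wrote one 32-bit word per lane (each now 0 or 1) into
  // the 16-byte stack slot; reload those four words so that the low byte of
  // each can be truncate-stored to the v4i1 store's byte array below.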
  SmallVector<SDValue, 4> Loads, LoadChains;
  for (unsigned i = 0; i < 4; ++i) {
    unsigned Offset = 4*i;
    SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

    Loads.push_back(DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
                                PtrInfo.getWithOffset(Offset),
                                false, false, false, 0));
    LoadChains.push_back(Loads[i].getValue(1));
  }

  StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);

  SmallVector<SDValue, 4> Stores;
  for (unsigned i = 0; i < 4; ++i) {
    SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);

    Stores.push_back(DAG.getTruncStore(StoreChain, dl, Loads[i], Idx,
                                       SN->getPointerInfo().getWithOffset(i),
                                       MVT::i8 /* memory type */,
                                       SN->isVolatile(), SN->isNonTemporal(),
                                       1 /* alignment */, SN->getAAInfo()));
  }

  StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

  return StoreChain;
}

SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  if (Op.getValueType() == MVT::v4i32) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDValue Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG, dl);
    SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl); // +16 as shift amt.

    SDValue RHSSwap =   // = vrlw RHS, 16
      BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);

    // Shrinkify inputs to v8i16.
    LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
    RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
    RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);

    // Low parts multiplied together, generating 32-bit results (we ignore the
    // top parts).
    SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
                                      LHS, RHS, DAG, dl, MVT::v4i32);

    SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
                                      LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
    // Shift the high parts up 16 bits.
    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
                              Neg16, DAG, dl);
    return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
  } else if (Op.getValueType() == MVT::v8i16) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);

    return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
                            LHS, RHS, Zero, DAG, dl);
  } else if (Op.getValueType() == MVT::v16i8) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
    bool isLittleEndian = Subtarget.isLittleEndian();

    // Multiply the even 8-bit parts, producing 16-bit products.
    SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
                                         LHS, RHS, DAG, dl, MVT::v8i16);
    EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);

    // Multiply the odd 8-bit parts, producing 16-bit products.
    SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
                                        LHS, RHS, DAG, dl, MVT::v8i16);
    OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);

    // Merge the results together. Because vmuleub and vmuloub are
    // instructions with a big-endian bias, we must reverse the
    // element numbering and reverse the meaning of "odd" and "even"
    // when generating little endian code.
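    // For big endian this builds the interleaving mask
    //   <1, 17, 3, 19, ..., 15, 31>,
    // selecting the low byte of each 16-bit product from EvenParts and
    // OddParts; for little endian it is <0, 16, 2, 18, ..., 14, 30> with the
    // operands swapped, selecting the same byte lanes after the bias reversal.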
7852 int Ops[16]; 7853 for (unsigned i = 0; i != 8; ++i) { 7854 if (isLittleEndian) { 7855 Ops[i*2 ] = 2*i; 7856 Ops[i*2+1] = 2*i+16; 7857 } else { 7858 Ops[i*2 ] = 2*i+1; 7859 Ops[i*2+1] = 2*i+1+16; 7860 } 7861 } 7862 if (isLittleEndian) 7863 return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops); 7864 else 7865 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 7866 } else { 7867 llvm_unreachable("Unknown mul to lower!"); 7868 } 7869 } 7870 7871 /// LowerOperation - Provide custom lowering hooks for some operations. 7872 /// 7873 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 7874 switch (Op.getOpcode()) { 7875 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 7876 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 7877 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 7878 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 7879 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 7880 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 7881 case ISD::SETCC: return LowerSETCC(Op, DAG); 7882 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 7883 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 7884 case ISD::VASTART: 7885 return LowerVASTART(Op, DAG, Subtarget); 7886 7887 case ISD::VAARG: 7888 return LowerVAARG(Op, DAG, Subtarget); 7889 7890 case ISD::VACOPY: 7891 return LowerVACOPY(Op, DAG, Subtarget); 7892 7893 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, Subtarget); 7894 case ISD::DYNAMIC_STACKALLOC: 7895 return LowerDYNAMIC_STACKALLOC(Op, DAG, Subtarget); 7896 7897 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 7898 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 7899 7900 case ISD::LOAD: return LowerLOAD(Op, DAG); 7901 case ISD::STORE: return LowerSTORE(Op, DAG); 7902 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); 7903 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 7904 case ISD::FP_TO_UINT: 7905 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, 7906 SDLoc(Op)); 7907 case ISD::UINT_TO_FP: 7908 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 7909 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 7910 7911 // Lower 64-bit shifts. 7912 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 7913 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 7914 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 7915 7916 // Vector-related lowering. 7917 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 7918 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 7919 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 7920 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 7921 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG); 7922 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 7923 case ISD::MUL: return LowerMUL(Op, DAG); 7924 7925 // For counter-based loop handling. 7926 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 7927 7928 // Frame & Return address. 
7929 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 7930 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 7931 } 7932 } 7933 7934 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 7935 SmallVectorImpl<SDValue>&Results, 7936 SelectionDAG &DAG) const { 7937 SDLoc dl(N); 7938 switch (N->getOpcode()) { 7939 default: 7940 llvm_unreachable("Do not know how to custom type legalize this operation!"); 7941 case ISD::READCYCLECOUNTER: { 7942 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 7943 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0)); 7944 7945 Results.push_back(RTB); 7946 Results.push_back(RTB.getValue(1)); 7947 Results.push_back(RTB.getValue(2)); 7948 break; 7949 } 7950 case ISD::INTRINSIC_W_CHAIN: { 7951 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 7952 Intrinsic::ppc_is_decremented_ctr_nonzero) 7953 break; 7954 7955 assert(N->getValueType(0) == MVT::i1 && 7956 "Unexpected result type for CTR decrement intrinsic"); 7957 EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), 7958 N->getValueType(0)); 7959 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 7960 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 7961 N->getOperand(1)); 7962 7963 Results.push_back(NewInt); 7964 Results.push_back(NewInt.getValue(1)); 7965 break; 7966 } 7967 case ISD::VAARG: { 7968 if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64()) 7969 return; 7970 7971 EVT VT = N->getValueType(0); 7972 7973 if (VT == MVT::i64) { 7974 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG, Subtarget); 7975 7976 Results.push_back(NewNode); 7977 Results.push_back(NewNode.getValue(1)); 7978 } 7979 return; 7980 } 7981 case ISD::FP_ROUND_INREG: { 7982 assert(N->getValueType(0) == MVT::ppcf128); 7983 assert(N->getOperand(0).getValueType() == MVT::ppcf128); 7984 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 7985 MVT::f64, N->getOperand(0), 7986 DAG.getIntPtrConstant(0, dl)); 7987 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 7988 MVT::f64, N->getOperand(0), 7989 DAG.getIntPtrConstant(1, dl)); 7990 7991 // Add the two halves of the long double in round-to-zero mode. 7992 SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 7993 7994 // We know the low half is about to be thrown away, so just use something 7995 // convenient. 7996 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128, 7997 FPreg, FPreg)); 7998 return; 7999 } 8000 case ISD::FP_TO_SINT: 8001 case ISD::FP_TO_UINT: 8002 // LowerFP_TO_INT() can only handle f32 and f64. 
    if (N->getOperand(0).getValueType() == MVT::ppcf128)
      return;
    Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
    return;
  }
}


//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *Func = Intrinsic::getDeclaration(M, Id);
  return Builder.CreateCall(Func, {});
}

// The mappings for emitLeading/TrailingFence are taken from
// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                         AtomicOrdering Ord, bool IsStore,
                                         bool IsLoad) const {
  if (Ord == SequentiallyConsistent)
    return callIntrinsic(Builder, Intrinsic::ppc_sync);
  if (isAtLeastRelease(Ord))
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  return nullptr;
}

Instruction* PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                          AtomicOrdering Ord, bool IsStore,
                                          bool IsLoad) const {
  if (IsLoad && isAtLeastAcquire(Ord))
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  // FIXME: this is too conservative, a dependent branch + isync is enough.
  // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
  // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
  // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
  return nullptr;
}

MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
                                    unsigned AtomicSize,
                                    unsigned BinOpcode) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode == 0.
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  auto LoadMnemonic = PPC::LDARX;
  auto StoreMnemonic = PPC::STDCX;
  switch (AtomicSize) {
  default:
    llvm_unreachable("Unexpected size of atomic entity");
  case 1:
    LoadMnemonic = PPC::LBARX;
    StoreMnemonic = PPC::STBCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "Call this with size < 4 only if partword atomics are available");
    break;
  case 2:
    LoadMnemonic = PPC::LHARX;
    StoreMnemonic = PPC::STHCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "Call this with size < 4 only if partword atomics are available");
    break;
  case 4:
    LoadMnemonic = PPC::LWARX;
    StoreMnemonic = PPC::STWCX;
    break;
  case 8:
    LoadMnemonic = PPC::LDARX;
    StoreMnemonic = PPC::STDCX;
    break;
  }

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = BB;
  ++It;

  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptrA = MI->getOperand(1).getReg();
  unsigned ptrB = MI->getOperand(2).getReg();
  unsigned incr = MI->getOperand(3).getReg();
  DebugLoc dl = MI->getDebugLoc();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  unsigned TmpReg = (!BinOpcode) ?
incr : 8098 RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass 8099 : &PPC::GPRCRegClass); 8100 8101 // thisMBB: 8102 // ... 8103 // fallthrough --> loopMBB 8104 BB->addSuccessor(loopMBB); 8105 8106 // loopMBB: 8107 // l[wd]arx dest, ptr 8108 // add r0, dest, incr 8109 // st[wd]cx. r0, ptr 8110 // bne- loopMBB 8111 // fallthrough --> exitMBB 8112 BB = loopMBB; 8113 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 8114 .addReg(ptrA).addReg(ptrB); 8115 if (BinOpcode) 8116 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 8117 BuildMI(BB, dl, TII->get(StoreMnemonic)) 8118 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 8119 BuildMI(BB, dl, TII->get(PPC::BCC)) 8120 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 8121 BB->addSuccessor(loopMBB); 8122 BB->addSuccessor(exitMBB); 8123 8124 // exitMBB: 8125 // ... 8126 BB = exitMBB; 8127 return BB; 8128 } 8129 8130 MachineBasicBlock * 8131 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI, 8132 MachineBasicBlock *BB, 8133 bool is8bit, // operation 8134 unsigned BinOpcode) const { 8135 // If we support part-word atomic mnemonics, just use them 8136 if (Subtarget.hasPartwordAtomics()) 8137 return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode); 8138 8139 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 8140 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8141 // In 64 bit mode we have to use 64 bits for addresses, even though the 8142 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 8143 // registers without caring whether they're 32 or 64, but here we're 8144 // doing actual arithmetic on the addresses. 8145 bool is64bit = Subtarget.isPPC64(); 8146 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 8147 8148 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8149 MachineFunction *F = BB->getParent(); 8150 MachineFunction::iterator It = BB; 8151 ++It; 8152 8153 unsigned dest = MI->getOperand(0).getReg(); 8154 unsigned ptrA = MI->getOperand(1).getReg(); 8155 unsigned ptrB = MI->getOperand(2).getReg(); 8156 unsigned incr = MI->getOperand(3).getReg(); 8157 DebugLoc dl = MI->getDebugLoc(); 8158 8159 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 8160 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 8161 F->insert(It, loopMBB); 8162 F->insert(It, exitMBB); 8163 exitMBB->splice(exitMBB->begin(), BB, 8164 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8165 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 8166 8167 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8168 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 8169 : &PPC::GPRCRegClass; 8170 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 8171 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 8172 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 8173 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC); 8174 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 8175 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 8176 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 8177 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 8178 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC); 8179 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 8180 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 8181 unsigned Ptr1Reg; 8182 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC); 8183 8184 // thisMBB: 8185 // ... 
8186 // fallthrough --> loopMBB 8187 BB->addSuccessor(loopMBB); 8188 8189 // The 4-byte load must be aligned, while a char or short may be 8190 // anywhere in the word. Hence all this nasty bookkeeping code. 8191 // add ptr1, ptrA, ptrB [copy if ptrA==0] 8192 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 8193 // xori shift, shift1, 24 [16] 8194 // rlwinm ptr, ptr1, 0, 0, 29 8195 // slw incr2, incr, shift 8196 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 8197 // slw mask, mask2, shift 8198 // loopMBB: 8199 // lwarx tmpDest, ptr 8200 // add tmp, tmpDest, incr2 8201 // andc tmp2, tmpDest, mask 8202 // and tmp3, tmp, mask 8203 // or tmp4, tmp3, tmp2 8204 // stwcx. tmp4, ptr 8205 // bne- loopMBB 8206 // fallthrough --> exitMBB 8207 // srw dest, tmpDest, shift 8208 if (ptrA != ZeroReg) { 8209 Ptr1Reg = RegInfo.createVirtualRegister(RC); 8210 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 8211 .addReg(ptrA).addReg(ptrB); 8212 } else { 8213 Ptr1Reg = ptrB; 8214 } 8215 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 8216 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 8217 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 8218 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 8219 if (is64bit) 8220 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 8221 .addReg(Ptr1Reg).addImm(0).addImm(61); 8222 else 8223 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 8224 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 8225 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg) 8226 .addReg(incr).addReg(ShiftReg); 8227 if (is8bit) 8228 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 8229 else { 8230 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 8231 BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535); 8232 } 8233 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 8234 .addReg(Mask2Reg).addReg(ShiftReg); 8235 8236 BB = loopMBB; 8237 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 8238 .addReg(ZeroReg).addReg(PtrReg); 8239 if (BinOpcode) 8240 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 8241 .addReg(Incr2Reg).addReg(TmpDestReg); 8242 BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg) 8243 .addReg(TmpDestReg).addReg(MaskReg); 8244 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg) 8245 .addReg(TmpReg).addReg(MaskReg); 8246 BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg) 8247 .addReg(Tmp3Reg).addReg(Tmp2Reg); 8248 BuildMI(BB, dl, TII->get(PPC::STWCX)) 8249 .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg); 8250 BuildMI(BB, dl, TII->get(PPC::BCC)) 8251 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 8252 BB->addSuccessor(loopMBB); 8253 BB->addSuccessor(exitMBB); 8254 8255 // exitMBB: 8256 // ... 
  BB = exitMBB;
  BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg)
    .addReg(ShiftReg);
  return BB;
}

llvm::MachineBasicBlock*
PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
                                    MachineBasicBlock *MBB) const {
  DebugLoc DL = MI->getDebugLoc();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = MBB;
  ++I;

  // Memory Reference
  MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();

  unsigned DstReg = MI->getOperand(0).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  assert(RC->hasType(MVT::i32) && "Invalid destination!");
  unsigned mainDstReg = MRI.createVirtualRegister(RC);
  unsigned restoreDstReg = MRI.createVirtualRegister(RC);

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");
  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  SjLjSetup mainMBB
  //  bl mainMBB
  //  v_restore = 1
  //  b sinkMBB
  //
  // mainMBB:
  //  buf[LabelOffset] = LR
  //  v_main = 0
  //
  // sinkMBB:
  //  v = phi(main, restore)
  //

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, sinkMBB);

  MachineInstrBuilder MIB;

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // Note that the structure of the jmp_buf used here is not compatible
  // with that used by libc, and is not designed to be. Specifically, it
  // stores only those 'reserved' registers that LLVM does not otherwise
  // understand how to spill. Also, by convention, by the time this
  // intrinsic is called, Clang has already stored the frame address in the
  // first slot of the buffer and the stack address in the third. Following
  // the X86 target code, we'll store the jump address in the second slot.
  // We also need to save the TOC pointer (R2) to handle jumps between shared
  // libraries, and that will be stored in the fourth slot. The thread
  // identifier (R13) is not affected.

  // thisMBB:
  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t TOCOffset   = 3 * PVT.getStoreSize();
  const int64_t BPOffset    = 4 * PVT.getStoreSize();

  // Prepare the resume address (IP) in a register.
  const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
  unsigned LabelReg = MRI.createVirtualRegister(PtrRC);
  unsigned BufReg = MI->getOperand(1).getReg();

  if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) {
    setUsesTOCBasePtr(*MBB->getParent());
    MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
            .addReg(PPC::X2)
            .addImm(TOCOffset)
            .addReg(BufReg);
    MIB.setMemRefs(MMOBegin, MMOEnd);
  }

  // Naked functions never have a base pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
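  // Together with emitEHSjLjLongJmp below, this uses five pointer-sized
  // jmp_buf slots: 0 = frame address (stored by Clang), 1 = resume IP
  // (LabelOffset), 2 = SP, 3 = TOC (TOCOffset), and 4 = BP (BPOffset).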
8350 unsigned BaseReg; 8351 if (MF->getFunction()->hasFnAttribute(Attribute::Naked)) 8352 BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1; 8353 else 8354 BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP; 8355 8356 MIB = BuildMI(*thisMBB, MI, DL, 8357 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW)) 8358 .addReg(BaseReg) 8359 .addImm(BPOffset) 8360 .addReg(BufReg); 8361 MIB.setMemRefs(MMOBegin, MMOEnd); 8362 8363 // Setup 8364 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB); 8365 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 8366 MIB.addRegMask(TRI->getNoPreservedMask()); 8367 8368 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1); 8369 8370 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup)) 8371 .addMBB(mainMBB); 8372 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB); 8373 8374 thisMBB->addSuccessor(mainMBB, /* weight */ 0); 8375 thisMBB->addSuccessor(sinkMBB, /* weight */ 1); 8376 8377 // mainMBB: 8378 // mainDstReg = 0 8379 MIB = 8380 BuildMI(mainMBB, DL, 8381 TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg); 8382 8383 // Store IP 8384 if (Subtarget.isPPC64()) { 8385 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD)) 8386 .addReg(LabelReg) 8387 .addImm(LabelOffset) 8388 .addReg(BufReg); 8389 } else { 8390 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW)) 8391 .addReg(LabelReg) 8392 .addImm(LabelOffset) 8393 .addReg(BufReg); 8394 } 8395 8396 MIB.setMemRefs(MMOBegin, MMOEnd); 8397 8398 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0); 8399 mainMBB->addSuccessor(sinkMBB); 8400 8401 // sinkMBB: 8402 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 8403 TII->get(PPC::PHI), DstReg) 8404 .addReg(mainDstReg).addMBB(mainMBB) 8405 .addReg(restoreDstReg).addMBB(thisMBB); 8406 8407 MI->eraseFromParent(); 8408 return sinkMBB; 8409 } 8410 8411 MachineBasicBlock * 8412 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI, 8413 MachineBasicBlock *MBB) const { 8414 DebugLoc DL = MI->getDebugLoc(); 8415 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8416 8417 MachineFunction *MF = MBB->getParent(); 8418 MachineRegisterInfo &MRI = MF->getRegInfo(); 8419 8420 // Memory Reference 8421 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 8422 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 8423 8424 MVT PVT = getPointerTy(MF->getDataLayout()); 8425 assert((PVT == MVT::i64 || PVT == MVT::i32) && 8426 "Invalid Pointer Size!"); 8427 8428 const TargetRegisterClass *RC = 8429 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 8430 unsigned Tmp = MRI.createVirtualRegister(RC); 8431 // Since FP is only updated here but NOT referenced, it's treated as GPR. 8432 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; 8433 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; 8434 unsigned BP = 8435 (PVT == MVT::i64) 8436 ? PPC::X30 8437 : (Subtarget.isSVR4ABI() && 8438 MF->getTarget().getRelocationModel() == Reloc::PIC_ 8439 ? PPC::R29 8440 : PPC::R30); 8441 8442 MachineInstrBuilder MIB; 8443 8444 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 8445 const int64_t SPOffset = 2 * PVT.getStoreSize(); 8446 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 8447 const int64_t BPOffset = 4 * PVT.getStoreSize(); 8448 8449 unsigned BufReg = MI->getOperand(0).getReg(); 8450 8451 // Reload FP (the jumped-to function may not have had a 8452 // frame pointer, and if so, then its r31 will be restored 8453 // as necessary). 
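  // In outline, the code below performs (shown for the 64-bit case):
  //   ld fp,  0(buf)
  //   ld tmp, LabelOffset(buf)
  //   ld sp,  SPOffset(buf)
  //   ld bp,  BPOffset(buf)
  //   ld r2,  TOCOffset(buf)   ; 64-bit SVR4 only
  //   mtctr tmp
  //   bctr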
8454 if (PVT == MVT::i64) { 8455 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP) 8456 .addImm(0) 8457 .addReg(BufReg); 8458 } else { 8459 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP) 8460 .addImm(0) 8461 .addReg(BufReg); 8462 } 8463 MIB.setMemRefs(MMOBegin, MMOEnd); 8464 8465 // Reload IP 8466 if (PVT == MVT::i64) { 8467 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp) 8468 .addImm(LabelOffset) 8469 .addReg(BufReg); 8470 } else { 8471 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp) 8472 .addImm(LabelOffset) 8473 .addReg(BufReg); 8474 } 8475 MIB.setMemRefs(MMOBegin, MMOEnd); 8476 8477 // Reload SP 8478 if (PVT == MVT::i64) { 8479 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP) 8480 .addImm(SPOffset) 8481 .addReg(BufReg); 8482 } else { 8483 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP) 8484 .addImm(SPOffset) 8485 .addReg(BufReg); 8486 } 8487 MIB.setMemRefs(MMOBegin, MMOEnd); 8488 8489 // Reload BP 8490 if (PVT == MVT::i64) { 8491 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP) 8492 .addImm(BPOffset) 8493 .addReg(BufReg); 8494 } else { 8495 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP) 8496 .addImm(BPOffset) 8497 .addReg(BufReg); 8498 } 8499 MIB.setMemRefs(MMOBegin, MMOEnd); 8500 8501 // Reload TOC 8502 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) { 8503 setUsesTOCBasePtr(*MBB->getParent()); 8504 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) 8505 .addImm(TOCOffset) 8506 .addReg(BufReg); 8507 8508 MIB.setMemRefs(MMOBegin, MMOEnd); 8509 } 8510 8511 // Jump 8512 BuildMI(*MBB, MI, DL, 8513 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp); 8514 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR)); 8515 8516 MI->eraseFromParent(); 8517 return MBB; 8518 } 8519 8520 MachineBasicBlock * 8521 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 8522 MachineBasicBlock *BB) const { 8523 if (MI->getOpcode() == TargetOpcode::STACKMAP || 8524 MI->getOpcode() == TargetOpcode::PATCHPOINT) { 8525 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() && 8526 MI->getOpcode() == TargetOpcode::PATCHPOINT) { 8527 // Call lowering should have added an r2 operand to indicate a dependence 8528 // on the TOC base pointer value. It can't however, because there is no 8529 // way to mark the dependence as implicit there, and so the stackmap code 8530 // will confuse it with a regular operand. Instead, add the dependence 8531 // here. 8532 setUsesTOCBasePtr(*BB->getParent()); 8533 MI->addOperand(MachineOperand::CreateReg(PPC::X2, false, true)); 8534 } 8535 8536 return emitPatchPoint(MI, BB); 8537 } 8538 8539 if (MI->getOpcode() == PPC::EH_SjLj_SetJmp32 || 8540 MI->getOpcode() == PPC::EH_SjLj_SetJmp64) { 8541 return emitEHSjLjSetJmp(MI, BB); 8542 } else if (MI->getOpcode() == PPC::EH_SjLj_LongJmp32 || 8543 MI->getOpcode() == PPC::EH_SjLj_LongJmp64) { 8544 return emitEHSjLjLongJmp(MI, BB); 8545 } 8546 8547 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8548 8549 // To "insert" these instructions we actually have to insert their 8550 // control-flow patterns. 
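  // That is, each pseudo instruction handled below is expanded by splitting
  // the current block and inserting new basic blocks (a select diamond, an
  // atomic retry loop, or the time-base read loop) in its place.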
8551 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8552 MachineFunction::iterator It = BB; 8553 ++It; 8554 8555 MachineFunction *F = BB->getParent(); 8556 8557 if (Subtarget.hasISEL() && (MI->getOpcode() == PPC::SELECT_CC_I4 || 8558 MI->getOpcode() == PPC::SELECT_CC_I8 || 8559 MI->getOpcode() == PPC::SELECT_I4 || 8560 MI->getOpcode() == PPC::SELECT_I8)) { 8561 SmallVector<MachineOperand, 2> Cond; 8562 if (MI->getOpcode() == PPC::SELECT_CC_I4 || 8563 MI->getOpcode() == PPC::SELECT_CC_I8) 8564 Cond.push_back(MI->getOperand(4)); 8565 else 8566 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET)); 8567 Cond.push_back(MI->getOperand(1)); 8568 8569 DebugLoc dl = MI->getDebugLoc(); 8570 TII->insertSelect(*BB, MI, dl, MI->getOperand(0).getReg(), 8571 Cond, MI->getOperand(2).getReg(), 8572 MI->getOperand(3).getReg()); 8573 } else if (MI->getOpcode() == PPC::SELECT_CC_I4 || 8574 MI->getOpcode() == PPC::SELECT_CC_I8 || 8575 MI->getOpcode() == PPC::SELECT_CC_F4 || 8576 MI->getOpcode() == PPC::SELECT_CC_F8 || 8577 MI->getOpcode() == PPC::SELECT_CC_QFRC || 8578 MI->getOpcode() == PPC::SELECT_CC_QSRC || 8579 MI->getOpcode() == PPC::SELECT_CC_QBRC || 8580 MI->getOpcode() == PPC::SELECT_CC_VRRC || 8581 MI->getOpcode() == PPC::SELECT_CC_VSFRC || 8582 MI->getOpcode() == PPC::SELECT_CC_VSSRC || 8583 MI->getOpcode() == PPC::SELECT_CC_VSRC || 8584 MI->getOpcode() == PPC::SELECT_I4 || 8585 MI->getOpcode() == PPC::SELECT_I8 || 8586 MI->getOpcode() == PPC::SELECT_F4 || 8587 MI->getOpcode() == PPC::SELECT_F8 || 8588 MI->getOpcode() == PPC::SELECT_QFRC || 8589 MI->getOpcode() == PPC::SELECT_QSRC || 8590 MI->getOpcode() == PPC::SELECT_QBRC || 8591 MI->getOpcode() == PPC::SELECT_VRRC || 8592 MI->getOpcode() == PPC::SELECT_VSFRC || 8593 MI->getOpcode() == PPC::SELECT_VSSRC || 8594 MI->getOpcode() == PPC::SELECT_VSRC) { 8595 // The incoming instruction knows the destination vreg to set, the 8596 // condition code register to branch on, the true/false values to 8597 // select between, and a branch opcode to use. 8598 8599 // thisMBB: 8600 // ... 8601 // TrueVal = ... 8602 // cmpTY ccX, r1, r2 8603 // bCC copy1MBB 8604 // fallthrough --> copy0MBB 8605 MachineBasicBlock *thisMBB = BB; 8606 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 8607 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 8608 DebugLoc dl = MI->getDebugLoc(); 8609 F->insert(It, copy0MBB); 8610 F->insert(It, sinkMBB); 8611 8612 // Transfer the remainder of BB and its successor edges to sinkMBB. 8613 sinkMBB->splice(sinkMBB->begin(), BB, 8614 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8615 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 8616 8617 // Next, add the true and fallthrough blocks as its successors. 
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    if (MI->getOpcode() == PPC::SELECT_I4 ||
        MI->getOpcode() == PPC::SELECT_I8 ||
        MI->getOpcode() == PPC::SELECT_F4 ||
        MI->getOpcode() == PPC::SELECT_F8 ||
        MI->getOpcode() == PPC::SELECT_QFRC ||
        MI->getOpcode() == PPC::SELECT_QSRC ||
        MI->getOpcode() == PPC::SELECT_QBRC ||
        MI->getOpcode() == PPC::SELECT_VRRC ||
        MI->getOpcode() == PPC::SELECT_VSFRC ||
        MI->getOpcode() == PPC::SELECT_VSSRC ||
        MI->getOpcode() == PPC::SELECT_VSRC) {
      BuildMI(BB, dl, TII->get(PPC::BC))
        .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
    } else {
      unsigned SelectPred = MI->getOperand(4).getImm();
      BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
    }

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(*BB, BB->begin(), dl,
            TII->get(PPC::PHI), MI->getOperand(0).getReg())
      .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
  } else if (MI->getOpcode() == PPC::ReadTB) {
    // To read the 64-bit time-base register on a 32-bit target, we read the
    // two halves. Should the counter have wrapped while it was being read, we
    // need to try again.
    // ...
    // readLoop:
    //   mfspr Rx,TBU   # load from TBU
    //   mfspr Ry,TB    # load from TB
    //   mfspr Rz,TBU   # load from TBU
    //   cmpw crX,Rx,Rz # check if 'old' == 'new'
    //   bne readLoop   # branch if they're not equal
    // ...

    MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
    DebugLoc dl = MI->getDebugLoc();
    F->insert(It, readMBB);
    F->insert(It, sinkMBB);

    // Transfer the remainder of BB and its successor edges to sinkMBB.
8676 sinkMBB->splice(sinkMBB->begin(), BB, 8677 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8678 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 8679 8680 BB->addSuccessor(readMBB); 8681 BB = readMBB; 8682 8683 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8684 unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 8685 unsigned LoReg = MI->getOperand(0).getReg(); 8686 unsigned HiReg = MI->getOperand(1).getReg(); 8687 8688 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269); 8689 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268); 8690 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269); 8691 8692 unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 8693 8694 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg) 8695 .addReg(HiReg).addReg(ReadAgainReg); 8696 BuildMI(BB, dl, TII->get(PPC::BCC)) 8697 .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB); 8698 8699 BB->addSuccessor(readMBB); 8700 BB->addSuccessor(sinkMBB); 8701 } 8702 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 8703 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 8704 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 8705 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 8706 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 8707 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4); 8708 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 8709 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8); 8710 8711 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 8712 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 8713 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 8714 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 8715 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 8716 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND); 8717 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 8718 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8); 8719 8720 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 8721 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 8722 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 8723 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 8724 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 8725 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR); 8726 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 8727 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8); 8728 8729 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 8730 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 8731 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 8732 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 8733 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 8734 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR); 8735 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 8736 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8); 8737 8738 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 8739 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND); 8740 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 8741 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND); 8742 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 8743 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND); 8744 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 8745 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8); 8746 8747 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 8748 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 8749 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 8750 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 8751 else 
if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);

  else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
  else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
  else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0);
  else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0);

  else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
           MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
           (Subtarget.hasPartwordAtomics() &&
            MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
           (Subtarget.hasPartwordAtomics() &&
            MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
    bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;

    auto LoadMnemonic = PPC::LDARX;
    auto StoreMnemonic = PPC::STDCX;
    switch (MI->getOpcode()) {
    default:
      llvm_unreachable("Compare and swap of unknown size");
    case PPC::ATOMIC_CMP_SWAP_I8:
      LoadMnemonic = PPC::LBARX;
      StoreMnemonic = PPC::STBCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "Partword atomics are not supported");
      break;
    case PPC::ATOMIC_CMP_SWAP_I16:
      LoadMnemonic = PPC::LHARX;
      StoreMnemonic = PPC::STHCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "Partword atomics are not supported");
      break;
    case PPC::ATOMIC_CMP_SWAP_I32:
      LoadMnemonic = PPC::LWARX;
      StoreMnemonic = PPC::STWCX;
      break;
    case PPC::ATOMIC_CMP_SWAP_I64:
      LoadMnemonic = PPC::LDARX;
      StoreMnemonic = PPC::STDCX;
      break;
    }
    unsigned dest   = MI->getOperand(0).getReg();
    unsigned ptrA   = MI->getOperand(1).getReg();
    unsigned ptrB   = MI->getOperand(2).getReg();
    unsigned oldval = MI->getOperand(3).getReg();
    unsigned newval = MI->getOperand(4).getReg();
    DebugLoc dl     = MI->getDebugLoc();

    MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, loop1MBB);
    F->insert(It, loop2MBB);
    F->insert(It, midMBB);
    F->insert(It, exitMBB);
    exitMBB->splice(exitMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    exitMBB->transferSuccessorsAndUpdatePHIs(BB);

    // thisMBB:
    //   ...
    //   fallthrough --> loop1MBB
    BB->addSuccessor(loop1MBB);

    // loop1MBB:
    //   l[bhwd]arx dest, ptr
    //   cmp[wd] dest, oldval
    //   bne- midMBB
    // loop2MBB:
    //   st[bhwd]cx. newval, ptr
    //   bne- loop1MBB
    //   b exitBB
    // midMBB:
    //   st[bhwd]cx. dest, ptr
    // exitBB:
    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
      .addReg(ptrA).addReg(ptrB);
    BuildMI(BB, dl, TII->get(is64bit ?
PPC::CMPD : PPC::CMPW), PPC::CR0) 8836 .addReg(oldval).addReg(dest); 8837 BuildMI(BB, dl, TII->get(PPC::BCC)) 8838 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 8839 BB->addSuccessor(loop2MBB); 8840 BB->addSuccessor(midMBB); 8841 8842 BB = loop2MBB; 8843 BuildMI(BB, dl, TII->get(StoreMnemonic)) 8844 .addReg(newval).addReg(ptrA).addReg(ptrB); 8845 BuildMI(BB, dl, TII->get(PPC::BCC)) 8846 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 8847 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 8848 BB->addSuccessor(loop1MBB); 8849 BB->addSuccessor(exitMBB); 8850 8851 BB = midMBB; 8852 BuildMI(BB, dl, TII->get(StoreMnemonic)) 8853 .addReg(dest).addReg(ptrA).addReg(ptrB); 8854 BB->addSuccessor(exitMBB); 8855 8856 // exitMBB: 8857 // ... 8858 BB = exitMBB; 8859 } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 8860 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 8861 // We must use 64-bit registers for addresses when targeting 64-bit, 8862 // since we're actually doing arithmetic on them. Other registers 8863 // can be 32-bit. 8864 bool is64bit = Subtarget.isPPC64(); 8865 bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 8866 8867 unsigned dest = MI->getOperand(0).getReg(); 8868 unsigned ptrA = MI->getOperand(1).getReg(); 8869 unsigned ptrB = MI->getOperand(2).getReg(); 8870 unsigned oldval = MI->getOperand(3).getReg(); 8871 unsigned newval = MI->getOperand(4).getReg(); 8872 DebugLoc dl = MI->getDebugLoc(); 8873 8874 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 8875 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 8876 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 8877 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 8878 F->insert(It, loop1MBB); 8879 F->insert(It, loop2MBB); 8880 F->insert(It, midMBB); 8881 F->insert(It, exitMBB); 8882 exitMBB->splice(exitMBB->begin(), BB, 8883 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8884 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 8885 8886 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8887 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 8888 : &PPC::GPRCRegClass; 8889 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 8890 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 8891 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 8892 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC); 8893 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC); 8894 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC); 8895 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC); 8896 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 8897 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 8898 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 8899 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 8900 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 8901 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 8902 unsigned Ptr1Reg; 8903 unsigned TmpReg = RegInfo.createVirtualRegister(RC); 8904 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 8905 // thisMBB: 8906 // ... 8907 // fallthrough --> loopMBB 8908 BB->addSuccessor(loop1MBB); 8909 8910 // The 4-byte load must be aligned, while a char or short may be 8911 // anywhere in the word. Hence all this nasty bookkeeping code. 
    //   add ptr1, ptrA, ptrB [copy if ptrA==0]
    //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
    //   xori shift, shift1, 24 [16]
    //   rlwinm ptr, ptr1, 0, 0, 29
    //   slw newval2, newval, shift
    //   slw oldval2, oldval, shift
    //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
    //   slw mask, mask2, shift
    //   and newval3, newval2, mask
    //   and oldval3, oldval2, mask
    // loop1MBB:
    //   lwarx tmpDest, ptr
    //   and tmp, tmpDest, mask
    //   cmpw tmp, oldval3
    //   bne- midMBB
    // loop2MBB:
    //   andc tmp2, tmpDest, mask
    //   or tmp4, tmp2, newval3
    //   stwcx. tmp4, ptr
    //   bne- loop1MBB
    //   b exitBB
    // midMBB:
    //   stwcx. tmpDest, ptr
    // exitBB:
    //   srw dest, tmpDest, shift
    if (ptrA != ZeroReg) {
      Ptr1Reg = RegInfo.createVirtualRegister(RC);
      BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
        .addReg(ptrA).addReg(ptrB);
    } else {
      Ptr1Reg = ptrB;
    }
    BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
      .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
      .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
    if (is64bit)
      BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
        .addReg(Ptr1Reg).addImm(0).addImm(61);
    else
      BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
        .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
    BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
      .addReg(newval).addReg(ShiftReg);
    BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
      .addReg(oldval).addReg(ShiftReg);
    if (is8bit)
      BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
    else {
      BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
      BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
        .addReg(Mask3Reg).addImm(65535);
    }
    BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
      .addReg(Mask2Reg).addReg(ShiftReg);
    BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
      .addReg(NewVal2Reg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
      .addReg(OldVal2Reg).addReg(MaskReg);

    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
      .addReg(ZeroReg).addReg(PtrReg);
    BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
      .addReg(TmpDestReg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
      .addReg(TmpReg).addReg(OldVal3Reg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(midMBB);

    BB = loop2MBB;
    BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
      .addReg(TmpDestReg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
      .addReg(Tmp2Reg).addReg(NewVal3Reg);
    BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg)
      .addReg(ZeroReg).addReg(PtrReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
    BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
    BB->addSuccessor(loop1MBB);
    BB->addSuccessor(exitMBB);

    BB = midMBB;
    BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg)
      .addReg(ZeroReg).addReg(PtrReg);
    BB->addSuccessor(exitMBB);

    // exitMBB:
    //   ...
    BB = exitMBB;
    BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpReg)
      .addReg(ShiftReg);
  } else if (MI->getOpcode() == PPC::FADDrtz) {
    // This pseudo performs an FADD with rounding mode temporarily forced
    // to round-to-zero.  We emit this via custom inserter since the FPSCR
    // is not modeled at the SelectionDAG level.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src1 = MI->getOperand(1).getReg();
    unsigned Src2 = MI->getOperand(2).getReg();
    DebugLoc dl = MI->getDebugLoc();

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);

    // Save FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);

    // Set rounding mode to round-to-zero.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);

    // Perform addition.
    BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);

    // Restore FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
  } else if (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT ||
             MI->getOpcode() == PPC::ANDIo_1_GT_BIT ||
             MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
             MI->getOpcode() == PPC::ANDIo_1_GT_BIT8) {
    unsigned Opcode = (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
                       MI->getOpcode() == PPC::ANDIo_1_GT_BIT8) ?
                      PPC::ANDIo8 : PPC::ANDIo;
    bool isEQ = (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT ||
                 MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8);

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ?
                                                  &PPC::GPRCRegClass :
                                                  &PPC::G8RCRegClass);

    DebugLoc dl = MI->getDebugLoc();
    BuildMI(*BB, MI, dl, TII->get(Opcode), Dest)
      .addReg(MI->getOperand(1).getReg()).addImm(1);
    BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY),
            MI->getOperand(0).getReg())
      .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT);
  } else if (MI->getOpcode() == PPC::TCHECK_RET) {
    DebugLoc Dl = MI->getDebugLoc();
    MachineRegisterInfo &RegInfo = F->getRegInfo();
    unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
    BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
    return BB;
  } else {
    llvm_unreachable("Unexpected instr type to insert");
  }

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

static std::string getRecipOp(const char *Base, EVT VT) {
  std::string RecipOp(Base);
  if (VT.getScalarType() == MVT::f64)
    RecipOp += "d";
  else
    RecipOp += "f";

  if (VT.isVector())
    RecipOp = "vec-" + RecipOp;

  return RecipOp;
}

SDValue PPCTargetLowering::getRsqrtEstimate(SDValue Operand,
                                            DAGCombinerInfo &DCI,
                                            unsigned &RefinementSteps,
                                            bool &UseOneConstNR) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
      (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
      (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
      (VT == MVT::v4f64 && Subtarget.hasQPX())) {
    TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals;
    std::string RecipOp = getRecipOp("sqrt", VT);
    if (!Recips.isEnabled(RecipOp))
      return SDValue();

    RefinementSteps = Recips.getRefinementSteps(RecipOp);
    UseOneConstNR = true;
    return DCI.DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand,
                                            DAGCombinerInfo &DCI,
                                            unsigned &RefinementSteps) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
      (VT == MVT::f64 && Subtarget.hasFRE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
      (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
      (VT == MVT::v4f64 && Subtarget.hasQPX())) {
    TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals;
    std::string RecipOp = getRecipOp("div", VT);
    if (!Recips.isEnabled(RecipOp))
      return SDValue();

    RefinementSteps = Recips.getRefinementSteps(RecipOp);
    return DCI.DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

bool PPCTargetLowering::combineRepeatedFPDivisors(unsigned NumUsers) const {
  // Note: This functionality is used only when unsafe-fp-math is enabled,
  // and on cores with reciprocal estimates (which are used when
  // unsafe-fp-math is enabled for division), this functionality is redundant
  // with the default combiner logic (once the division -> reciprocal/multiply
  // transformation has taken place).  As a result, this matters more for
  // older cores than for newer ones.

  // Combine multiple FDIVs with the same divisor into multiple FMULs by the
  // reciprocal if there are two or more FDIVs (for embedded cores with only
  // one FP pipeline) or three or more FDIVs (for generic OOO cores).
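  //
  // Illustrative sketch (not from the original source): with a repeated
  // divisor d, the combine gated by this hook rewrites
  //   a/d, b/d, c/d
  // into
  //   r = 1.0/d;  a*r, b*r, c*r
  // trading one division per use for a single division (or reciprocal
  // estimate) plus one multiply per use, which only pays off past the
  // use-count thresholds returned below.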
  switch (Subtarget.getDarwinDirective()) {
  default:
    return NumUsers > 2;
  case PPC::DIR_440:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
    return NumUsers > 1;
  }
}

static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
                               unsigned Bytes, int Dist,
                               SelectionDAG &DAG) {
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDValue BaseLoc = Base->getBasePtr();
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    const MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
    int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS = MFI->getObjectSize(FI);
    int BFS = MFI->getObjectSize(BFI);
    if (FS != BFS || FS != (int)Bytes) return false;
    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
  }

  // Handle X+C.
  if (DAG.isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc &&
      cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes)
    return true;

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const GlobalValue *GV1 = nullptr;
  const GlobalValue *GV2 = nullptr;
  int64_t Offset1 = 0;
  int64_t Offset2 = 0;
  bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
  bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist*Bytes);
  return false;
}

// Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
// not enforce equality of the chain operands.
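//
// Worked example (illustrative only): with Base addressing X and Bytes == 16,
// a location of the form (add X, 16) matches Dist == 1 (the next vector in
// memory) and (add X, -16) matches Dist == -1 (the previous one); the
// FrameIndex and GlobalAddress cases above apply the same offset test when
// both addresses are frame slots or offsets from the same global.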
static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
                            unsigned Bytes, int Dist,
                            SelectionDAG &DAG) {
  if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
    EVT VT = LS->getMemoryVT();
    SDValue Loc = LS->getBasePtr();
    return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
  }

  if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
    EVT VT;
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    default: return false;
    case Intrinsic::ppc_qpx_qvlfd:
    case Intrinsic::ppc_qpx_qvlfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfs:
    case Intrinsic::ppc_qpx_qvlfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcd:
    case Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcs:
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    case Intrinsic::ppc_qpx_qvlfiwa:
    case Intrinsic::ppc_qpx_qvlfiwz:
    case Intrinsic::ppc_altivec_lvx:
    case Intrinsic::ppc_altivec_lvxl:
    case Intrinsic::ppc_vsx_lxvw4x:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    }

    return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
  }

  if (N->getOpcode() == ISD::INTRINSIC_VOID) {
    EVT VT;
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    default: return false;
    case Intrinsic::ppc_qpx_qvstfd:
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    case Intrinsic::ppc_qpx_qvstfiw:
    case Intrinsic::ppc_qpx_qvstfiwa:
    case Intrinsic::ppc_altivec_stvx:
    case Intrinsic::ppc_altivec_stvxl:
    case Intrinsic::ppc_vsx_stxvw4x:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    }

    return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
  }

  return false;
}

// Return true if there is a nearby consecutive load to the one provided
// (regardless of alignment).  We search up and down the chain, looking
// through token factors and other loads (but nothing else).  As a result, a
// true result indicates that it is safe to create a new consecutive load
// adjacent to the load provided.
static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
  SDValue Chain = LD->getChain();
  EVT VT = LD->getMemoryVT();

  SmallSet<SDNode *, 16> LoadRoots;
  SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
  SmallSet<SDNode *, 16> Visited;

  // First, search up the chain, branching to follow all token-factor
  // operands.  If we find a consecutive load, then we're done; otherwise,
  // record all nodes just above the top-level loads and token factors.
  while (!Queue.empty()) {
    SDNode *ChainNext = Queue.pop_back_val();
    if (!Visited.insert(ChainNext).second)
      continue;

    if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
      if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
        return true;

      if (!Visited.count(ChainLD->getChain().getNode()))
        Queue.push_back(ChainLD->getChain().getNode());
    } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
      for (const SDUse &O : ChainNext->ops())
        if (!Visited.count(O.getNode()))
          Queue.push_back(O.getNode());
    } else
      LoadRoots.insert(ChainNext);
  }

  // Second, search down the chain, starting from the top-level nodes recorded
  // in the first phase.  These top-level nodes are the nodes just above all
  // loads and token factors.  Starting with their uses, recursively look
  // through all loads (just the chain uses) and token factors to find a
  // consecutive load.
  Visited.clear();
  Queue.clear();

  for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
       IE = LoadRoots.end(); I != IE; ++I) {
    Queue.push_back(*I);

    while (!Queue.empty()) {
      SDNode *LoadRoot = Queue.pop_back_val();
      if (!Visited.insert(LoadRoot).second)
        continue;

      if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
        if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
          return true;

      for (SDNode::use_iterator UI = LoadRoot->use_begin(),
           UE = LoadRoot->use_end(); UI != UE; ++UI)
        if (((isa<MemSDNode>(*UI) &&
              cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
          Queue.push_back(*UI);
    }
  }

  return false;
}

SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
  // If we're tracking CR bits, we need to be careful that we don't have:
  //   trunc(binary-ops(zext(x), zext(y)))
  // or
  //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
  // such that we're unnecessarily moving things into GPRs when it would be
  // better to keep them in CR bits.

  // Note that trunc here can be an actual i1 trunc, or can be the effective
  // truncation that comes from a setcc or select_cc.
  if (N->getOpcode() == ISD::TRUNCATE &&
      N->getValueType(0) != MVT::i1)
    return SDValue();

  if (N->getOperand(0).getValueType() != MVT::i32 &&
      N->getOperand(0).getValueType() != MVT::i64)
    return SDValue();

  if (N->getOpcode() == ISD::SETCC ||
      N->getOpcode() == ISD::SELECT_CC) {
    // If we're looking at a comparison, then we need to make sure that the
    // high bits (all except for the first) don't matter to the result.
    ISD::CondCode CC =
      cast<CondCodeSDNode>(N->getOperand(
        N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
    unsigned OpBits = N->getOperand(0).getValueSizeInBits();

    if (ISD::isSignedIntSetCC(CC)) {
      if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
          DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
        return SDValue();
    } else if (ISD::isUnsignedIntSetCC(CC)) {
      if (!DAG.MaskedValueIsZero(N->getOperand(0),
                                 APInt::getHighBitsSet(OpBits, OpBits-1)) ||
          !DAG.MaskedValueIsZero(N->getOperand(1),
                                 APInt::getHighBitsSet(OpBits, OpBits-1)))
        return SDValue();
    } else {
      // This is neither a signed nor an unsigned comparison, just make sure
      // that the high bits are equal.
      APInt Op1Zero, Op1One;
      APInt Op2Zero, Op2One;
      DAG.computeKnownBits(N->getOperand(0), Op1Zero, Op1One);
      DAG.computeKnownBits(N->getOperand(1), Op2Zero, Op2One);

      // We don't really care about what is known about the first bit (if
      // anything), so clear it in all masks prior to comparing them.
      Op1Zero.clearBit(0); Op1One.clearBit(0);
      Op2Zero.clearBit(0); Op2One.clearBit(0);

      if (Op1Zero != Op2Zero || Op1One != Op2One)
        return SDValue();
    }
  }

  // We now know that the higher-order bits are irrelevant, we just need to
  // make sure that all of the intermediate operations are bit operations,
  // and all inputs are extensions.
  if (N->getOperand(0).getOpcode() != ISD::AND &&
      N->getOperand(0).getOpcode() != ISD::OR &&
      N->getOperand(0).getOpcode() != ISD::XOR &&
      N->getOperand(0).getOpcode() != ISD::SELECT &&
      N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
      N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
      N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
      N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
      N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
    return SDValue();

  if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
      N->getOperand(1).getOpcode() != ISD::AND &&
      N->getOperand(1).getOpcode() != ISD::OR &&
      N->getOperand(1).getOpcode() != ISD::XOR &&
      N->getOperand(1).getOpcode() != ISD::SELECT &&
      N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
      N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
      N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
      N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
      N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
    return SDValue();

  SmallVector<SDValue, 4> Inputs;
  SmallVector<SDValue, 8> BinOps, PromOps;
  SmallPtrSet<SDNode *, 16> Visited;

  for (unsigned i = 0; i < 2; ++i) {
    if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
          N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
          N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
         N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
        isa<ConstantSDNode>(N->getOperand(i)))
      Inputs.push_back(N->getOperand(i));
    else
      BinOps.push_back(N->getOperand(i));

    if (N->getOpcode() == ISD::TRUNCATE)
      break;
  }

  // Visit all inputs, collect all binary operations (and, or, xor and
  // select) that are all fed by extensions.
  while (!BinOps.empty()) {
    SDValue BinOp = BinOps.back();
    BinOps.pop_back();

    if (!Visited.insert(BinOp.getNode()).second)
      continue;

    PromOps.push_back(BinOp);

    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
      // The condition of the select is not promoted.
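      // (Operand-numbering note, added for illustration: SELECT's condition
      // is operand 0; SELECT_CC compares operands 0 and 1 and selects
      // between operands 2 and 3, so only 2 and 3 join the to-be-promoted
      // cluster.)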
      if (BinOp.getOpcode() == ISD::SELECT && i == 0)
        continue;
      if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
        continue;

      if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
            BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
            BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
           BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
          isa<ConstantSDNode>(BinOp.getOperand(i))) {
        Inputs.push_back(BinOp.getOperand(i));
      } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
                 BinOp.getOperand(i).getOpcode() == ISD::OR ||
                 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
                 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
                 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
                 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
                 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
        BinOps.push_back(BinOp.getOperand(i));
      } else {
        // We have an input that is not an extension or another binary
        // operation; we'll abort this transformation.
        return SDValue();
      }
    }
  }

  // Make sure that this is a self-contained cluster of operations (which
  // is not quite the same thing as saying that everything has only one
  // use).
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
         UE = Inputs[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // Make sure that we're not going to promote the non-output-value
      // operand(s) of SELECT or SELECT_CC.
      // FIXME: Although we could sometimes handle this, and it does occur in
      // practice that one of the condition inputs to the select is also one
      // of the outputs, we currently can't deal with this.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == Inputs[i])
          return SDValue();
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == Inputs[i] ||
            User->getOperand(1) == Inputs[i])
          return SDValue();
      }
    }
  }

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
    for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
         UE = PromOps[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // Make sure that we're not going to promote the non-output-value
      // operand(s) of SELECT or SELECT_CC.
      // FIXME: Although we could sometimes handle this, and it does occur in
      // practice that one of the condition inputs to the select is also one
      // of the outputs, we currently can't deal with this.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == PromOps[i])
          return SDValue();
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == PromOps[i] ||
            User->getOperand(1) == PromOps[i])
          return SDValue();
      }
    }
  }

  // Replace all inputs with the extension operand.
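  // For example (illustrative only): if an input is (zext i1 %x to i32),
  // every use inside the cluster is rewired to %x itself, so the whole
  // chain of bit operations is performed directly on i1 (CR bit) values.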
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    // Constants may have users outside the cluster of to-be-promoted nodes,
    // and so we need to replace those as we do the promotions.
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;
    else
      DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
  }

  // Replace all operations (these are all the same, but have a different
  // (i1) return type).  DAG.getNode will validate that the types of
  // a binary operator match, so go through the list in reverse so that
  // we've likely promoted both operands first.  Any intermediate truncations
  // or extensions disappear.
  while (!PromOps.empty()) {
    SDValue PromOp = PromOps.back();
    PromOps.pop_back();

    if (PromOp.getOpcode() == ISD::TRUNCATE ||
        PromOp.getOpcode() == ISD::SIGN_EXTEND ||
        PromOp.getOpcode() == ISD::ZERO_EXTEND ||
        PromOp.getOpcode() == ISD::ANY_EXTEND) {
      if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
          PromOp.getOperand(0).getValueType() != MVT::i1) {
        // The operand is not yet ready (see comment below).
        PromOps.insert(PromOps.begin(), PromOp);
        continue;
      }

      SDValue RepValue = PromOp.getOperand(0);
      if (isa<ConstantSDNode>(RepValue))
        RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);

      DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
      continue;
    }

    unsigned C;
    switch (PromOp.getOpcode()) {
    default: C = 0; break;
    case ISD::SELECT: C = 1; break;
    case ISD::SELECT_CC: C = 2; break;
    }

    if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
         PromOp.getOperand(C).getValueType() != MVT::i1) ||
        (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
         PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
      // The to-be-promoted operands of this node have not yet been
      // promoted (this should be rare because we're going through the
      // list backward, but if one of the operands has several users in
      // this cluster of to-be-promoted nodes, it is possible).
      PromOps.insert(PromOps.begin(), PromOp);
      continue;
    }

    SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
                                PromOp.getNode()->op_end());

    // If there are any constant inputs, make sure they're replaced now.
    for (unsigned i = 0; i < 2; ++i)
      if (isa<ConstantSDNode>(Ops[C+i]))
        Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);

    DAG.ReplaceAllUsesOfValueWith(PromOp,
      DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
  }

  // Now we're left with the initial truncation itself.
  if (N->getOpcode() == ISD::TRUNCATE)
    return N->getOperand(0);

  // Otherwise, this is a comparison.  The operands to be compared have just
  // changed type (to i1), but everything else is the same.
  return SDValue(N, 0);
}

SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  // If we're tracking CR bits, we need to be careful that we don't have:
  //   zext(binary-ops(trunc(x), trunc(y)))
  // or
  //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
  // such that we're unnecessarily moving things into CR bits that can more
  // efficiently stay in GPRs.
  // Note that if we're not certain that the high bits are set as required
  // by the final extension, we still may need to do some masking to get the
  // proper behavior.

  // This same functionality is important on PPC64 when dealing with
  // 32-to-64-bit extensions; these occur often when 32-bit values are used
  // as the return values of functions.  Because it is so similar, it is
  // handled here as well.

  if (N->getValueType(0) != MVT::i32 &&
      N->getValueType(0) != MVT::i64)
    return SDValue();

  if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
        (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
    return SDValue();

  if (N->getOperand(0).getOpcode() != ISD::AND &&
      N->getOperand(0).getOpcode() != ISD::OR &&
      N->getOperand(0).getOpcode() != ISD::XOR &&
      N->getOperand(0).getOpcode() != ISD::SELECT &&
      N->getOperand(0).getOpcode() != ISD::SELECT_CC)
    return SDValue();

  SmallVector<SDValue, 4> Inputs;
  SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
  SmallPtrSet<SDNode *, 16> Visited;

  // Visit all inputs, collect all binary operations (and, or, xor and
  // select) that are all fed by truncations.
  while (!BinOps.empty()) {
    SDValue BinOp = BinOps.back();
    BinOps.pop_back();

    if (!Visited.insert(BinOp.getNode()).second)
      continue;

    PromOps.push_back(BinOp);

    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
      // The condition of the select is not promoted.
      if (BinOp.getOpcode() == ISD::SELECT && i == 0)
        continue;
      if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
        continue;

      if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
          isa<ConstantSDNode>(BinOp.getOperand(i))) {
        Inputs.push_back(BinOp.getOperand(i));
      } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
                 BinOp.getOperand(i).getOpcode() == ISD::OR ||
                 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
        BinOps.push_back(BinOp.getOperand(i));
      } else {
        // We have an input that is not a truncation or another binary
        // operation; we'll abort this transformation.
        return SDValue();
      }
    }
  }

  // The operands of a select that must be truncated when the select is
  // promoted because the operand is actually part of the to-be-promoted set.
  DenseMap<SDNode *, EVT> SelectTruncOp[2];

  // Make sure that this is a self-contained cluster of operations (which
  // is not quite the same thing as saying that everything has only one
  // use).
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
         UE = Inputs[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // If we're going to promote the non-output-value operand(s) of SELECT
      // or SELECT_CC, record them for truncation.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == Inputs[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                    User->getOperand(0).getValueType()));
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == Inputs[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                    User->getOperand(0).getValueType()));
        if (User->getOperand(1) == Inputs[i])
          SelectTruncOp[1].insert(std::make_pair(User,
                                    User->getOperand(1).getValueType()));
      }
    }
  }

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
    for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
         UE = PromOps[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // If we're going to promote the non-output-value operand(s) of SELECT
      // or SELECT_CC, record them for truncation.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == PromOps[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                    User->getOperand(0).getValueType()));
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == PromOps[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                    User->getOperand(0).getValueType()));
        if (User->getOperand(1) == PromOps[i])
          SelectTruncOp[1].insert(std::make_pair(User,
                                    User->getOperand(1).getValueType()));
      }
    }
  }

  unsigned PromBits = N->getOperand(0).getValueSizeInBits();
  bool ReallyNeedsExt = false;
  if (N->getOpcode() != ISD::ANY_EXTEND) {
    // If all of the inputs are not already sign/zero extended, then
    // we'll still need to do that at the end.
    for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
      if (isa<ConstantSDNode>(Inputs[i]))
        continue;

      unsigned OpBits =
        Inputs[i].getOperand(0).getValueSizeInBits();
      assert(PromBits < OpBits && "Truncation not to a smaller bit count?");

      if ((N->getOpcode() == ISD::ZERO_EXTEND &&
           !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
                                  APInt::getHighBitsSet(OpBits,
                                                        OpBits-PromBits))) ||
          (N->getOpcode() == ISD::SIGN_EXTEND &&
           DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
             (OpBits-(PromBits-1)))) {
        ReallyNeedsExt = true;
        break;
      }
    }
  }

  // Replace all inputs, either with the truncation operand, or a
  // truncation or extension to the final output type.
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    // Constant inputs need to be replaced with the to-be-promoted nodes that
    // use them because they might have users outside of the cluster of
    // promoted nodes.
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    SDValue InSrc = Inputs[i].getOperand(0);
    if (Inputs[i].getValueType() == N->getValueType(0))
      DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
    else if (N->getOpcode() == ISD::SIGN_EXTEND)
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
        DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
    else if (N->getOpcode() == ISD::ZERO_EXTEND)
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
        DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
    else
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
        DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
  }

  // Replace all operations (these are all the same, but have a different
  // (promoted) return type).
  // DAG.getNode will validate that the types of a binary operator match, so
  // go through the list in reverse so that we've likely promoted both
  // operands first.
  while (!PromOps.empty()) {
    SDValue PromOp = PromOps.back();
    PromOps.pop_back();

    unsigned C;
    switch (PromOp.getOpcode()) {
    default: C = 0; break;
    case ISD::SELECT: C = 1; break;
    case ISD::SELECT_CC: C = 2; break;
    }

    if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
         PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
        (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
         PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
      // The to-be-promoted operands of this node have not yet been
      // promoted (this should be rare because we're going through the
      // list backward, but if one of the operands has several users in
      // this cluster of to-be-promoted nodes, it is possible).
      PromOps.insert(PromOps.begin(), PromOp);
      continue;
    }

    // For SELECT and SELECT_CC nodes, we do a similar check for any
    // to-be-promoted comparison inputs.
    if (PromOp.getOpcode() == ISD::SELECT ||
        PromOp.getOpcode() == ISD::SELECT_CC) {
      if ((SelectTruncOp[0].count(PromOp.getNode()) &&
           PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
          (SelectTruncOp[1].count(PromOp.getNode()) &&
           PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
        PromOps.insert(PromOps.begin(), PromOp);
        continue;
      }
    }

    SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
                                PromOp.getNode()->op_end());

    // If this node has constant inputs, then they'll need to be promoted
    // here.
    for (unsigned i = 0; i < 2; ++i) {
      if (!isa<ConstantSDNode>(Ops[C+i]))
        continue;
      if (Ops[C+i].getValueType() == N->getValueType(0))
        continue;

      if (N->getOpcode() == ISD::SIGN_EXTEND)
        Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
      else if (N->getOpcode() == ISD::ZERO_EXTEND)
        Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
      else
        Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
    }

    // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
    // truncate them again to the original value type.
    if (PromOp.getOpcode() == ISD::SELECT ||
        PromOp.getOpcode() == ISD::SELECT_CC) {
      auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
      if (SI0 != SelectTruncOp[0].end())
        Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
      auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
      if (SI1 != SelectTruncOp[1].end())
        Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
    }

    DAG.ReplaceAllUsesOfValueWith(PromOp,
      DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
  }

  // Now we're left with the initial extension itself.
  if (!ReallyNeedsExt)
    return N->getOperand(0);

  // To zero extend, just mask off everything except for the first bit (in
  // the i1 case).
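  //
  // Worked example (illustrative only): for the SIGN_EXTEND case below with
  // an i1 value promoted into an i32 register, ShiftCst == 32 - 1 == 31, so
  // the emitted pair amounts to
  //   slwi r, r, 31
  //   srawi r, r, 31
  // which replicates bit 0 across the whole register.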
  if (N->getOpcode() == ISD::ZERO_EXTEND)
    return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
                       DAG.getConstant(APInt::getLowBitsSet(
                                         N->getValueSizeInBits(0), PromBits),
                                       dl, N->getValueType(0)));

  assert(N->getOpcode() == ISD::SIGN_EXTEND &&
         "Invalid extension type");
  EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
  SDValue ShiftCst =
    DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
  return DAG.getNode(ISD::SRA, dl, N->getValueType(0),
                     DAG.getNode(ISD::SHL, dl, N->getValueType(0),
                                 N->getOperand(0), ShiftCst), ShiftCst);
}

SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::SINT_TO_FP ||
          N->getOpcode() == ISD::UINT_TO_FP) &&
         "Need an int -> FP conversion node here");

  if (!Subtarget.has64BitSupport())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Op(N, 0);

  // Don't handle ppc_fp128 here or i1 conversions.
  if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
    return SDValue();
  if (Op.getOperand(0).getValueType() == MVT::i1)
    return SDValue();

  // For i32 intermediate values, unfortunately, the conversion functions
  // leave the upper 32 bits of the value undefined.  Within the set of
  // scalar instructions, we have no method for zero- or sign-extending the
  // value.  Thus, we cannot handle i32 intermediate values here.
  if (Op.getOperand(0).getValueType() == MVT::i32)
    return SDValue();

  assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
         "UINT_TO_FP is supported only with FPCVT");

  // If we have FCFIDS, then use it when converting to single-precision.
  // Otherwise, convert to double-precision and then round.
  unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                       ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
                                                            : PPCISD::FCFIDS)
                       : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
                                                            : PPCISD::FCFID);
  MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                  ? MVT::f32
                  : MVT::f64;

  // If we're converting from a float, to an int, and back to a float again,
  // then we don't need the store/load pair at all.
  if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
       Subtarget.hasFPCVT()) ||
      (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
    SDValue Src = Op.getOperand(0).getOperand(0);
    if (Src.getValueType() == MVT::f32) {
      Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
      DCI.AddToWorklist(Src.getNode());
    }

    unsigned FCTOp =
      Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
                                                        PPCISD::FCTIDUZ;

    SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
    SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);

    if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
      FP = DAG.getNode(ISD::FP_ROUND, dl,
                       MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
      DCI.AddToWorklist(FP.getNode());
    }

    return FP;
  }

  return SDValue();
}

// expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
// builtins) into loads with swaps.
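//
// For example (illustrative only): on a little-endian subtarget a v2f64
// load is expected to become roughly
//   lxvd2x  vs0, 0, rA    ; loads the doublewords in big-endian order
//   xxswapd vs0, vs0      ; swaps them back into little-endian order
// with the swap materialized as the PPCISD::XXSWAPD node built below.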
SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Chain;
  SDValue Base;
  MachineMemOperand *MMO;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode for little endian VSX load");
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    Chain = LD->getChain();
    Base = LD->getBasePtr();
    MMO = LD->getMemOperand();
    // If the MMO suggests this isn't a load of a full vector, leave
    // things alone.  For a built-in, we have to make the change for
    // correctness, so if there is a size problem that will be a bug.
    if (MMO->getSize() < 16)
      return SDValue();
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
    Chain = Intrin->getChain();
    // Similarly to the store case below, Intrin->getBasePtr() doesn't get
    // us what we want.  Get operand 2 instead.
    Base = Intrin->getOperand(2);
    MMO = Intrin->getMemOperand();
    break;
  }
  }

  MVT VecTy = N->getValueType(0).getSimpleVT();
  SDValue LoadOps[] = { Chain, Base };
  SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
                                         DAG.getVTList(VecTy, MVT::Other),
                                         LoadOps, VecTy, MMO);
  DCI.AddToWorklist(Load.getNode());
  Chain = Load.getValue(1);
  SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
                             DAG.getVTList(VecTy, MVT::Other), Chain, Load);
  DCI.AddToWorklist(Swap.getNode());
  return Swap;
}

// expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
// builtins) into stores with swaps.
SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Chain;
  SDValue Base;
  unsigned SrcOpnd;
  MachineMemOperand *MMO;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode for little endian VSX store");
  case ISD::STORE: {
    StoreSDNode *ST = cast<StoreSDNode>(N);
    Chain = ST->getChain();
    Base = ST->getBasePtr();
    MMO = ST->getMemOperand();
    SrcOpnd = 1;
    // If the MMO suggests this isn't a store of a full vector, leave
    // things alone.  For a built-in, we have to make the change for
    // correctness, so if there is a size problem that will be a bug.
    if (MMO->getSize() < 16)
      return SDValue();
    break;
  }
  case ISD::INTRINSIC_VOID: {
    MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
    Chain = Intrin->getChain();
    // Intrin->getBasePtr() oddly does not get what we want.
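    // (Illustrative note, not from the original source: for these store
    // intrinsics the operand list is chain, intrinsic id, value to store,
    // pointer; hence the pointer is operand 3 and the stored value,
    // selected via SrcOpnd below, is operand 2.)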
    Base = Intrin->getOperand(3);
    MMO = Intrin->getMemOperand();
    SrcOpnd = 2;
    break;
  }
  }

  SDValue Src = N->getOperand(SrcOpnd);
  MVT VecTy = Src.getValueType().getSimpleVT();
  SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
                             DAG.getVTList(VecTy, MVT::Other), Chain, Src);
  DCI.AddToWorklist(Swap.getNode());
  Chain = Swap.getValue(1);
  SDValue StoreOps[] = { Chain, Swap, Base };
  SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
                                          DAG.getVTList(MVT::Other),
                                          StoreOps, VecTy, MMO);
  DCI.AddToWorklist(Store.getNode());
  return Store;
}

SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case PPCISD::SHL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->isNullValue())   // 0 << V -> 0.
        return N->getOperand(0);
    }
    break;
  case PPCISD::SRL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->isNullValue())   // 0 >>u V -> 0.
        return N->getOperand(0);
    }
    break;
  case PPCISD::SRA:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->isNullValue() ||   //  0 >>s V -> 0.
          C->isAllOnesValue())  // -1 >>s V -> -1.
        return N->getOperand(0);
    }
    break;
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    return DAGCombineExtBoolTrunc(N, DCI);
  case ISD::TRUNCATE:
  case ISD::SETCC:
  case ISD::SELECT_CC:
    return DAGCombineTruncBoolExt(N, DCI);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return combineFPToIntToFP(N, DCI);
  case ISD::STORE: {
    // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
    if (Subtarget.hasSTFIWX() && !cast<StoreSDNode>(N)->isTruncatingStore() &&
        N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
        N->getOperand(1).getValueType() == MVT::i32 &&
        N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
      SDValue Val = N->getOperand(1).getOperand(0);
      if (Val.getValueType() == MVT::f32) {
        Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
        DCI.AddToWorklist(Val.getNode());
      }
      Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val);
      DCI.AddToWorklist(Val.getNode());

      SDValue Ops[] = {
        N->getOperand(0), Val, N->getOperand(2),
        DAG.getValueType(N->getOperand(1).getValueType())
      };

      Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
              DAG.getVTList(MVT::Other), Ops,
              cast<StoreSDNode>(N)->getMemoryVT(),
              cast<StoreSDNode>(N)->getMemOperand());
      DCI.AddToWorklist(Val.getNode());
      return Val;
    }

    // Turn STORE (BSWAP) -> sthbrx/stwbrx.
    if (cast<StoreSDNode>(N)->isUnindexed() &&
        N->getOperand(1).getOpcode() == ISD::BSWAP &&
        N->getOperand(1).getNode()->hasOneUse() &&
        (N->getOperand(1).getValueType() == MVT::i32 ||
         N->getOperand(1).getValueType() == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
          N->getOperand(1).getValueType() == MVT::i64))) {
      SDValue BSwapOp = N->getOperand(1).getOperand(0);
      // Do an any-extend to 32-bits if this is a half-word input.
      if (BSwapOp.getValueType() == MVT::i16)
        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);

      SDValue Ops[] = {
        N->getOperand(0), BSwapOp, N->getOperand(2),
        DAG.getValueType(N->getOperand(1).getValueType())
      };
      return
        DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
                                Ops, cast<StoreSDNode>(N)->getMemoryVT(),
                                cast<StoreSDNode>(N)->getMemOperand());
    }

    // For little endian, VSX stores require generating xxswapd/stxvd2x.
    EVT VT = N->getOperand(1).getValueType();
    if (VT.isSimple()) {
      MVT StoreVT = VT.getSimpleVT();
      if (Subtarget.hasVSX() && Subtarget.isLittleEndian() &&
          (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
           StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
        return expandVSXStoreForLE(N, DCI);
    }
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    EVT VT = LD->getValueType(0);

    // For little endian, VSX loads require generating lxvd2x/xxswapd.
    if (VT.isSimple()) {
      MVT LoadVT = VT.getSimpleVT();
      if (Subtarget.hasVSX() && Subtarget.isLittleEndian() &&
          (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
           LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
        return expandVSXLoadForLE(N, DCI);
    }

    EVT MemVT = LD->getMemoryVT();
    Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
    unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
    Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
    unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy);
    if (LD->isUnindexed() && VT.isVector() &&
        ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
          // P8 and later hardware should just use LOAD.
          !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 ||
                                       VT == MVT::v4i32 || VT == MVT::v4f32)) ||
         (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) &&
          LD->getAlignment() >= ScalarABIAlignment)) &&
        LD->getAlignment() < ABIAlignment) {
      // This is a type-legal unaligned Altivec or QPX load.
      SDValue Chain = LD->getChain();
      SDValue Ptr = LD->getBasePtr();
      bool isLittleEndian = Subtarget.isLittleEndian();

      // This implements the loading of unaligned vectors as described in
      // the venerable Apple Velocity Engine overview.  Specifically:
      // https://developer.apple.com/hardwaredrivers/ve/alignment.html
      // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
      //
      // The general idea is to expand a sequence of one or more unaligned
      // loads into an alignment-based permutation-control instruction (lvsl
      // or lvsr), a series of regular vector loads (which always truncate
      // their input address to an aligned address), and a series of
      // permutations.  The results of these permutations are the requested
      // loaded values.  The trick is that the last "extra" load is not taken
      // from the address you might suspect (sizeof(vector) bytes after the
      // last requested load), but rather sizeof(vector) - 1 bytes after the
      // last requested vector.  The point of this is to avoid a page fault
      // if the base address happened to be aligned.  This works because if
      // the base address is aligned, then adding less than a full vector
      // length will cause the last vector in the sequence to be (re)loaded.
      // Otherwise, the next vector will be fetched as you might suspect was
      // necessary.

      // We might be able to reuse the permutation generation from
      // a different base address offset from this one by an aligned amount.
      // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
      // optimization later.
      Intrinsic::ID Intr, IntrLD, IntrPerm;
      MVT PermCntlTy, PermTy, LDTy;
      if (Subtarget.hasAltivec()) {
        Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr :
                                Intrinsic::ppc_altivec_lvsl;
        IntrLD = Intrinsic::ppc_altivec_lvx;
        IntrPerm = Intrinsic::ppc_altivec_vperm;
        PermCntlTy = MVT::v16i8;
        PermTy = MVT::v4i32;
        LDTy = MVT::v4i32;
      } else {
        Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld :
                                     Intrinsic::ppc_qpx_qvlpcls;
        IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd :
                                       Intrinsic::ppc_qpx_qvlfs;
        IntrPerm = Intrinsic::ppc_qpx_qvfperm;
        PermCntlTy = MVT::v4f64;
        PermTy = MVT::v4f64;
        LDTy = MemVT.getSimpleVT();
      }

      SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);

      // Create the new MMO for the new base load.  It is like the original
      // MMO, but represents an area in memory almost twice the vector size
      // centered on the original address.  If the address is unaligned, we
      // might start reading up to (sizeof(vector)-1) bytes below the address
      // of the original unaligned load.
      MachineFunction &MF = DAG.getMachineFunction();
      MachineMemOperand *BaseMMO =
        MF.getMachineMemOperand(LD->getMemOperand(), -MemVT.getStoreSize()+1,
                                2*MemVT.getStoreSize()-1);

      // Create the new base load.
      SDValue LDXIntID =
        DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
      SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
      SDValue BaseLoad =
        DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
                                DAG.getVTList(PermTy, MVT::Other),
                                BaseLoadOps, LDTy, BaseMMO);

      // Note that the value of IncOffset (which is provided to the next
      // load's pointer info offset value, and thus used to calculate the
      // alignment), and the value of IncValue (which is actually used to
      // increment the pointer value) are different!  This is because we
      // require the next load to appear to be aligned, even though it
      // is actually offset from the base pointer by a lesser amount.
      int IncOffset = VT.getSizeInBits() / 8;
      int IncValue = IncOffset;

      // Walk (both up and down) the chain looking for another load at the
      // real (aligned) offset (the alignment of the other load does not
      // matter in this case).  If found, then do not use the offset
      // reduction trick, as that will prevent the loads from being later
      // combined (as they would otherwise be duplicates).
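      //
      // For example (illustrative only): a misaligned Altivec load from Ptr
      // is expected to expand to roughly
      //   lvsl  vp, 0, Ptr        ; permute control from Ptr's low bits
      //   lvx   v1, 0, Ptr        ; aligned load at or below Ptr
      //   lvx   v2, 0, Ptr+15     ; "extra" load, sizeof(vector)-1 ahead
      //   vperm vd, v1, v2, vp    ; select the 16 requested bytes
      // (on little-endian targets lvsr is used and the vperm inputs are
      // reversed, as handled further below).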
      if (!findConsecutiveLoad(LD, DAG))
        --IncValue;

      SDValue Increment =
        DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);

      MachineMemOperand *ExtraMMO =
        MF.getMachineMemOperand(LD->getMemOperand(),
                                1, 2*MemVT.getStoreSize()-1);
      SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
      SDValue ExtraLoad =
        DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
                                DAG.getVTList(PermTy, MVT::Other),
                                ExtraLoadOps, LDTy, ExtraMMO);

      SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                               BaseLoad.getValue(1), ExtraLoad.getValue(1));

      // Because vperm has a big-endian bias, we must reverse the order
      // of the input vectors and complement the permute control vector
      // when generating little endian code.  We have already handled the
      // latter by using lvsr instead of lvsl, so just reverse BaseLoad
      // and ExtraLoad here.
      SDValue Perm;
      if (isLittleEndian)
        Perm = BuildIntrinsicOp(IntrPerm,
                                ExtraLoad, BaseLoad, PermCntl, DAG, dl);
      else
        Perm = BuildIntrinsicOp(IntrPerm,
                                BaseLoad, ExtraLoad, PermCntl, DAG, dl);

      if (VT != PermTy)
        Perm = Subtarget.hasAltivec() ?
                 DAG.getNode(ISD::BITCAST, dl, VT, Perm) :
                 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX
                             DAG.getTargetConstant(1, dl, MVT::i64));
                             // second argument is 1 because this rounding
                             // is always exact.

      // The output of the permutation is our loaded result, the TokenFactor
      // is our new chain.
      DCI.CombineTo(N, Perm, TF);
      return SDValue(N, 0);
    }
  }
  break;
  case ISD::INTRINSIC_WO_CHAIN: {
    bool isLittleEndian = Subtarget.isLittleEndian();
    unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
                                         : Intrinsic::ppc_altivec_lvsl);
    if ((IID == Intr ||
         IID == Intrinsic::ppc_qpx_qvlpcld ||
         IID == Intrinsic::ppc_qpx_qvlpcls) &&
        N->getOperand(1)->getOpcode() == ISD::ADD) {
      SDValue Add = N->getOperand(1);

      int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ?
                 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */;

      if (DAG.MaskedValueIsZero(
              Add->getOperand(1),
              APInt::getAllOnesValue(Bits /* alignment */)
                  .zext(
                      Add.getValueType().getScalarType().getSizeInBits()))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode::use_iterator UI = BasePtr->use_begin(),
             UE = BasePtr->use_end();
             UI != UE; ++UI) {
          if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
              cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) {
            // We've found another LVSL/LVSR, and this address is an aligned
            // multiple of that one.  The results will be the same, so use
            // the one we've just found instead.
            return SDValue(*UI, 0);
          }
        }
      }

      if (isa<ConstantSDNode>(Add->getOperand(1))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode::use_iterator UI = BasePtr->use_begin(),
             UE = BasePtr->use_end(); UI != UE; ++UI) {
          if (UI->getOpcode() == ISD::ADD &&
              isa<ConstantSDNode>(UI->getOperand(1)) &&
              (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
               cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
              (1ULL << Bits) == 0) {
            SDNode *OtherAdd = *UI;
            for (SDNode::use_iterator VI = OtherAdd->use_begin(),
                 VE = OtherAdd->use_end(); VI != VE; ++VI) {
              if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
                  cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() ==
                    IID) {
                return SDValue(*VI, 0);
              }
            }
          }
        }
      }
    }
  }

  break;
  case ISD::INTRINSIC_W_CHAIN: {
    // For little endian, VSX loads require generating lxvd2x/xxswapd.
    if (Subtarget.hasVSX() && Subtarget.isLittleEndian()) {
      switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
      default:
        break;
      case Intrinsic::ppc_vsx_lxvw4x:
      case Intrinsic::ppc_vsx_lxvd2x:
        return expandVSXLoadForLE(N, DCI);
      }
    }
    break;
  }
  case ISD::INTRINSIC_VOID: {
    // For little endian, VSX stores require generating xxswapd/stxvd2x.
    if (Subtarget.hasVSX() && Subtarget.isLittleEndian()) {
      switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
      default:
        break;
      case Intrinsic::ppc_vsx_stxvw4x:
      case Intrinsic::ppc_vsx_stxvd2x:
        return expandVSXStoreForLE(N, DCI);
      }
    }
    break;
  }
  case ISD::BSWAP:
    // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
    if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
        N->getOperand(0).hasOneUse() &&
        (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
          N->getValueType(0) == MVT::i64))) {
      SDValue Load = N->getOperand(0);
      LoadSDNode *LD = cast<LoadSDNode>(Load);
      // Create the byte-swapping load.
      SDValue Ops[] = {
        LD->getChain(),    // Chain
        LD->getBasePtr(),  // Ptr
        DAG.getValueType(N->getValueType(0)) // VT
      };
      SDValue BSLoad =
        DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
                                DAG.getVTList(N->getValueType(0) == MVT::i64 ?
                                              MVT::i64 : MVT::i32, MVT::Other),
                                Ops, LD->getMemoryVT(), LD->getMemOperand());

      // If this is an i16 load, insert the truncate.
      SDValue ResVal = BSLoad;
      if (N->getValueType(0) == MVT::i16)
        ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);

      // First, combine the bswap away.  This makes the value produced by
      // the load dead.
      DCI.CombineTo(N, ResVal);

      // Next, combine the load away, we give it a bogus result value but a
      // real chain result.  The result value is dead because the bswap is
      // dead.
      DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));

      // Return N so it doesn't get rechecked!
      return SDValue(N, 0);
    }

    break;
  case PPCISD::VCMP: {
    // If a VCMPo node already exists with exactly the same operands as this
    // node, use its result instead of this node (VCMPo computes both a CR6
    // and a normal output).
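    //
    // For example (illustrative only): if the DAG contains both
    //   VCMP  a, b, <cmp-opc>   and   VCMPo a, b, <cmp-opc>
    // the plain VCMP can reuse the VCMPo's vector result, since the dot
    // form produces the same mask and additionally sets CR6.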
  case ISD::BSWAP:
    // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
    if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
        N->getOperand(0).hasOneUse() &&
        (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
          N->getValueType(0) == MVT::i64))) {
      SDValue Load = N->getOperand(0);
      LoadSDNode *LD = cast<LoadSDNode>(Load);
      // Create the byte-swapping load.
      SDValue Ops[] = {
        LD->getChain(),                      // Chain
        LD->getBasePtr(),                    // Ptr
        DAG.getValueType(N->getValueType(0)) // VT
      };
      SDValue BSLoad =
        DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
                                DAG.getVTList(N->getValueType(0) == MVT::i64 ?
                                              MVT::i64 : MVT::i32, MVT::Other),
                                Ops, LD->getMemoryVT(), LD->getMemOperand());

      // If this is an i16 load, insert the truncate.
      SDValue ResVal = BSLoad;
      if (N->getValueType(0) == MVT::i16)
        ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);

      // First, combine the bswap away.  This makes the value produced by
      // the load dead.
      DCI.CombineTo(N, ResVal);

      // Next, combine the load away; we give it a bogus result value but a
      // real chain result.  The result value is dead because the bswap is
      // dead.
      DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));

      // Return N so it doesn't get rechecked!
      return SDValue(N, 0);
    }
    break;
  case PPCISD::VCMP: {
    // If a VCMPo node already exists with exactly the same operands as this
    // node, use its result instead of this node (VCMPo computes both a CR6
    // and a normal output).
    if (!N->getOperand(0).hasOneUse() &&
        !N->getOperand(1).hasOneUse() &&
        !N->getOperand(2).hasOneUse()) {

      // Scan all of the users of the LHS, looking for VCMPo's that match.
      SDNode *VCMPoNode = nullptr;

      SDNode *LHSN = N->getOperand(0).getNode();
      for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
           UI != E; ++UI)
        if (UI->getOpcode() == PPCISD::VCMPo &&
            UI->getOperand(1) == N->getOperand(1) &&
            UI->getOperand(2) == N->getOperand(2) &&
            UI->getOperand(0) == N->getOperand(0)) {
          VCMPoNode = *UI;
          break;
        }

      // If there is no VCMPo node, or if its flag result is unused, don't
      // transform this.
      if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
        break;

      // Look at the (necessarily single) use of the flag value.  If it has
      // a chain, this transformation is more complex.  Note that multiple
      // things could use the value result, which we should ignore.
      SDNode *FlagUser = nullptr;
      for (SDNode::use_iterator UI = VCMPoNode->use_begin();
           FlagUser == nullptr; ++UI) {
        assert(UI != VCMPoNode->use_end() && "Didn't find user!");
        SDNode *User = *UI;
        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
          if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
            FlagUser = User;
            break;
          }
        }
      }

      // If the user is a MFOCRF instruction, we know this is safe.
      // Otherwise we give up for right now.
      if (FlagUser->getOpcode() == PPCISD::MFOCRF)
        return SDValue(VCMPoNode, 0);
    }
    break;
  }
  case ISD::BRCOND: {
    SDValue Cond = N->getOperand(1);
    SDValue Target = N->getOperand(2);

    if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
          Intrinsic::ppc_is_decremented_ctr_nonzero) {

      // We now need to make the intrinsic dead (it cannot be instruction
      // selected).
      DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
      assert(Cond.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
                         N->getOperand(0), Target);
    }
    break;
  }
  case ISD::BR_CC: {
    // If this is a branch on an altivec predicate comparison, lower this so
    // that we don't have to do a MFOCRF: instead, branch directly on CR6.
    // This lowering is done pre-legalize, because the legalizer lowers the
    // predicate compare down to code that is difficult to reassemble.
    ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);

    // Sometimes the promoted value of the intrinsic is ANDed by some
    // non-zero value.  If so, look through the AND to reach the intrinsic.
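    // E.g. (a sketch) type promotion of the intrinsic's i1 result can leave
    //   (br_cc setne, (and (ppc_is_decremented_ctr_nonzero), 1), 0)
    // where ANDing with the non-zero mask does not change the 0/1 value
    // being tested.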
    if (LHS.getOpcode() == ISD::AND &&
        LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
          Intrinsic::ppc_is_decremented_ctr_nonzero &&
        isa<ConstantSDNode>(LHS.getOperand(1)) &&
        !cast<ConstantSDNode>(LHS.getOperand(1))->getConstantIntValue()->
          isZero())
      LHS = LHS.getOperand(0);

    if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
          Intrinsic::ppc_is_decremented_ctr_nonzero &&
        isa<ConstantSDNode>(RHS)) {
      assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
             "Counter decrement comparison is not EQ or NE");

      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      bool isBDNZ = (CC == ISD::SETEQ && Val) ||
                    (CC == ISD::SETNE && !Val);

      // We now need to make the intrinsic dead (it cannot be instruction
      // selected).
      DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
      assert(LHS.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
                         N->getOperand(0), N->getOperand(4));
    }
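
    // The rest of this case branches directly on CR6 for altivec predicate
    // compares.  As a sketch, source like "if (vec_all_eq(a, b))" reaches
    // us as a br_cc of the vcmpequw_p intrinsic's result against a
    // constant; the dot-form compare (vcmpequw.) sets CR6 as a side effect,
    // so the branch can test CR6 without an intervening mfocrf.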
    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        getAltivecCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
      assert(isDot && "Can't compare against a vector result!");

      // If this is a comparison against something other than 0/1, then we
      // know that the condition is never/always true.
      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ)    // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, dl, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

      // Create the PPCISD altivec 'dot' comparison node.
      SDValue Ops[] = {
        LHS.getOperand(2),  // LHS of compare
        LHS.getOperand(3),  // RHS of compare
        DAG.getConstant(CompareOpc, dl, MVT::i32)
      };
      EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
      SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);

      // Unpack the result based on how the target uses it.
      PPC::Predicate CompOpc;
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
                         DAG.getConstant(CompOpc, dl, MVT::i32),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  }

  return SDValue();
}
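
// A sketch of the sequence BuildSDIVPow2 emits via PPCISD::SRA_ADDZE for
// "sdiv x, 2^k": "srawi rT, rX, k" (sradi on 64-bit), which sets CA when a
// negative value has one-bits shifted out, followed by "addze rQ, rT" to
// round the quotient toward zero; a negative divisor then negates the
// result.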
SDValue
PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 std::vector<SDNode *> *Created) const {
  // fold (sdiv X, pow2)
  EVT VT = N->getValueType(0);
  if (VT == MVT::i64 && !Subtarget.isPPC64())
    return SDValue();
  if ((VT != MVT::i32 && VT != MVT::i64) ||
      !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
    return SDValue();

  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);

  bool IsNegPow2 = (-Divisor).isPowerOf2();
  unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
  SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);

  SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
  if (Created)
    Created->push_back(Op.getNode());

  if (IsNegPow2) {
    Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
    if (Created)
      Created->push_back(Op.getNode());
  }

  return Op;
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//
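
// A sketch of why the LBRX case below is sound: lhbrx loads a halfword,
// byte-reverses it, and zero-fills the remainder of the register, so every
// bit above bit 15 of the result is known zero; advertising this lets the
// DAG combiner fold away later zero-extensions and masks.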
void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      APInt &KnownZero,
                                                      APInt &KnownOne,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
    if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
      KnownZero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpequd_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtsd_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
    case Intrinsic::ppc_altivec_vcmpgtud_p:
      KnownZero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}
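
// Note that the value returned below is the log2 of the alignment in bytes
// (the TargetLowering convention here), so returning 5 requests a 32-byte
// boundary.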
unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8: {
    if (!ML)
      break;

    const PPCInstrInfo *TII = Subtarget.getInstrInfo();

    // For small loops (between 5 and 8 instructions), align to a 32-byte
    // boundary so that the entire loop fits in one instruction-cache line.
    uint64_t LoopSize = 0;
    for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
      for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J)
        LoopSize += TII->GetInstSizeInBytes(J);

    if (LoopSize > 16 && LoopSize <= 32)
      return 5;

    break;
  }
  }

  return TargetLowering::getPrefLoopAlignment(ML);
}
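
// For reference, a sketch of how these constraints appear in GCC-style
// inline asm: asm ("add %0, %1, %2" : "=r" (d) : "r" (a), "b" (b)) uses
// GPRs with "b" excluding r0, and asm ("lxvd2x %x0, %y1" : "=wa" (v) :
// "Z" (*p)) loads a VSX register from an r+r address.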
/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'v':
    case 'y':
      return C_RegisterClass;
    case 'Z':
      // FIXME: While Z does indicate a memory constraint, it specifically
      // indicates an r+r address (used in conjunction with the 'y' modifier
      // in the replacement string).  Currently, we're forcing the base
      // register to be r0 in the asm printer (which is interpreted as zero)
      // and forming the complete address in the second register.  This is
      // suboptimal.
      return C_Memory;
    }
  } else if (Constraint == "wc") { // individual CR bits.
    return C_RegisterClass;
  } else if (Constraint == "wa" || Constraint == "wd" ||
             Constraint == "wf" || Constraint == "ws") {
    return C_RegisterClass; // VSX registers.
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
PPCTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();

  // Look at the constraint type.
  if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
    return CW_Register; // an individual CR bit.
  else if ((StringRef(constraint) == "wa" ||
            StringRef(constraint) == "wd" ||
            StringRef(constraint) == "wf") &&
           type->isVectorTy())
    return CW_Register;
  else if (StringRef(constraint) == "ws" && type->isDoubleTy())
    return CW_Register;

  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'b':
    if (type->isIntegerTy())
      weight = CW_Register;
    break;
  case 'f':
    if (type->isFloatTy())
      weight = CW_Register;
    break;
  case 'd':
    if (type->isDoubleTy())
      weight = CW_Register;
    break;
  case 'v':
    if (type->isVectorTy())
      weight = CW_Register;
    break;
  case 'y':
    weight = CW_Register;
    break;
  case 'Z':
    weight = CW_Memory;
    break;
  }
  return weight;
}

std::pair<unsigned, const TargetRegisterClass *>
PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
      return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
    case 'r':   // R0-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RCRegClass);
      return std::make_pair(0U, &PPC::GPRCRegClass);
    case 'f':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &PPC::F4RCRegClass);
      if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &PPC::F8RCRegClass);
      if (VT == MVT::v4f64 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QFRCRegClass);
      if (VT == MVT::v4f32 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QSRCRegClass);
      break;
    case 'v':
      if (VT == MVT::v4f64 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QFRCRegClass);
      if (VT == MVT::v4f32 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QSRCRegClass);
      return std::make_pair(0U, &PPC::VRRCRegClass);
    case 'y':   // crrc
      return std::make_pair(0U, &PPC::CRRCRegClass);
    }
  } else if (Constraint == "wc") { // an individual CR bit.
    return std::make_pair(0U, &PPC::CRBITRCRegClass);
  } else if (Constraint == "wa" || Constraint == "wd" ||
             Constraint == "wf") {
    return std::make_pair(0U, &PPC::VSRCRegClass);
  } else if (Constraint == "ws") {
    if (VT == MVT::f32)
      return std::make_pair(0U, &PPC::VSSRCRegClass);
    else
      return std::make_pair(0U, &PPC::VSFRCRegClass);
  }

  std::pair<unsigned, const TargetRegisterClass *> R =
      TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit
  // registers (which we call X[0-9]+).  If a 64-bit value has been
  // requested, and a 32-bit GPR has been selected, then 'upgrade' it to the
  // 64-bit parent register.
  // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
  // the AsmName field from *RegisterInfo.td, then this would not be
  // necessary.
  if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
      PPC::GPRCRegClass.contains(R.first))
    return std::make_pair(TRI->getMatchingSuperReg(R.first,
                            PPC::sub_32, &PPC::G8RCRegClass),
                          &PPC::G8RCRegClass);

  // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
  if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
    R.first = PPC::CR0;
    R.second = &PPC::CRRCRegClass;
  }

  return R;
}
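
// As a sketch of the immediate constraints handled below, user code such as
//   asm ("addi %0, %1, %2" : "=r" (r) : "r" (x), "I" (16));
// requires the "I" operand to be a signed 16-bit constant, matching addi's
// immediate field; "K" similarly matches the unsigned 16-bit immediates of
// ori/andi.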
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints.
  if (Constraint.length() > 1) return;

  char Letter = Constraint[0];
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
    if (!CST) return; // Must be an immediate to match.
    SDLoc dl(Op);
    int64_t Value = CST->getSExtValue();
    EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
                         // numbers are printed as such.
    switch (Letter) {
    default: llvm_unreachable("Unknown constraint letter!");
    case 'I': // "I" is a signed 16-bit constant.
      if (isInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'J': // "J" is a constant with only the high-order 16 bits nonzero.
      if (isShiftedUInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'L': // "L" is a signed 16-bit constant shifted left 16 bits.
      if (isShiftedInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'K': // "K" is a constant with only the low-order 16 bits nonzero.
      if (isUInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'M': // "M" is a constant that is greater than 31.
      if (Value > 31)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'N': // "N" is a positive constant that is an exact power of two.
      if (Value > 0 && isPowerOf2_64(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'O': // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
      if (isInt<16>(-Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
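
// A sketch of what the legality query below accepts: an access like x[i]
// lowered as base-register plus index-register is AM.Scale == 1 with no
// offset (an r+r form, legal), while a request for Scale == 8, say from
// indexing an i64 array, is rejected, leaving the multiply as a separate
// instruction.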
// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS) const {
  // PPC does not allow r+i addressing modes for vectors!
  if (Ty->isVectorTy() && AM.BaseOffs != 0)
    return false;

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r addressing beyond this point:
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = Subtarget.isPPC64();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
        DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(),
                        dl, isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

  // Just load the return address off the stack.
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
                     MachinePointerInfo(), false, false, false, 0);
}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setFrameAddressIsTaken(true);

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;

  // Naked functions never have a frame pointer, and so we use r1.  For all
  // other functions, this decision must be delayed until during PEI.
  unsigned FrameReg;
  if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
    FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
  else
    FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;

  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
                                         PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo(), false, false,
                            false, 0);
  return FrameAddr;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
unsigned PPCTargetLowering::getRegisterByName(const char* RegName, EVT VT,
                                              SelectionDAG &DAG) const {
  bool isPPC64 = Subtarget.isPPC64();
  bool isDarwinABI = Subtarget.isDarwinABI();

  if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) ||
      (!isPPC64 && VT != MVT::i32))
    report_fatal_error("Invalid register global variable type");

  bool is64Bit = isPPC64 && VT == MVT::i64;
  unsigned Reg = StringSwitch<unsigned>(RegName)
                   .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
                   .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2)
                   .Case("r13", (!isPPC64 && isDarwinABI) ? 0 :
                                (is64Bit ? PPC::X13 : PPC::R13))
                   .Default(0);

  if (Reg)
    return Reg;
  report_fatal_error("Invalid register name global variable");
}

bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
  return false;
}
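
// A note on the memory windows reported below (a sketch of the reasoning):
// lvx-style and QPX loads ignore the low bits of the address, operating on
// the containing aligned block.  Since IntrinsicInfo must cover every byte
// the instruction might touch relative to the pointer, those forms report
// the window [ptr - (S-1), ptr + (S-1)], i.e. 2*S-1 bytes for a type of
// store size S; the 'a' variants, whose pointers are taken to be already
// aligned, can report exactly S bytes at offset 0.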
bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::ppc_qpx_qvlfd:
  case Intrinsic::ppc_qpx_qvlfs:
  case Intrinsic::ppc_qpx_qvlfcd:
  case Intrinsic::ppc_qpx_qvlfcs:
  case Intrinsic::ppc_qpx_qvlfiwa:
  case Intrinsic::ppc_qpx_qvlfiwz:
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = 1;
    Info.vol = false;
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::ppc_qpx_qvlfda:
  case Intrinsic::ppc_qpx_qvlfsa:
  case Intrinsic::ppc_qpx_qvlfcda:
  case Intrinsic::ppc_qpx_qvlfcsa:
  case Intrinsic::ppc_qpx_qvlfiwaa:
  case Intrinsic::ppc_qpx_qvlfiwza: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvlfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = 1;
    Info.vol = false;
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfd:
  case Intrinsic::ppc_qpx_qvstfs:
  case Intrinsic::ppc_qpx_qvstfcd:
  case Intrinsic::ppc_qpx_qvstfcs:
  case Intrinsic::ppc_qpx_qvstfiw:
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = 1;
    Info.vol = false;
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfda:
  case Intrinsic::ppc_qpx_qvstfsa:
  case Intrinsic::ppc_qpx_qvstfcda:
  case Intrinsic::ppc_qpx_qvstfcsa:
  case Intrinsic::ppc_qpx_qvstfiwa: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = 1;
    Info.vol = false;
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  default:
    break;
  }

  return false;
}
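
// A sketch of this hook's effect: with VSX available, a 32-byte memcpy with
// 16-byte-aligned (or P8-class) operands is expanded by SelectionDAG as two
// v4i32 load/store pairs rather than a longer chain of GPR copies; with
// QPX, a large, 32-byte-aligned memset can instead use v4f64.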
/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering.  If DstAlign is zero, that means it's safe because the
/// destination alignment can satisfy any constraint.  Similarly, if
/// SrcAlign is zero it means there isn't a need to check it against the
/// alignment requirement, probably because the source does not need to be
/// loaded.  If 'IsMemset' is true, that means it's expanding a memset.  If
/// 'ZeroMemset' is true, that means it's a memset of zero.  'MemcpyStrSrc'
/// indicates whether the memcpy source is constant so it does not need to
/// be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
                                           unsigned DstAlign,
                                           unsigned SrcAlign,
                                           bool IsMemset, bool ZeroMemset,
                                           bool MemcpyStrSrc,
                                           MachineFunction &MF) const {
  if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
    const Function *F = MF.getFunction();
    // When expanding a memset, require at least two QPX instructions to
    // cover the cost of loading the value to be stored from the constant
    // pool.
    if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
        (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
        !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
      return MVT::v4f64;
    }

    // We should use Altivec/VSX loads and stores when available.  For
    // unaligned addresses, unaligned VSX loads are only fast starting with
    // the P8.
    if (Subtarget.hasAltivec() && Size >= 16 &&
        (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
         ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
      return MVT::v4i32;
  }

  if (Subtarget.isPPC64()) {
    return MVT::i64;
  }

  return MVT::i32;
}

/// \brief Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0 || BitSize > 64)
    return false;
  return true;
}

bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Generally speaking, zexts are not free, but they are free when they can
  // be folded with other operations.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  // FIXME: Add other cases...
  //  - 32-bit shifts with a zext to i64
  //  - zext after ctlz, bswap, etc.
  //  - zext after and by a constant mask

  return TargetLowering::isZExtFree(Val, VT2);
}

bool PPCTargetLowering::isFPExtFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return true;
}

bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                       unsigned,
                                                       unsigned,
                                                       bool *Fast) const {
  if (DisablePPCUnaligned)
    return false;

  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual
  // expansion, and generally only traps for software emulation when
  // crossing page boundaries.

  if (!VT.isSimple())
    return false;

  if (VT.getSimpleVT().isVector()) {
    if (Subtarget.hasVSX()) {
      if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
          VT != MVT::v4f32 && VT != MVT::v4i32)
        return false;
    } else {
      return false;
    }
  }

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  default:
    break;
  }

  return false;
}

const MCPhysReg *
PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
  // LR is a callee-save register, but we must treat it as clobbered by any
  // call site.  Hence we include LR in the scratch registers, which are in
  // turn added as implicit-defs for stackmaps and patchpoints.  The same
  // reasoning applies to CTR, which is used by any indirect call.
  static const MCPhysReg ScratchRegs[] = {
    PPC::X12, PPC::LR8, PPC::CTR8, 0
  };

  return ScratchRegs;
}

bool
PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
    EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return false;

  if (Subtarget.hasQPX()) {
    if (VT == MVT::v4f32 || VT == MVT::v4f64 || VT == MVT::v4i1)
      return true;
  }

  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT,
                                                             DefinedValues);
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref || Subtarget.enableMachineScheduler())
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}

// Create a fast isel object.
FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) const {
  return PPC::createFastISel(FuncInfo, LibInfo);
}