//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPCCallingConv.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCTargetMachine.h"
#include "PPCTargetObjectFile.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

// FIXME: Remove this once soft-float is supported.
static cl::opt<bool> DisablePPCFloatInVariadic("disable-ppc-float-in-variadic",
    cl::desc("disable saving float registers for va_start on PPC"),
    cl::Hidden);

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
    cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
    cl::desc("disable setting the node scheduling preference to ILP on PPC"),
    cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
    cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
  addRegisterClass(MVT::f64, &PPC::F8RCRegClass);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
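  // (An update-form access such as lwzu or stwu computes base+displacement,
  // accesses memory at that address, and writes the new address back into
  // the base register, so a separate address increment is not needed.)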
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // This is used in the ppcf128->int sequence. Note it has different semantics
  // from FP_ROUND: that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
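  // With these expanded as well, an i32 srem legalizes to a divide, a
  // multiply, and a subtract (divw, mullw, subf) rather than to a combined
  // node we cannot select.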
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Legal);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Expand FSQRT unless we have the hardware instruction, or a
  // reciprocal-estimate refinement is usable under unsafe-fp-math.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP or CTTZ; CTPOP is handled below based on
  // POPCNTD.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);

  if (Subtarget.hasPOPCNTD()) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have Select.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::f64, Expand);

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuations, user-level threading, and so on. As a result, no
  // other SjLj exception interfaces are implemented; please don't build your
  // own exception handling based on them. LLVM/Clang supports zero-cost DWARF
  // exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
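      // (The 64-bit SVR4 va_list is simply a pointer into 8-byte slots, so,
      // for example, va_arg(ap, int) loads a full doubleword and truncates.)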
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  if (Subtarget.isSVR4ABI() && !isPPC64)
    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  // Comparisons that require checking two conditions.
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // 64-bit implementations also have instructions for converting between
    // i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
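  // (FPCVT covers the ISA 2.06 conversion forms: fcfids/fcfidus produce
  // single-precision results, and fctiwuz/fctiduz convert to unsigned
  // integers.)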
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      // This check is temporary until support for quadword add/sub is added.
      if (VT.SimpleTy != MVT::v1i128) {
        setOperationAction(ISD::ADD, VT, Legal);
        setOperationAction(ISD::SUB, VT, Legal);
      } else {
        setOperationAction(ISD::ADD, VT, Expand);
        setOperationAction(ISD::SUB, VT, Expand);
      }

      // Vector instructions introduced in P8.
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FPOWI, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

      for (MVT InnerVT : MVT::vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
      setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Vector operation legalization checks the result type of
      // SIGN_EXTEND_INREG; overall legalization checks the inner type.
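      // For example, a sign_extend_inreg producing v2i64 from v2i16 is
      // queried once with the v2i64 result type (Legal) and once with the
      // v2i16 inner type (Custom), so both rows need entries.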
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f64, Custom);
    setOperationAction(ISD::STORE, MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_ROUND_INREG, MVT::v4f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG, MVT::v4f64, Legal);
    setOperationAction(ISD::FABS, MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f32, Custom);
    setOperationAction(ISD::STORE, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOWI, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND, MVT::v4i1, Legal);
    setOperationAction(ISD::OR, MVT::v4i1, Legal);
    setOperationAction(ISD::XOR, MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
    setOperationAction(ISD::STORE, MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
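    // (rint is required to raise FE_INEXACT when the result differs from the
    // input, so expanding FRINT keeps it on the scalar libm path that honors
    // that requirement.)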
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  if (isPPC64) {
    setStackPointerRegisterToSaveRestore(PPC::X1);
    setExceptionPointerRegister(PPC::X3);
    setExceptionSelectorRegister(PPC::X4);
  } else {
    setStackPointerRegisterToSaveRestore(PPC::R1);
    setExceptionPointerRegister(PPC::R3);
    setExceptionSelectorRegister(PPC::R4);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
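  // (All eight 4-bit CR fields are available as compare targets, so keeping
  // a compare far from its use does not pressure a single flags register as
  // it would on other targets.)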
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(2);
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(4);

  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
    setPrefFunctionAlignment(4);
    setPrefLoopAlignment(4);
    break;
  }

  setInsertFencesForAtomic(true);

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of a function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary. Everything
  // else is 8-byte aligned on PPC64 and 4-byte aligned on PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
  return Align;
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER:    break;
  case PPCISD::FSEL:            return "PPCISD::FSEL";
  case PPCISD::FCFID:           return "PPCISD::FCFID";
  case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
  case PPCISD::FRE:             return "PPCISD::FRE";
  case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX:          return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:         return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:        return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:           return "PPCISD::VPERM";
  case PPCISD::CMPB:            return "PPCISD::CMPB";
  case PPCISD::Hi:              return "PPCISD::Hi";
  case PPCISD::Lo:              return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
  case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
  case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:             return "PPCISD::SRL";
  case PPCISD::SRA:             return "PPCISD::SRA";
  case PPCISD::SHL:             return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL:            return "PPCISD::CALL";
  case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR:           return "PPCISD::MTCTR";
  case PPCISD::BCTRL:           return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
  case PPCISD::MFVSR:           return "PPCISD::MFVSR";
  case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
  case PPCISD::ANDIo_1_EQ_BIT:  return "PPCISD::ANDIo_1_EQ_BIT";
  case PPCISD::ANDIo_1_GT_BIT:  return "PPCISD::ANDIo_1_GT_BIT";
  case PPCISD::VCMP:            return "PPCISD::VCMP";
  case PPCISD::VCMPo:           return "PPCISD::VCMPo";
  case PPCISD::LBRX:            return "PPCISD::LBRX";
  case PPCISD::STBRX:           return "PPCISD::STBRX";
  case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
  case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
  case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ:            return "PPCISD::BDNZ";
  case PPCISD::BDZ:             return "PPCISD::BDZ";
  case PPCISD::MFFS:            return "PPCISD::MFFS";
  case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET:          return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
"PPCISD::ADDI_TLSGD_L"; 1041 case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR"; 1042 case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR"; 1043 case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA"; 1044 case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L"; 1045 case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR"; 1046 case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR"; 1047 case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA"; 1048 case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L"; 1049 case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT"; 1050 case PPCISD::SC: return "PPCISD::SC"; 1051 case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD"; 1052 case PPCISD::QVFPERM: return "PPCISD::QVFPERM"; 1053 case PPCISD::QVGPCI: return "PPCISD::QVGPCI"; 1054 case PPCISD::QVALIGNI: return "PPCISD::QVALIGNI"; 1055 case PPCISD::QVESPLATI: return "PPCISD::QVESPLATI"; 1056 case PPCISD::QBFLT: return "PPCISD::QBFLT"; 1057 case PPCISD::QVLFSb: return "PPCISD::QVLFSb"; 1058 } 1059 return nullptr; 1060 } 1061 1062 EVT PPCTargetLowering::getSetCCResultType(LLVMContext &C, EVT VT) const { 1063 if (!VT.isVector()) 1064 return Subtarget.useCRBits() ? MVT::i1 : MVT::i32; 1065 1066 if (Subtarget.hasQPX()) 1067 return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements()); 1068 1069 return VT.changeVectorElementTypeToInteger(); 1070 } 1071 1072 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const { 1073 assert(VT.isFloatingPoint() && "Non-floating-point FMA?"); 1074 return true; 1075 } 1076 1077 //===----------------------------------------------------------------------===// 1078 // Node matching predicates, for use by the tblgen matching code. 1079 //===----------------------------------------------------------------------===// 1080 1081 /// isFloatingPointZero - Return true if this is 0.0 or -0.0. 1082 static bool isFloatingPointZero(SDValue Op) { 1083 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 1084 return CFP->getValueAPF().isZero(); 1085 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 1086 // Maybe this has already been legalized into the constant pool? 1087 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1))) 1088 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 1089 return CFP->getValueAPF().isZero(); 1090 } 1091 return false; 1092 } 1093 1094 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return 1095 /// true if Op is undef or if it matches the specified value. 1096 static bool isConstantOrUndef(int Op, int Val) { 1097 return Op < 0 || Op == Val; 1098 } 1099 1100 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a 1101 /// VPKUHUM instruction. 1102 /// The ShuffleKind distinguishes between big-endian operations with 1103 /// two different inputs (0), either-endian operations with two identical 1104 /// inputs (1), and little-endian operations with two different inputs (2). 1105 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getTarget().getDataLayout()->isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getTarget().getDataLayout()->isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
        return false;
  }
  return true;
}

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
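/// For example, with ShuffleKind 0 (big-endian, two inputs) the expected
/// v16i8 mask is <4,5,6,7,12,13,14,15,20,21,22,23,28,29,30,31>: the low
/// word of each doubleword of the concatenated inputs.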
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getTarget().getDataLayout()->isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getTarget().getDataLayout()->isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getTarget().getDataLayout()->isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
/// The ShuffleKind distinguishes between big-endian operations with two
/// different inputs (0), either-endian operations with two identical inputs
/// (1), and little-endian operations with two different inputs (2). For the
/// latter, the input operands are swapped (see PPCInstrAltivec.td).
int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                             SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::v16i8)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;

  ShiftAmt -= i;
  bool isLE = DAG.getTarget().getDataLayout()->isLittleEndian();

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else if (ShuffleKind == 1) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  } else
    return -1;

  if (ShuffleKind == 2 && isLE)
    ShiftAmt = 16 - ShiftAmt;

  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte
  // element splatted with a v16i8 mask.
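  // For example, splatting element 1 of a v4i32 (EltSize == 4) uses the
  // v16i8 mask <4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7>.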
1344 for (unsigned i = 1; i != EltSize; ++i) 1345 if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase)) 1346 return false; 1347 1348 for (unsigned i = EltSize, e = 16; i != e; i += EltSize) { 1349 if (N->getMaskElt(i) < 0) continue; 1350 for (unsigned j = 0; j != EltSize; ++j) 1351 if (N->getMaskElt(i+j) != N->getMaskElt(j)) 1352 return false; 1353 } 1354 return true; 1355 } 1356 1357 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the 1358 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask. 1359 unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize, 1360 SelectionDAG &DAG) { 1361 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 1362 assert(isSplatShuffleMask(SVOp, EltSize)); 1363 if (DAG.getTarget().getDataLayout()->isLittleEndian()) 1364 return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize); 1365 else 1366 return SVOp->getMaskElt(0) / EltSize; 1367 } 1368 1369 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed 1370 /// by using a vspltis[bhw] instruction of the specified element size, return 1371 /// the constant being splatted. The ByteSize field indicates the number of 1372 /// bytes of each element [124] -> [bhw]. 1373 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { 1374 SDValue OpVal(nullptr, 0); 1375 1376 // If ByteSize of the splat is bigger than the element size of the 1377 // build_vector, then we have a case where we are checking for a splat where 1378 // multiple elements of the buildvector are folded together into a single 1379 // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8). 1380 unsigned EltSize = 16/N->getNumOperands(); 1381 if (EltSize < ByteSize) { 1382 unsigned Multiple = ByteSize/EltSize; // Number of BV entries per splat value. 1383 SDValue UniquedVals[4]; 1384 assert(Multiple > 1 && Multiple <= 4 && "How can this happen?"); 1385 1386 // See if all of the elements in the buildvector agree across the vector. 1387 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 1388 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; 1389 // If the element isn't a constant, bail out entirely. 1390 if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue(); 1391 1392 1393 if (!UniquedVals[i&(Multiple-1)].getNode()) 1394 UniquedVals[i&(Multiple-1)] = N->getOperand(i); 1395 else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i)) 1396 return SDValue(); // no match. 1397 } 1398 1399 // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains 1400 // either constant or undef values that are identical for each chunk. See 1401 // if these chunks can form into a larger vspltis*. 1402 1403 // Check to see if all of the leading entries are either 0 or -1. If 1404 // neither, then this won't fit into the immediate field. 1405 bool LeadingZero = true; 1406 bool LeadingOnes = true; 1407 for (unsigned i = 0; i != Multiple-1; ++i) { 1408 if (!UniquedVals[i].getNode()) continue; // Must have been undefs. 1409 1410 LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue(); 1411 LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue(); 1412 } 1413 // Finally, check the least significant entry.
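// For example, a v8i16 build_vector equivalent to "vspltisw 4" (ByteSize == 4, so Multiple == 2) uniques to the chunks {0, 4}: the leading chunk is zero and the trailing chunk is a small non-negative constant, so the LeadingZero path below returns 4.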
1414 if (LeadingZero) { 1415 if (!UniquedVals[Multiple-1].getNode()) 1416 return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef 1417 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue(); 1418 if (Val < 16) // 0,0,0,4 -> vspltisw(4) 1419 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32); 1420 } 1421 if (LeadingOnes) { 1422 if (!UniquedVals[Multiple-1].getNode()) 1423 return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef 1424 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue(); 1425 if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2) 1426 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32); 1427 } 1428 1429 return SDValue(); 1430 } 1431 1432 // Check to see if this buildvec has a single non-undef value in its elements. 1433 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 1434 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; 1435 if (!OpVal.getNode()) 1436 OpVal = N->getOperand(i); 1437 else if (OpVal != N->getOperand(i)) 1438 return SDValue(); 1439 } 1440 1441 if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def. 1442 1443 unsigned ValSizeInBytes = EltSize; 1444 uint64_t Value = 0; 1445 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { 1446 Value = CN->getZExtValue(); 1447 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { 1448 assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!"); 1449 Value = FloatToBits(CN->getValueAPF().convertToFloat()); 1450 } 1451 1452 // If the splat value is larger than the element value, then we can never do 1453 // this splat. The only case where the replicated bits could fit into our 1454 // immediate field would be zero, and we prefer to use vxor for it. 1455 if (ValSizeInBytes < ByteSize) return SDValue(); 1456 1457 // If the element value is larger than the splat value, check if it consists 1458 // of a repeated bit pattern of size ByteSize. 1459 if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8)) 1460 return SDValue(); 1461 1462 // Properly sign extend the value. 1463 int MaskVal = SignExtend32(Value, ByteSize * 8); 1464 1465 // If this is zero, don't match; zero matches ISD::isBuildVectorAllZeros. 1466 if (MaskVal == 0) return SDValue(); 1467 1468 // Finally, if this value fits in a 5-bit sext field, return it. 1469 if (SignExtend32<5>(MaskVal) == MaskVal) 1470 return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32); 1471 return SDValue(); 1472 } 1473 1474 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift 1475 /// amount, otherwise return -1. 1476 int PPC::isQVALIGNIShuffleMask(SDNode *N) { 1477 EVT VT = N->getValueType(0); 1478 if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1) 1479 return -1; 1480 1481 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 1482 1483 // Find the first non-undef value in the shuffle mask. 1484 unsigned i; 1485 for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i) 1486 /*search*/; 1487 1488 if (i == 4) return -1; // all undef. 1489 1490 // Otherwise, check to see if the rest of the elements are consecutively 1491 // numbered from this value. 1492 unsigned ShiftAmt = SVOp->getMaskElt(i); 1493 if (ShiftAmt < i) return -1; 1494 ShiftAmt -= i; 1495 1496 // Check the rest of the elements to see if they are consecutive.
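// For example, the mask <1,2,3,4> gives ShiftAmt == 1, and the loop below confirms that the remaining elements continue the consecutive run.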
1497 for (++i; i != 4; ++i) 1498 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 1499 return -1; 1500 1501 return ShiftAmt; 1502 } 1503 1504 //===----------------------------------------------------------------------===// 1505 // Addressing Mode Selection 1506 //===----------------------------------------------------------------------===// 1507 1508 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit 1509 /// or 64-bit immediate, and if the value can be accurately represented as a 1510 /// sign extension from a 16-bit value. If so, this returns true and the 1511 /// immediate. 1512 static bool isIntS16Immediate(SDNode *N, short &Imm) { 1513 if (!isa<ConstantSDNode>(N)) 1514 return false; 1515 1516 Imm = (short)cast<ConstantSDNode>(N)->getZExtValue(); 1517 if (N->getValueType(0) == MVT::i32) 1518 return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue(); 1519 else 1520 return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue(); 1521 } 1522 static bool isIntS16Immediate(SDValue Op, short &Imm) { 1523 return isIntS16Immediate(Op.getNode(), Imm); 1524 } 1525 1526 1527 /// SelectAddressRegReg - Given the specified address, check to see if it 1528 /// can be represented as an indexed [r+r] operation. Returns false if it 1529 /// can be more efficiently represented with [r+imm]. 1530 bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base, 1531 SDValue &Index, 1532 SelectionDAG &DAG) const { 1533 short imm = 0; 1534 if (N.getOpcode() == ISD::ADD) { 1535 if (isIntS16Immediate(N.getOperand(1), imm)) 1536 return false; // r+i 1537 if (N.getOperand(1).getOpcode() == PPCISD::Lo) 1538 return false; // r+i 1539 1540 Base = N.getOperand(0); 1541 Index = N.getOperand(1); 1542 return true; 1543 } else if (N.getOpcode() == ISD::OR) { 1544 if (isIntS16Immediate(N.getOperand(1), imm)) 1545 return false; // r+i; fold it if we can. 1546 1547 // If this is an or of disjoint bitfields, we can codegen this as an add 1548 // (for better address arithmetic) if the LHS and RHS of the OR are provably 1549 // disjoint. 1550 APInt LHSKnownZero, LHSKnownOne; 1551 APInt RHSKnownZero, RHSKnownOne; 1552 DAG.computeKnownBits(N.getOperand(0), 1553 LHSKnownZero, LHSKnownOne); 1554 1555 if (LHSKnownZero.getBoolValue()) { 1556 DAG.computeKnownBits(N.getOperand(1), 1557 RHSKnownZero, RHSKnownOne); 1558 // If all of the bits are known zero on the LHS or RHS, the add won't 1559 // carry. 1560 if (~(LHSKnownZero | RHSKnownZero) == 0) { 1561 Base = N.getOperand(0); 1562 Index = N.getOperand(1); 1563 return true; 1564 } 1565 } 1566 } 1567 1568 return false; 1569 } 1570 1571 // If we happen to be doing an i64 load or store into a stack slot that has 1572 // less than a 4-byte alignment, then the frame-index elimination may need to 1573 // use an indexed load or store instruction (because the offset may not be a 1574 // multiple of 4). The extra register needed to hold the offset comes from the 1575 // register scavenger, and it is possible that the scavenger will need to use 1576 // an emergency spill slot. As a result, we need to make sure that a spill slot 1577 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned 1578 // stack slot. 1579 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) { 1580 // FIXME: This does not handle the LWA case.
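// Only i64 accesses use the DS instruction forms (LD/STD), whose displacement must be a multiple of 4; other types can always fall back to a D-form with an arbitrary 16-bit offset. (LWA, a DS-form sign-extending i32 load, is the case the FIXME above refers to.)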
1581 if (VT != MVT::i64) 1582 return; 1583 1584 // NOTE: We'll exclude negative FIs here, which come from argument 1585 // lowering, because there are no known test cases triggering this problem 1586 // using packed structures (or similar). We can remove this exclusion if 1587 // we find such a test case. The reason why this is so test-case driven is 1588 // because this entire 'fixup' is only to prevent crashes (from the 1589 // register scavenger) on not-really-valid inputs. For example, if we have: 1590 // %a = alloca i1 1591 // %b = bitcast i1* %a to i64* 1592 // store i64 0, i64* %b 1593 // then the store should really be marked as 'align 1', but is not. If it 1594 // were marked as 'align 1' then the indexed form would have been 1595 // instruction-selected initially, and the problem this 'fixup' is preventing 1596 // won't happen regardless. 1597 if (FrameIdx < 0) 1598 return; 1599 1600 MachineFunction &MF = DAG.getMachineFunction(); 1601 MachineFrameInfo *MFI = MF.getFrameInfo(); 1602 1603 unsigned Align = MFI->getObjectAlignment(FrameIdx); 1604 if (Align >= 4) 1605 return; 1606 1607 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 1608 FuncInfo->setHasNonRISpills(); 1609 } 1610 1611 /// Returns true if the address N can be represented by a base register plus 1612 /// a signed 16-bit displacement [r+imm], and if it is not better 1613 /// represented as reg+reg. If Aligned is true, only accept displacements 1614 /// suitable for STD and friends, i.e. multiples of 4. 1615 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp, 1616 SDValue &Base, 1617 SelectionDAG &DAG, 1618 bool Aligned) const { 1619 // FIXME dl should come from parent load or store, not from address 1620 SDLoc dl(N); 1621 // If this can be more profitably realized as r+r, fail. 1622 if (SelectAddressRegReg(N, Disp, Base, DAG)) 1623 return false; 1624 1625 if (N.getOpcode() == ISD::ADD) { 1626 short imm = 0; 1627 if (isIntS16Immediate(N.getOperand(1), imm) && 1628 (!Aligned || (imm & 3) == 0)) { 1629 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 1630 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 1631 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1632 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 1633 } else { 1634 Base = N.getOperand(0); 1635 } 1636 return true; // [r+i] 1637 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 1638 // Match LOAD (ADD (X, Lo(G))). 1639 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() 1640 && "Cannot handle constant offsets yet!"); 1641 Disp = N.getOperand(1).getOperand(0); // The global address. 1642 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 1643 Disp.getOpcode() == ISD::TargetGlobalTLSAddress || 1644 Disp.getOpcode() == ISD::TargetConstantPool || 1645 Disp.getOpcode() == ISD::TargetJumpTable); 1646 Base = N.getOperand(0); 1647 return true; // [&g+r] 1648 } 1649 } else if (N.getOpcode() == ISD::OR) { 1650 short imm = 0; 1651 if (isIntS16Immediate(N.getOperand(1), imm) && 1652 (!Aligned || (imm & 3) == 0)) { 1653 // If this is an or of disjoint bitfields, we can codegen this as an add 1654 // (for better address arithmetic) if the LHS and RHS of the OR are 1655 // provably disjoint. 1656 APInt LHSKnownZero, LHSKnownOne; 1657 DAG.computeKnownBits(N.getOperand(0), LHSKnownZero, LHSKnownOne); 1658 1659 if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 1660 // If all of the bits are known zero on the LHS or RHS, the add won't 1661 // carry.
1662 if (FrameIndexSDNode *FI = 1663 dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 1664 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1665 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 1666 } else { 1667 Base = N.getOperand(0); 1668 } 1669 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 1670 return true; 1671 } 1672 } 1673 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 1674 // Loading from a constant address. 1675 1676 // If this address fits entirely in a 16-bit sext immediate field, codegen 1677 // this as "d, 0" 1678 short Imm; 1679 if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) { 1680 Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0)); 1681 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, 1682 CN->getValueType(0)); 1683 return true; 1684 } 1685 1686 // Handle 32-bit sext immediates with LIS + addr mode. 1687 if ((CN->getValueType(0) == MVT::i32 || 1688 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) && 1689 (!Aligned || (CN->getZExtValue() & 3) == 0)) { 1690 int Addr = (int)CN->getZExtValue(); 1691 1692 // Otherwise, break this down into an LIS + disp. 1693 Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32); 1694 1695 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl, 1696 MVT::i32); 1697 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8; 1698 Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0); 1699 return true; 1700 } 1701 } 1702 1703 Disp = DAG.getTargetConstant(0, dl, getPointerTy()); 1704 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) { 1705 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1706 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 1707 } else 1708 Base = N; 1709 return true; // [r+0] 1710 } 1711 1712 /// SelectAddressRegRegOnly - Given the specified address, force it to be 1713 /// represented as an indexed [r+r] operation. 1714 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base, 1715 SDValue &Index, 1716 SelectionDAG &DAG) const { 1717 // Check to see if we can easily represent this as an [r+r] address. This 1718 // will fail if it thinks that the address is more profitably represented as 1719 // reg+imm, e.g. where imm = 0. 1720 if (SelectAddressRegReg(N, Base, Index, DAG)) 1721 return true; 1722 1723 // If the operand is an addition, always emit this as [r+r], since this is 1724 // better (for code size, and execution, as the memop does the add for free) 1725 // than emitting an explicit add. 1726 if (N.getOpcode() == ISD::ADD) { 1727 Base = N.getOperand(0); 1728 Index = N.getOperand(1); 1729 return true; 1730 } 1731 1732 // Otherwise, do it the hard way, using R0 as the base register. 1733 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, 1734 N.getValueType()); 1735 Index = N; 1736 return true; 1737 } 1738 1739 /// getPreIndexedAddressParts - Returns true (by value), and sets the base 1740 /// pointer, offset pointer, and addressing mode (by reference) if the node's 1741 /// address can be legally represented as a pre-indexed load/store address.
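/// For example, a store to rX+16 whose updated address is reused can be selected as "stdu rS, 16(rX)", which performs the store and writes rX+16 back to rX in a single instruction.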
1742 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 1743 SDValue &Offset, 1744 ISD::MemIndexedMode &AM, 1745 SelectionDAG &DAG) const { 1746 if (DisablePPCPreinc) return false; 1747 1748 bool isLoad = true; 1749 SDValue Ptr; 1750 EVT VT; 1751 unsigned Alignment; 1752 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1753 Ptr = LD->getBasePtr(); 1754 VT = LD->getMemoryVT(); 1755 Alignment = LD->getAlignment(); 1756 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 1757 Ptr = ST->getBasePtr(); 1758 VT = ST->getMemoryVT(); 1759 Alignment = ST->getAlignment(); 1760 isLoad = false; 1761 } else 1762 return false; 1763 1764 // PowerPC doesn't have preinc load/store instructions for vectors (except 1765 // for QPX, which does have preinc r+r forms). 1766 if (VT.isVector()) { 1767 if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) { 1768 return false; 1769 } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) { 1770 AM = ISD::PRE_INC; 1771 return true; 1772 } 1773 } 1774 1775 if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) { 1776 1777 // Common code will reject creating a pre-inc form if the base pointer 1778 // is a frame index, or if N is a store and the base pointer is either 1779 // the same as or a predecessor of the value being stored. Check for 1780 // those situations here, and try with swapped Base/Offset instead. 1781 bool Swap = false; 1782 1783 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base)) 1784 Swap = true; 1785 else if (!isLoad) { 1786 SDValue Val = cast<StoreSDNode>(N)->getValue(); 1787 if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode())) 1788 Swap = true; 1789 } 1790 1791 if (Swap) 1792 std::swap(Base, Offset); 1793 1794 AM = ISD::PRE_INC; 1795 return true; 1796 } 1797 1798 // LDU/STU can only handle immediates that are a multiple of 4. 1799 if (VT != MVT::i64) { 1800 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, false)) 1801 return false; 1802 } else { 1803 // LDU/STU need an address with at least 4-byte alignment. 1804 if (Alignment < 4) 1805 return false; 1806 1807 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, true)) 1808 return false; 1809 } 1810 1811 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1812 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 1813 // sext i32 to i64 when addr mode is r+i. 1814 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 && 1815 LD->getExtensionType() == ISD::SEXTLOAD && 1816 isa<ConstantSDNode>(Offset)) 1817 return false; 1818 } 1819 1820 AM = ISD::PRE_INC; 1821 return true; 1822 } 1823 1824 //===----------------------------------------------------------------------===// 1825 // LowerOperation implementation 1826 //===----------------------------------------------------------------------===// 1827 1828 /// GetLabelAccessInfo - Return true if we should reference labels using a 1829 /// PICBase, set the HiOpFlags and LoOpFlags to the target MO flags. 1830 static bool GetLabelAccessInfo(const TargetMachine &TM, 1831 const PPCSubtarget &Subtarget, 1832 unsigned &HiOpFlags, unsigned &LoOpFlags, 1833 const GlobalValue *GV = nullptr) { 1834 HiOpFlags = PPCII::MO_HA; 1835 LoOpFlags = PPCII::MO_LO; 1836 1837 // Don't use the pic base if not in PIC relocation model. 
1838 bool isPIC = TM.getRelocationModel() == Reloc::PIC_; 1839 1840 if (isPIC) { 1841 HiOpFlags |= PPCII::MO_PIC_FLAG; 1842 LoOpFlags |= PPCII::MO_PIC_FLAG; 1843 } 1844 1845 // If this is a reference to a global value that requires a non-lazy-ptr, make 1846 // sure that instruction lowering adds it. 1847 if (GV && Subtarget.hasLazyResolverStub(GV)) { 1848 HiOpFlags |= PPCII::MO_NLP_FLAG; 1849 LoOpFlags |= PPCII::MO_NLP_FLAG; 1850 1851 if (GV->hasHiddenVisibility()) { 1852 HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 1853 LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 1854 } 1855 } 1856 1857 return isPIC; 1858 } 1859 1860 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, 1861 SelectionDAG &DAG) { 1862 SDLoc DL(HiPart); 1863 EVT PtrVT = HiPart.getValueType(); 1864 SDValue Zero = DAG.getConstant(0, DL, PtrVT); 1865 1866 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); 1867 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); 1868 1869 // With PIC, the first instruction is actually "GR+hi(&G)". 1870 if (isPIC) 1871 Hi = DAG.getNode(ISD::ADD, DL, PtrVT, 1872 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); 1873 1874 // Generate non-pic code that has direct accesses to the constant pool. 1875 // The address of the global is just (hi(&g)+lo(&g)). 1876 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); 1877 } 1878 1879 static void setUsesTOCBasePtr(MachineFunction &MF) { 1880 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 1881 FuncInfo->setUsesTOCBasePtr(); 1882 } 1883 1884 static void setUsesTOCBasePtr(SelectionDAG &DAG) { 1885 setUsesTOCBasePtr(DAG.getMachineFunction()); 1886 } 1887 1888 static SDValue getTOCEntry(SelectionDAG &DAG, SDLoc dl, bool Is64Bit, 1889 SDValue GA) { 1890 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 1891 SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) : 1892 DAG.getNode(PPCISD::GlobalBaseReg, dl, VT); 1893 1894 SDValue Ops[] = { GA, Reg }; 1895 return DAG.getMemIntrinsicNode(PPCISD::TOC_ENTRY, dl, 1896 DAG.getVTList(VT, MVT::Other), Ops, VT, 1897 MachinePointerInfo::getGOT(), 0, false, true, 1898 false, 0); 1899 } 1900 1901 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 1902 SelectionDAG &DAG) const { 1903 EVT PtrVT = Op.getValueType(); 1904 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 1905 const Constant *C = CP->getConstVal(); 1906 1907 // 64-bit SVR4 ABI code is always position-independent. 1908 // The actual address of the GlobalValue is stored in the TOC. 1909 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 1910 setUsesTOCBasePtr(DAG); 1911 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0); 1912 return getTOCEntry(DAG, SDLoc(CP), true, GA); 1913 } 1914 1915 unsigned MOHiFlag, MOLoFlag; 1916 bool isPIC = 1917 GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag); 1918 1919 if (isPIC && Subtarget.isSVR4ABI()) { 1920 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 1921 PPCII::MO_PIC_FLAG); 1922 return getTOCEntry(DAG, SDLoc(CP), false, GA); 1923 } 1924 1925 SDValue CPIHi = 1926 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag); 1927 SDValue CPILo = 1928 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag); 1929 return LowerLabelRef(CPIHi, CPILo, isPIC, DAG); 1930 } 1931 1932 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 1933 EVT PtrVT = Op.getValueType(); 1934 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 1935 1936 // 64-bit SVR4 ABI code is always position-independent. 
1937 // The actual address of the GlobalValue is stored in the TOC. 1938 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 1939 setUsesTOCBasePtr(DAG); 1940 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 1941 return getTOCEntry(DAG, SDLoc(JT), true, GA); 1942 } 1943 1944 unsigned MOHiFlag, MOLoFlag; 1945 bool isPIC = 1946 GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag); 1947 1948 if (isPIC && Subtarget.isSVR4ABI()) { 1949 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 1950 PPCII::MO_PIC_FLAG); 1951 return getTOCEntry(DAG, SDLoc(GA), false, GA); 1952 } 1953 1954 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 1955 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 1956 return LowerLabelRef(JTIHi, JTILo, isPIC, DAG); 1957 } 1958 1959 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 1960 SelectionDAG &DAG) const { 1961 EVT PtrVT = Op.getValueType(); 1962 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); 1963 const BlockAddress *BA = BASDN->getBlockAddress(); 1964 1965 // 64-bit SVR4 ABI code is always position-independent. 1966 // The actual BlockAddress is stored in the TOC. 1967 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 1968 setUsesTOCBasePtr(DAG); 1969 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); 1970 return getTOCEntry(DAG, SDLoc(BASDN), true, GA); 1971 } 1972 1973 unsigned MOHiFlag, MOLoFlag; 1974 bool isPIC = 1975 GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag); 1976 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 1977 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 1978 return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG); 1979 } 1980 1981 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 1982 SelectionDAG &DAG) const { 1983 1984 // FIXME: TLS addresses currently use medium model code sequences, 1985 // which is the most useful form. Eventually support for small and 1986 // large models could be added if users need it, at the cost of 1987 // additional complexity. 1988 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 1989 SDLoc dl(GA); 1990 const GlobalValue *GV = GA->getGlobal(); 1991 EVT PtrVT = getPointerTy(); 1992 bool is64bit = Subtarget.isPPC64(); 1993 const Module *M = DAG.getMachineFunction().getFunction()->getParent(); 1994 PICLevel::Level picLevel = M->getPICLevel(); 1995 1996 TLSModel::Model Model = getTargetMachine().getTLSModel(GV); 1997 1998 if (Model == TLSModel::LocalExec) { 1999 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2000 PPCII::MO_TPREL_HA); 2001 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2002 PPCII::MO_TPREL_LO); 2003 SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2, 2004 is64bit ? 
MVT::i64 : MVT::i32); 2005 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 2006 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 2007 } 2008 2009 if (Model == TLSModel::InitialExec) { 2010 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2011 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2012 PPCII::MO_TLS); 2013 SDValue GOTPtr; 2014 if (is64bit) { 2015 setUsesTOCBasePtr(DAG); 2016 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2017 GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, 2018 PtrVT, GOTReg, TGA); 2019 } else 2020 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); 2021 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, 2022 PtrVT, TGA, GOTPtr); 2023 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 2024 } 2025 2026 if (Model == TLSModel::GeneralDynamic) { 2027 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2028 SDValue GOTPtr; 2029 if (is64bit) { 2030 setUsesTOCBasePtr(DAG); 2031 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2032 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 2033 GOTReg, TGA); 2034 } else { 2035 if (picLevel == PICLevel::Small) 2036 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2037 else 2038 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2039 } 2040 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT, 2041 GOTPtr, TGA, TGA); 2042 } 2043 2044 if (Model == TLSModel::LocalDynamic) { 2045 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2046 SDValue GOTPtr; 2047 if (is64bit) { 2048 setUsesTOCBasePtr(DAG); 2049 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2050 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 2051 GOTReg, TGA); 2052 } else { 2053 if (picLevel == PICLevel::Small) 2054 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2055 else 2056 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2057 } 2058 SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl, 2059 PtrVT, GOTPtr, TGA, TGA); 2060 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, 2061 PtrVT, TLSAddr, TGA); 2062 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA); 2063 } 2064 2065 llvm_unreachable("Unknown TLS model!"); 2066 } 2067 2068 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 2069 SelectionDAG &DAG) const { 2070 EVT PtrVT = Op.getValueType(); 2071 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 2072 SDLoc DL(GSDN); 2073 const GlobalValue *GV = GSDN->getGlobal(); 2074 2075 // 64-bit SVR4 ABI code is always position-independent. 2076 // The actual address of the GlobalValue is stored in the TOC. 
2077 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2078 setUsesTOCBasePtr(DAG); 2079 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); 2080 return getTOCEntry(DAG, DL, true, GA); 2081 } 2082 2083 unsigned MOHiFlag, MOLoFlag; 2084 bool isPIC = 2085 GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag, GV); 2086 2087 if (isPIC && Subtarget.isSVR4ABI()) { 2088 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 2089 GSDN->getOffset(), 2090 PPCII::MO_PIC_FLAG); 2091 return getTOCEntry(DAG, DL, false, GA); 2092 } 2093 2094 SDValue GAHi = 2095 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); 2096 SDValue GALo = 2097 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); 2098 2099 SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG); 2100 2101 // If the global reference is actually to a non-lazy-pointer, we have to do an 2102 // extra load to get the address of the global. 2103 if (MOHiFlag & PPCII::MO_NLP_FLAG) 2104 Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(), 2105 false, false, false, 0); 2106 return Ptr; 2107 } 2108 2109 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 2110 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2111 SDLoc dl(Op); 2112 2113 if (Op.getValueType() == MVT::v2i64) { 2114 // When the operands themselves are v2i64 values, we need to do something 2115 // special because VSX has no underlying comparison operations for these. 2116 if (Op.getOperand(0).getValueType() == MVT::v2i64) { 2117 // Equality can be handled by casting to the legal type for Altivec 2118 // comparisons; everything else needs to be expanded. 2119 if (CC == ISD::SETEQ || CC == ISD::SETNE) { 2120 return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 2121 DAG.getSetCC(dl, MVT::v4i32, 2122 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)), 2123 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)), 2124 CC)); 2125 } 2126 2127 return SDValue(); 2128 } 2129 2130 // We handle most of these in the usual way. 2131 return Op; 2132 } 2133 2134 // If we're comparing for equality to zero, expose the fact that this is 2135 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can 2136 // fold the new nodes. 2137 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2138 if (C->isNullValue() && CC == ISD::SETEQ) { 2139 EVT VT = Op.getOperand(0).getValueType(); 2140 SDValue Zext = Op.getOperand(0); 2141 if (VT.bitsLT(MVT::i32)) { 2142 VT = MVT::i32; 2143 Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0)); 2144 } 2145 unsigned Log2b = Log2_32(VT.getSizeInBits()); 2146 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext); 2147 SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz, 2148 DAG.getConstant(Log2b, dl, MVT::i32)); 2149 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc); 2150 } 2151 // Leave comparisons against 0 and -1 alone for now, since they're usually 2152 // optimized. FIXME: revisit this when we can custom lower all setcc 2153 // optimizations. 2154 if (C->isAllOnesValue() || C->isNullValue()) 2155 return SDValue(); 2156 } 2157 2158 // If we have an integer seteq/setne, turn it into a compare against zero 2159 // by xor'ing the rhs with the lhs, which is faster than setting a 2160 // condition register, reading it back out, and masking the correct bit. The 2161 // normal approach here uses sub to do this instead of xor. Using xor exposes 2162 // the result to other bit-twiddling opportunities.
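// For example, (seteq %a, %b) becomes (seteq (xor %a, %b), 0), which the zero-equality path above can then lower to a ctlz/srl pair.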
2163 EVT LHSVT = Op.getOperand(0).getValueType(); 2164 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 2165 EVT VT = Op.getValueType(); 2166 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0), 2167 Op.getOperand(1)); 2168 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC); 2169 } 2170 return SDValue(); 2171 } 2172 2173 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG, 2174 const PPCSubtarget &Subtarget) const { 2175 SDNode *Node = Op.getNode(); 2176 EVT VT = Node->getValueType(0); 2177 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2178 SDValue InChain = Node->getOperand(0); 2179 SDValue VAListPtr = Node->getOperand(1); 2180 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 2181 SDLoc dl(Node); 2182 2183 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only"); 2184 2185 // gpr_index 2186 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 2187 VAListPtr, MachinePointerInfo(SV), MVT::i8, 2188 false, false, false, 0); 2189 InChain = GprIndex.getValue(1); 2190 2191 if (VT == MVT::i64) { 2192 // Check if GprIndex is even 2193 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex, 2194 DAG.getConstant(1, dl, MVT::i32)); 2195 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd, 2196 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE); 2197 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex, 2198 DAG.getConstant(1, dl, MVT::i32)); 2199 // Align GprIndex to be even if it isn't 2200 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne, 2201 GprIndex); 2202 } 2203 2204 // fpr index is 1 byte after gpr 2205 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2206 DAG.getConstant(1, dl, MVT::i32)); 2207 2208 // fpr 2209 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 2210 FprPtr, MachinePointerInfo(SV), MVT::i8, 2211 false, false, false, 0); 2212 InChain = FprIndex.getValue(1); 2213 2214 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2215 DAG.getConstant(8, dl, MVT::i32)); 2216 2217 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2218 DAG.getConstant(4, dl, MVT::i32)); 2219 2220 // areas 2221 SDValue OverflowArea = DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, 2222 MachinePointerInfo(), false, false, 2223 false, 0); 2224 InChain = OverflowArea.getValue(1); 2225 2226 SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, 2227 MachinePointerInfo(), false, false, 2228 false, 0); 2229 InChain = RegSaveArea.getValue(1); 2230 2231 // select overflow_area if index >= 8 2232 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex, 2233 DAG.getConstant(8, dl, MVT::i32), ISD::SETLT); 2234 2235 // adjustment constant gpr_index * 4/8 2236 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32, 2237 VT.isInteger() ? GprIndex : FprIndex, 2238 DAG.getConstant(VT.isInteger() ? 4 : 8, dl, 2239 MVT::i32)); 2240 2241 // OurReg = RegSaveArea + RegConstant 2242 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea, 2243 RegConstant); 2244 2245 // Floating types are 32 bytes into RegSaveArea 2246 if (VT.isFloatingPoint()) 2247 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg, 2248 DAG.getConstant(32, dl, MVT::i32)); 2249 2250 // increase {f,g}pr_index by 1 (or 2 if VT is i64) 2251 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32, 2252 VT.isInteger() ? GprIndex : FprIndex, 2253 DAG.getConstant(VT == MVT::i64 ?
2 : 1, dl, 2254 MVT::i32)); 2255 2256 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1, 2257 VT.isInteger() ? VAListPtr : FprPtr, 2258 MachinePointerInfo(SV), 2259 MVT::i8, false, false, 0); 2260 2261 // determine if we should load from reg_save_area or overflow_area 2262 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea); 2263 2264 // increase overflow_area by 4/8 if gpr/fpr >= 8 2265 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea, 2266 DAG.getConstant(VT.isInteger() ? 4 : 8, 2267 dl, MVT::i32)); 2268 2269 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea, 2270 OverflowAreaPlusN); 2271 2272 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, 2273 OverflowAreaPtr, 2274 MachinePointerInfo(), 2275 MVT::i32, false, false, 0); 2276 2277 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(), 2278 false, false, false, 0); 2279 } 2280 2281 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG, 2282 const PPCSubtarget &Subtarget) const { 2283 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only"); 2284 2285 // We have to copy the entire va_list struct: 2286 // 2*sizeof(char) + 2 bytes of padding + 2*sizeof(char*) = 12 bytes 2287 return DAG.getMemcpy(Op.getOperand(0), Op, 2288 Op.getOperand(1), Op.getOperand(2), 2289 DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true, 2290 false, MachinePointerInfo(), MachinePointerInfo()); 2291 } 2292 2293 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 2294 SelectionDAG &DAG) const { 2295 return Op.getOperand(0); 2296 } 2297 2298 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 2299 SelectionDAG &DAG) const { 2300 SDValue Chain = Op.getOperand(0); 2301 SDValue Trmp = Op.getOperand(1); // trampoline 2302 SDValue FPtr = Op.getOperand(2); // nested function 2303 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 2304 SDLoc dl(Op); 2305 2306 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2307 bool isPPC64 = (PtrVT == MVT::i64); 2308 Type *IntPtrTy = 2309 DAG.getTargetLoweringInfo().getDataLayout()->getIntPtrType( 2310 *DAG.getContext()); 2311 2312 TargetLowering::ArgListTy Args; 2313 TargetLowering::ArgListEntry Entry; 2314 2315 Entry.Ty = IntPtrTy; 2316 Entry.Node = Trmp; Args.push_back(Entry); 2317 2318 // TrampSize == (isPPC64 ? 48 : 40); 2319 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl, 2320 isPPC64 ? MVT::i64 : MVT::i32); 2321 Args.push_back(Entry); 2322 2323 Entry.Node = FPtr; Args.push_back(Entry); 2324 Entry.Node = Nest; Args.push_back(Entry); 2325 2326 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 2327 TargetLowering::CallLoweringInfo CLI(DAG); 2328 CLI.setDebugLoc(dl).setChain(Chain) 2329 .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()), 2330 DAG.getExternalSymbol("__trampoline_setup", PtrVT), 2331 std::move(Args), 0); 2332 2333 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 2334 return CallResult.second; 2335 } 2336 2337 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG, 2338 const PPCSubtarget &Subtarget) const { 2339 MachineFunction &MF = DAG.getMachineFunction(); 2340 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2341 2342 SDLoc dl(Op); 2343 2344 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) { 2345 // vastart just stores the address of the VarArgsFrameIndex slot into the 2346 // memory location argument.
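// Both of these ABIs use a simple pointer for va_list, so storing the frame address is the only setup required.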
2347 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2348 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2349 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2350 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2351 MachinePointerInfo(SV), 2352 false, false, 0); 2353 } 2354 2355 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 2356 // We suppose the given va_list is already allocated. 2357 // 2358 // typedef struct { 2359 // char gpr; /* index into the array of 8 GPRs 2360 // * stored in the register save area 2361 // * gpr=0 corresponds to r3, 2362 // * gpr=1 to r4, etc. 2363 // */ 2364 // char fpr; /* index into the array of 8 FPRs 2365 // * stored in the register save area 2366 // * fpr=0 corresponds to f1, 2367 // * fpr=1 to f2, etc. 2368 // */ 2369 // char *overflow_arg_area; 2370 // /* location on stack that holds 2371 // * the next overflow argument 2372 // */ 2373 // char *reg_save_area; 2374 // /* where r3:r10 and f1:f8 (if saved) 2375 // * are stored 2376 // */ 2377 // } va_list[1]; 2378 2379 2380 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32); 2381 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32); 2382 2383 2384 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2385 2386 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 2387 PtrVT); 2388 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 2389 PtrVT); 2390 2391 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 2392 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT); 2393 2394 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 2395 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT); 2396 2397 uint64_t FPROffset = 1; 2398 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT); 2399 2400 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2401 2402 // Store first byte : number of int regs 2403 SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, 2404 Op.getOperand(1), 2405 MachinePointerInfo(SV), 2406 MVT::i8, false, false, 0); 2407 uint64_t nextOffset = FPROffset; 2408 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 2409 ConstFPROffset); 2410 2411 // Store second byte : number of float regs 2412 SDValue secondStore = 2413 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 2414 MachinePointerInfo(SV, nextOffset), MVT::i8, 2415 false, false, 0); 2416 nextOffset += StackOffset; 2417 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 2418 2419 // Store second word : arguments given on stack 2420 SDValue thirdStore = 2421 DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 2422 MachinePointerInfo(SV, nextOffset), 2423 false, false, 0); 2424 nextOffset += FrameOffset; 2425 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 2426 2427 // Store third word : arguments given in registers 2428 return DAG.getStore(thirdStore, dl, FR, nextPtr, 2429 MachinePointerInfo(SV, nextOffset), 2430 false, false, 0); 2431 2432 } 2433 2434 #include "PPCGenCallingConv.inc" 2435 2436 // Function whose sole purpose is to kill compiler warnings 2437 // stemming from unused functions included from PPCGenCallingConv.inc. 2438 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const { 2439 return Flag ? 
CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS; 2440 } 2441 2442 bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 2443 CCValAssign::LocInfo &LocInfo, 2444 ISD::ArgFlagsTy &ArgFlags, 2445 CCState &State) { 2446 return true; 2447 } 2448 2449 bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, 2450 MVT &LocVT, 2451 CCValAssign::LocInfo &LocInfo, 2452 ISD::ArgFlagsTy &ArgFlags, 2453 CCState &State) { 2454 static const MCPhysReg ArgRegs[] = { 2455 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2456 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2457 }; 2458 const unsigned NumArgRegs = array_lengthof(ArgRegs); 2459 2460 unsigned RegNum = State.getFirstUnallocated(ArgRegs); 2461 2462 // Skip one register if the first unallocated register has an even register 2463 // number and there are still argument registers available which have not been 2464 // allocated yet. RegNum is actually an index into ArgRegs, which means we 2465 // need to skip a register if RegNum is odd. 2466 if (RegNum != NumArgRegs && RegNum % 2 == 1) { 2467 State.AllocateReg(ArgRegs[RegNum]); 2468 } 2469 2470 // Always return false here, as this function only makes sure that the first 2471 // unallocated register has an odd register number and does not actually 2472 // allocate a register for the current argument. 2473 return false; 2474 } 2475 2476 bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT, 2477 MVT &LocVT, 2478 CCValAssign::LocInfo &LocInfo, 2479 ISD::ArgFlagsTy &ArgFlags, 2480 CCState &State) { 2481 static const MCPhysReg ArgRegs[] = { 2482 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 2483 PPC::F8 2484 }; 2485 2486 const unsigned NumArgRegs = array_lengthof(ArgRegs); 2487 2488 unsigned RegNum = State.getFirstUnallocated(ArgRegs); 2489 2490 // If there is only one Floating-point register left we need to put both f64 2491 // values of a split ppc_fp128 value on the stack. 2492 if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) { 2493 State.AllocateReg(ArgRegs[RegNum]); 2494 } 2495 2496 // Always return false here, as this function only makes sure that the two f64 2497 // values a ppc_fp128 value is split into are both passed in registers or both 2498 // passed on the stack and does not actually allocate a register for the 2499 // current argument. 2500 return false; 2501 } 2502 2503 /// FPR - The set of FP registers that should be allocated for arguments, 2504 /// on Darwin. 2505 static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, 2506 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10, 2507 PPC::F11, PPC::F12, PPC::F13}; 2508 2509 /// QFPR - The set of QPX registers that should be allocated for arguments. 2510 static const MCPhysReg QFPR[] = { 2511 PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7, 2512 PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13}; 2513 2514 /// CalculateStackSlotSize - Calculates the size reserved for this argument on 2515 /// the stack. 2516 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 2517 unsigned PtrByteSize) { 2518 unsigned ArgSize = ArgVT.getStoreSize(); 2519 if (Flags.isByVal()) 2520 ArgSize = Flags.getByValSize(); 2521 2522 // Round up to multiples of the pointer size, except for array members, 2523 // which are always packed. 
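// For example, a 3-byte by-value struct occupies a full 8-byte slot on PPC64 (PtrByteSize == 8), while an array member keeps its packed 3-byte size.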
2524 if (!Flags.isInConsecutiveRegs()) 2525 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2526 2527 return ArgSize; 2528 } 2529 2530 /// CalculateStackSlotAlignment - Calculates the alignment of this argument 2531 /// on the stack. 2532 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, 2533 ISD::ArgFlagsTy Flags, 2534 unsigned PtrByteSize) { 2535 unsigned Align = PtrByteSize; 2536 2537 // Altivec parameters are padded to a 16 byte boundary. 2538 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 2539 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 2540 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 2541 ArgVT == MVT::v1i128) 2542 Align = 16; 2543 // QPX vector types stored in double-precision are padded to a 32 byte 2544 // boundary. 2545 else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1) 2546 Align = 32; 2547 2548 // ByVal parameters are aligned as requested. 2549 if (Flags.isByVal()) { 2550 unsigned BVAlign = Flags.getByValAlign(); 2551 if (BVAlign > PtrByteSize) { 2552 if (BVAlign % PtrByteSize != 0) 2553 llvm_unreachable( 2554 "ByVal alignment is not a multiple of the pointer size"); 2555 2556 Align = BVAlign; 2557 } 2558 } 2559 2560 // Array members are always packed to their original alignment. 2561 if (Flags.isInConsecutiveRegs()) { 2562 // If the array member was split into multiple registers, the first 2563 // needs to be aligned to the size of the full type. (Except for 2564 // ppcf128, which is only aligned as its f64 components.) 2565 if (Flags.isSplit() && OrigVT != MVT::ppcf128) 2566 Align = OrigVT.getStoreSize(); 2567 else 2568 Align = ArgVT.getStoreSize(); 2569 } 2570 2571 return Align; 2572 } 2573 2574 /// CalculateStackSlotUsed - Return whether this argument will use its 2575 /// stack slot (instead of being passed in registers). ArgOffset, 2576 /// AvailableFPRs, and AvailableVRs must hold the current argument 2577 /// position, and will be updated to account for this argument. 2578 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, 2579 ISD::ArgFlagsTy Flags, 2580 unsigned PtrByteSize, 2581 unsigned LinkageSize, 2582 unsigned ParamAreaSize, 2583 unsigned &ArgOffset, 2584 unsigned &AvailableFPRs, 2585 unsigned &AvailableVRs, bool HasQPX) { 2586 bool UseMemory = false; 2587 2588 // Respect alignment of argument on the stack. 2589 unsigned Align = 2590 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 2591 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 2592 // If there's no space left in the argument save area, we must 2593 // use memory (this check also catches zero-sized arguments). 2594 if (ArgOffset >= LinkageSize + ParamAreaSize) 2595 UseMemory = true; 2596 2597 // Allocate argument on the stack. 2598 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 2599 if (Flags.isInConsecutiveRegsLast()) 2600 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2601 // If we overran the argument save area, we must use memory 2602 // (this check catches arguments passed partially in memory) 2603 if (ArgOffset > LinkageSize + ParamAreaSize) 2604 UseMemory = true; 2605 2606 // However, if the argument is actually passed in an FPR or a VR, 2607 // we don't use memory after all. 2608 if (!Flags.isByVal()) { 2609 if (ArgVT == MVT::f32 || ArgVT == MVT::f64 || 2610 // QPX registers overlap with the scalar FP registers. 
2611 (HasQPX && (ArgVT == MVT::v4f32 || 2612 ArgVT == MVT::v4f64 || 2613 ArgVT == MVT::v4i1))) 2614 if (AvailableFPRs > 0) { 2615 --AvailableFPRs; 2616 return false; 2617 } 2618 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 2619 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 2620 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 2621 ArgVT == MVT::v1i128) 2622 if (AvailableVRs > 0) { 2623 --AvailableVRs; 2624 return false; 2625 } 2626 } 2627 2628 return UseMemory; 2629 } 2630 2631 /// EnsureStackAlignment - Round stack frame size up from NumBytes to 2632 /// ensure minimum alignment required for target. 2633 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, 2634 unsigned NumBytes) { 2635 unsigned TargetAlign = Lowering->getStackAlignment(); 2636 unsigned AlignMask = TargetAlign - 1; 2637 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 2638 return NumBytes; 2639 } 2640 2641 SDValue 2642 PPCTargetLowering::LowerFormalArguments(SDValue Chain, 2643 CallingConv::ID CallConv, bool isVarArg, 2644 const SmallVectorImpl<ISD::InputArg> 2645 &Ins, 2646 SDLoc dl, SelectionDAG &DAG, 2647 SmallVectorImpl<SDValue> &InVals) 2648 const { 2649 if (Subtarget.isSVR4ABI()) { 2650 if (Subtarget.isPPC64()) 2651 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, 2652 dl, DAG, InVals); 2653 else 2654 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, 2655 dl, DAG, InVals); 2656 } else { 2657 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, 2658 dl, DAG, InVals); 2659 } 2660 } 2661 2662 SDValue 2663 PPCTargetLowering::LowerFormalArguments_32SVR4( 2664 SDValue Chain, 2665 CallingConv::ID CallConv, bool isVarArg, 2666 const SmallVectorImpl<ISD::InputArg> 2667 &Ins, 2668 SDLoc dl, SelectionDAG &DAG, 2669 SmallVectorImpl<SDValue> &InVals) const { 2670 2671 // 32-bit SVR4 ABI Stack Frame Layout: 2672 // +-----------------------------------+ 2673 // +--> | Back chain | 2674 // | +-----------------------------------+ 2675 // | | Floating-point register save area | 2676 // | +-----------------------------------+ 2677 // | | General register save area | 2678 // | +-----------------------------------+ 2679 // | | CR save word | 2680 // | +-----------------------------------+ 2681 // | | VRSAVE save word | 2682 // | +-----------------------------------+ 2683 // | | Alignment padding | 2684 // | +-----------------------------------+ 2685 // | | Vector register save area | 2686 // | +-----------------------------------+ 2687 // | | Local variable space | 2688 // | +-----------------------------------+ 2689 // | | Parameter list area | 2690 // | +-----------------------------------+ 2691 // | | LR save word | 2692 // | +-----------------------------------+ 2693 // SP--> +--- | Back chain | 2694 // +-----------------------------------+ 2695 // 2696 // Specifications: 2697 // System V Application Binary Interface PowerPC Processor Supplement 2698 // AltiVec Technology Programming Interface Manual 2699 2700 MachineFunction &MF = DAG.getMachineFunction(); 2701 MachineFrameInfo *MFI = MF.getFrameInfo(); 2702 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2703 2704 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2705 // Potential tail calls could cause overwriting of argument stack slots. 2706 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 2707 (CallConv == CallingConv::Fast)); 2708 unsigned PtrByteSize = 4; 2709 2710 // Assign locations to all of the incoming arguments. 
2711 SmallVector<CCValAssign, 16> ArgLocs; 2712 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 2713 *DAG.getContext()); 2714 2715 // Reserve space for the linkage area on the stack. 2716 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 2717 CCInfo.AllocateStack(LinkageSize, PtrByteSize); 2718 2719 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 2720 2721 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2722 CCValAssign &VA = ArgLocs[i]; 2723 2724 // Arguments stored in registers. 2725 if (VA.isRegLoc()) { 2726 const TargetRegisterClass *RC; 2727 EVT ValVT = VA.getValVT(); 2728 2729 switch (ValVT.getSimpleVT().SimpleTy) { 2730 default: 2731 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 2732 case MVT::i1: 2733 case MVT::i32: 2734 RC = &PPC::GPRCRegClass; 2735 break; 2736 case MVT::f32: 2737 if (Subtarget.hasP8Vector()) 2738 RC = &PPC::VSSRCRegClass; 2739 else 2740 RC = &PPC::F4RCRegClass; 2741 break; 2742 case MVT::f64: 2743 if (Subtarget.hasVSX()) 2744 RC = &PPC::VSFRCRegClass; 2745 else 2746 RC = &PPC::F8RCRegClass; 2747 break; 2748 case MVT::v16i8: 2749 case MVT::v8i16: 2750 case MVT::v4i32: 2751 RC = &PPC::VRRCRegClass; 2752 break; 2753 case MVT::v4f32: 2754 RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass; 2755 break; 2756 case MVT::v2f64: 2757 case MVT::v2i64: 2758 RC = &PPC::VSHRCRegClass; 2759 break; 2760 case MVT::v4f64: 2761 RC = &PPC::QFRCRegClass; 2762 break; 2763 case MVT::v4i1: 2764 RC = &PPC::QBRCRegClass; 2765 break; 2766 } 2767 2768 // Transform the arguments stored in physical registers into virtual ones. 2769 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2770 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, 2771 ValVT == MVT::i1 ? MVT::i32 : ValVT); 2772 2773 if (ValVT == MVT::i1) 2774 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); 2775 2776 InVals.push_back(ArgValue); 2777 } else { 2778 // Argument stored in memory. 2779 assert(VA.isMemLoc()); 2780 2781 unsigned ArgSize = VA.getLocVT().getStoreSize(); 2782 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), 2783 isImmutable); 2784 2785 // Create load nodes to retrieve arguments from the stack. 2786 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2787 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2788 MachinePointerInfo(), 2789 false, false, false, 0)); 2790 } 2791 } 2792 2793 // Assign locations to all of the incoming aggregate by value arguments. 2794 // Aggregates passed by value are stored in the local variable space of the 2795 // caller's stack frame, right above the parameter list area. 2796 SmallVector<CCValAssign, 16> ByValArgLocs; 2797 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 2798 ByValArgLocs, *DAG.getContext()); 2799 2800 // Reserve stack space for the allocations in CCInfo. 2801 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 2802 2803 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 2804 2805 // Area that is at least reserved in the caller of this function. 2806 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 2807 MinReservedArea = std::max(MinReservedArea, LinkageSize); 2808 2809 // Set the size that is at least reserved in caller of this function. Tail 2810 // call optimized function's reserved stack space needs to be aligned so that 2811 // taking the difference between two stack areas will result in an aligned 2812 // stack. 
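// For example, with a 16-byte target stack alignment, a 20-byte reserved area is rounded up to 32 bytes by EnsureStackAlignment below.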
2813 MinReservedArea = 2814 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 2815 FuncInfo->setMinReservedArea(MinReservedArea); 2816 2817 SmallVector<SDValue, 8> MemOps; 2818 2819 // If the function takes a variable number of arguments, make a frame index 2820 // for the start of the first vararg value... for expansion of llvm.va_start. 2821 if (isVarArg) { 2822 static const MCPhysReg GPArgRegs[] = { 2823 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2824 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2825 }; 2826 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); 2827 2828 static const MCPhysReg FPArgRegs[] = { 2829 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 2830 PPC::F8 2831 }; 2832 unsigned NumFPArgRegs = array_lengthof(FPArgRegs); 2833 if (DisablePPCFloatInVariadic) 2834 NumFPArgRegs = 0; 2835 2836 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs)); 2837 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs)); 2838 2839 // Make room for NumGPArgRegs and NumFPArgRegs. 2840 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + 2841 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8; 2842 2843 FuncInfo->setVarArgsStackOffset( 2844 MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 2845 CCInfo.getNextStackOffset(), true)); 2846 2847 FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false)); 2848 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2849 2850 // The fixed integer arguments of a variadic function are stored to the 2851 // VarArgsFrameIndex on the stack so that they may be loaded by dereferencing 2852 // the result of va_next. 2853 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) { 2854 // Get an existing live-in vreg, or add a new one. 2855 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]); 2856 if (!VReg) 2857 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); 2858 2859 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2860 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2861 MachinePointerInfo(), false, false, 0); 2862 MemOps.push_back(Store); 2863 // Increment the address by four for the next argument to store 2864 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); 2865 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2866 } 2867 2868 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 2869 // is set. 2870 // The double arguments are stored to the VarArgsFrameIndex 2871 // on the stack. 2872 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) { 2873 // Get an existing live-in vreg, or add a new one. 2874 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); 2875 if (!VReg) 2876 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 2877 2878 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 2879 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2880 MachinePointerInfo(), false, false, 0); 2881 MemOps.push_back(Store); 2882 // Increment the address by eight for the next argument to store 2883 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl, 2884 PtrVT); 2885 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2886 } 2887 } 2888 2889 if (!MemOps.empty()) 2890 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 2891 2892 return Chain; 2893 } 2894 2895 // PPC64 passes i8, i16, and i32 values in i64 registers.
Promote 2896 // value to MVT::i64 and then truncate to the correct register size. 2897 SDValue 2898 PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT, 2899 SelectionDAG &DAG, SDValue ArgVal, 2900 SDLoc dl) const { 2901 if (Flags.isSExt()) 2902 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 2903 DAG.getValueType(ObjectVT)); 2904 else if (Flags.isZExt()) 2905 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 2906 DAG.getValueType(ObjectVT)); 2907 2908 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal); 2909 } 2910 2911 SDValue 2912 PPCTargetLowering::LowerFormalArguments_64SVR4( 2913 SDValue Chain, 2914 CallingConv::ID CallConv, bool isVarArg, 2915 const SmallVectorImpl<ISD::InputArg> 2916 &Ins, 2917 SDLoc dl, SelectionDAG &DAG, 2918 SmallVectorImpl<SDValue> &InVals) const { 2919 // TODO: add description of PPC stack frame format, or at least some docs. 2920 // 2921 bool isELFv2ABI = Subtarget.isELFv2ABI(); 2922 bool isLittleEndian = Subtarget.isLittleEndian(); 2923 MachineFunction &MF = DAG.getMachineFunction(); 2924 MachineFrameInfo *MFI = MF.getFrameInfo(); 2925 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2926 2927 assert(!(CallConv == CallingConv::Fast && isVarArg) && 2928 "fastcc not supported on varargs functions"); 2929 2930 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2931 // Potential tail calls could cause overwriting of argument stack slots. 2932 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 2933 (CallConv == CallingConv::Fast)); 2934 unsigned PtrByteSize = 8; 2935 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 2936 2937 static const MCPhysReg GPR[] = { 2938 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 2939 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 2940 }; 2941 static const MCPhysReg VR[] = { 2942 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 2943 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 2944 }; 2945 static const MCPhysReg VSRH[] = { 2946 PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8, 2947 PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13 2948 }; 2949 2950 const unsigned Num_GPR_Regs = array_lengthof(GPR); 2951 const unsigned Num_FPR_Regs = 13; 2952 const unsigned Num_VR_Regs = array_lengthof(VR); 2953 const unsigned Num_QFPR_Regs = Num_FPR_Regs; 2954 2955 // Do a first pass over the arguments to determine whether the ABI 2956 // guarantees that our caller has allocated the parameter save area 2957 // on its stack frame. In the ELFv1 ABI, this is always the case; 2958 // in the ELFv2 ABI, it is true if this is a vararg function or if 2959 // any parameter is located in a stack slot. 2960 2961 bool HasParameterArea = !isELFv2ABI || isVarArg; 2962 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize; 2963 unsigned NumBytes = LinkageSize; 2964 unsigned AvailableFPRs = Num_FPR_Regs; 2965 unsigned AvailableVRs = Num_VR_Regs; 2966 for (unsigned i = 0, e = Ins.size(); i != e; ++i) 2967 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags, 2968 PtrByteSize, LinkageSize, ParamAreaSize, 2969 NumBytes, AvailableFPRs, AvailableVRs, 2970 Subtarget.hasQPX())) 2971 HasParameterArea = true; 2972 2973 // Add DAG nodes to load the arguments or copy them out of registers. On 2974 // entry to a function on PPC, the arguments start after the linkage area, 2975 // although the first ones are often in registers. 
2976
2977 unsigned ArgOffset = LinkageSize;
2978 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
2979 unsigned &QFPR_idx = FPR_idx;
2980 SmallVector<SDValue, 8> MemOps;
2981 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
2982 unsigned CurArgIdx = 0;
2983 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
2984 SDValue ArgVal;
2985 bool needsLoad = false;
2986 EVT ObjectVT = Ins[ArgNo].VT;
2987 EVT OrigVT = Ins[ArgNo].ArgVT;
2988 unsigned ObjSize = ObjectVT.getStoreSize();
2989 unsigned ArgSize = ObjSize;
2990 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
2991 if (Ins[ArgNo].isOrigArg()) {
2992 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
2993 CurArgIdx = Ins[ArgNo].getOrigArgIndex();
2994 }
2995 // We re-align the argument offset for each argument, except when using the
2996 // fast calling convention, when we need to make sure we do that only when
2997 // we'll actually use a stack slot.
2998 unsigned CurArgOffset, Align;
2999 auto ComputeArgOffset = [&]() {
3000 /* Respect alignment of argument on the stack. */
3001 Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
3002 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
3003 CurArgOffset = ArgOffset;
3004 };
3005
3006 if (CallConv != CallingConv::Fast) {
3007 ComputeArgOffset();
3008
3009 /* Compute GPR index associated with argument offset. */
3010 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
3011 GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
3012 }
3013
3014 // FIXME the codegen can be much improved in some cases.
3015 // We do not have to keep everything in memory.
3016 if (Flags.isByVal()) {
3017 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
3018
3019 if (CallConv == CallingConv::Fast)
3020 ComputeArgOffset();
3021
3022 // ObjSize is the true size, ArgSize rounded up to multiple of registers.
3023 ObjSize = Flags.getByValSize();
3024 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3025 // Empty aggregate parameters do not take up registers. Examples:
3026 // struct { } a;
3027 // union { } b;
3028 // int c[0];
3029 // etc. However, we have to provide a place-holder in InVals, so
3030 // pretend we have an 8-byte item at the current address for that
3031 // purpose.
3032 if (!ObjSize) {
3033 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
3034 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3035 InVals.push_back(FIN);
3036 continue;
3037 }
3038
3039 // Create a stack object covering all stack doublewords occupied
3040 // by the argument. If the argument is (fully or partially) on
3041 // the stack, or if the argument is fully in registers but the
3042 // caller has allocated the parameter save area anyway, we can refer
3043 // directly to the caller's stack frame. Otherwise, create a
3044 // local copy in our own frame.
3045 int FI;
3046 if (HasParameterArea ||
3047 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
3048 FI = MFI->CreateFixedObject(ArgSize, ArgOffset, false, true);
3049 else
3050 FI = MFI->CreateStackObject(ArgSize, Align, false);
3051 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3052
3053 // Handle aggregates smaller than 8 bytes.
3054 if (ObjSize < PtrByteSize) {
3055 // The value of the object is its address, which differs from the
3056 // address of the enclosing doubleword on big-endian systems.
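// For example (illustrative only): with PtrByteSize == 8, a 2-byte aggregate
// passed in a GPR lives in the high-order bytes of its doubleword on a
// big-endian system, so the adjustment below computes doubleword address + 6.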
3057 SDValue Arg = FIN;
3058 if (!isLittleEndian) {
3059 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
3060 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
3061 }
3062 InVals.push_back(Arg);
3063
3064 if (GPR_idx != Num_GPR_Regs) {
3065 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3066 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3067 SDValue Store;
3068
3069 if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
3070 EVT ObjType = (ObjSize == 1 ? MVT::i8 :
3071 (ObjSize == 2 ? MVT::i16 : MVT::i32));
3072 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
3073 MachinePointerInfo(FuncArg),
3074 ObjType, false, false, 0);
3075 } else {
3076 // For sizes that don't fit a truncating store (3, 5, 6, 7),
3077 // store the whole register as-is to the parameter save area
3078 // slot.
3079 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
3080 MachinePointerInfo(FuncArg),
3081 false, false, 0);
3082 }
3083
3084 MemOps.push_back(Store);
3085 }
3086 // Whether we copied from a register or not, advance the offset
3087 // into the parameter save area by a full doubleword.
3088 ArgOffset += PtrByteSize;
3089 continue;
3090 }
3091
3092 // The value of the object is its address, which is the address of
3093 // its first stack doubleword.
3094 InVals.push_back(FIN);
3095
3096 // Store whatever pieces of the object are in registers to memory.
3097 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
3098 if (GPR_idx == Num_GPR_Regs)
3099 break;
3100
3101 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3102 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3103 SDValue Addr = FIN;
3104 if (j) {
3105 SDValue Off = DAG.getConstant(j, dl, PtrVT);
3106 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
3107 }
3108 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
3109 MachinePointerInfo(FuncArg, j),
3110 false, false, 0);
3111 MemOps.push_back(Store);
3112 ++GPR_idx;
3113 }
3114 ArgOffset += ArgSize;
3115 continue;
3116 }
3117
3118 switch (ObjectVT.getSimpleVT().SimpleTy) {
3119 default: llvm_unreachable("Unhandled argument type!");
3120 case MVT::i1:
3121 case MVT::i32:
3122 case MVT::i64:
3123 // These can be scalar arguments or elements of an integer array type
3124 // passed directly. Clang may use those instead of "byval" aggregate
3125 // types to avoid forcing arguments to memory unnecessarily.
3126 if (GPR_idx != Num_GPR_Regs) {
3127 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3128 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3129
3130 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
3131 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3132 // value to MVT::i64 and then truncate to the correct register size.
3133 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3134 } else {
3135 if (CallConv == CallingConv::Fast)
3136 ComputeArgOffset();
3137
3138 needsLoad = true;
3139 ArgSize = PtrByteSize;
3140 }
3141 if (CallConv != CallingConv::Fast || needsLoad)
3142 ArgOffset += 8;
3143 break;
3144
3145 case MVT::f32:
3146 case MVT::f64:
3147 // These can be scalar arguments or elements of a float array type
3148 // passed directly. The latter are used to implement ELFv2 homogeneous
3149 // float aggregates.
3150 if (FPR_idx != Num_FPR_Regs) {
3151 unsigned VReg;
3152
3153 if (ObjectVT == MVT::f32)
3154 VReg = MF.addLiveIn(FPR[FPR_idx],
3155 Subtarget.hasP8Vector()
3156 ?
&PPC::VSSRCRegClass
3157 : &PPC::F4RCRegClass);
3158 else
3159 VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
3160 ? &PPC::VSFRCRegClass
3161 : &PPC::F8RCRegClass);
3162
3163 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3164 ++FPR_idx;
3165 } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
3166 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
3167 // once we support fp <-> gpr moves.
3168
3169 // This can only ever happen in the presence of f32 array types,
3170 // since otherwise we never run out of FPRs before running out
3171 // of GPRs.
3172 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3173 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3174
3175 if (ObjectVT == MVT::f32) {
3176 if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
3177 ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
3178 DAG.getConstant(32, dl, MVT::i32));
3179 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
3180 }
3181
3182 ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
3183 } else {
3184 if (CallConv == CallingConv::Fast)
3185 ComputeArgOffset();
3186
3187 needsLoad = true;
3188 }
3189
3190 // When passing an array of floats, the array occupies consecutive
3191 // space in the argument area; only round up to the next doubleword
3192 // at the end of the array. Otherwise, each float takes 8 bytes.
3193 if (CallConv != CallingConv::Fast || needsLoad) {
3194 ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
3195 ArgOffset += ArgSize;
3196 if (Flags.isInConsecutiveRegsLast())
3197 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3198 }
3199 break;
3200 case MVT::v4f32:
3201 case MVT::v4i32:
3202 case MVT::v8i16:
3203 case MVT::v16i8:
3204 case MVT::v2f64:
3205 case MVT::v2i64:
3206 case MVT::v1i128:
3207 if (!Subtarget.hasQPX()) {
3208 // These can be scalar arguments or elements of a vector array type
3209 // passed directly. The latter are used to implement ELFv2 homogeneous
3210 // vector aggregates.
3211 if (VR_idx != Num_VR_Regs) {
3212 unsigned VReg = (ObjectVT == MVT::v2f64 || ObjectVT == MVT::v2i64) ?
3213 MF.addLiveIn(VSRH[VR_idx], &PPC::VSHRCRegClass) :
3214 MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
3215 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3216 ++VR_idx;
3217 } else {
3218 if (CallConv == CallingConv::Fast)
3219 ComputeArgOffset();
3220
3221 needsLoad = true;
3222 }
3223 if (CallConv != CallingConv::Fast || needsLoad)
3224 ArgOffset += 16;
3225 break;
3226 } // not QPX
3227
3228 assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 &&
3229 "Invalid QPX parameter type");
3230 /* fall through */
3231
3232 case MVT::v4f64:
3233 case MVT::v4i1:
3234 // QPX vectors are treated like their scalar floating-point subregisters
3235 // (except that they're larger).
3236 unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ?
16 : 32;
3237 if (QFPR_idx != Num_QFPR_Regs) {
3238 const TargetRegisterClass *RC;
3239 switch (ObjectVT.getSimpleVT().SimpleTy) {
3240 case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
3241 case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
3242 default: RC = &PPC::QBRCRegClass; break;
3243 }
3244
3245 unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
3246 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3247 ++QFPR_idx;
3248 } else {
3249 if (CallConv == CallingConv::Fast)
3250 ComputeArgOffset();
3251 needsLoad = true;
3252 }
3253 if (CallConv != CallingConv::Fast || needsLoad)
3254 ArgOffset += Sz;
3255 break;
3256 }
3257
3258 // We need to load the argument to a virtual register if we determined
3259 // above that we ran out of physical registers of the appropriate type.
3260 if (needsLoad) {
3261 if (ObjSize < ArgSize && !isLittleEndian)
3262 CurArgOffset += ArgSize - ObjSize;
3263 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
3264 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3265 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
3266 false, false, false, 0);
3267 }
3268
3269 InVals.push_back(ArgVal);
3270 }
3271
3272 // Area that is at least reserved in the caller of this function.
3273 unsigned MinReservedArea;
3274 if (HasParameterArea)
3275 MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
3276 else
3277 MinReservedArea = LinkageSize;
3278
3279 // Set the size that is at least reserved in caller of this function. Tail
3280 // call optimized functions' reserved stack space needs to be aligned so that
3281 // taking the difference between two stack areas will result in an aligned
3282 // stack.
3283 MinReservedArea =
3284 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3285 FuncInfo->setMinReservedArea(MinReservedArea);
3286
3287 // If the function takes a variable number of arguments, make a frame index
3288 // for the start of the first vararg value... for expansion of llvm.va_start.
3289 if (isVarArg) {
3290 int Depth = ArgOffset;
3291
3292 FuncInfo->setVarArgsFrameIndex(
3293 MFI->CreateFixedObject(PtrByteSize, Depth, true));
3294 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3295
3296 // If this function is vararg, store any remaining integer argument regs
3297 // to their spots on the stack so that they may be loaded by dereferencing
3298 // the result of va_next.
3299 for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
3300 GPR_idx < Num_GPR_Regs; ++GPR_idx) {
3301 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3302 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3303 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
3304 MachinePointerInfo(), false, false, 0);
3305 MemOps.push_back(Store);
3306 // Increment the address by eight for the next argument to store
3307 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
3308 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3309 }
3310 }
3311
3312 if (!MemOps.empty())
3313 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3314
3315 return Chain;
3316 }
3317
3318 SDValue
3319 PPCTargetLowering::LowerFormalArguments_Darwin(
3320 SDValue Chain,
3321 CallingConv::ID CallConv, bool isVarArg,
3322 const SmallVectorImpl<ISD::InputArg>
3323 &Ins,
3324 SDLoc dl, SelectionDAG &DAG,
3325 SmallVectorImpl<SDValue> &InVals) const {
3326 // TODO: add description of PPC stack frame format, or at least some docs.
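// (A rough sketch, hedged; the ABI documents are authoritative: the linkage
// area at the bottom of the frame holds the back chain, the saved CR and LR,
// and a few reserved words, and the parameter save area for at least eight
// pointer-sized arguments begins directly above it at offset LinkageSize.)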
3327 //
3328 MachineFunction &MF = DAG.getMachineFunction();
3329 MachineFrameInfo *MFI = MF.getFrameInfo();
3330 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3331
3332 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3333 bool isPPC64 = PtrVT == MVT::i64;
3334 // Potential tail calls could cause overwriting of argument stack slots.
3335 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3336 (CallConv == CallingConv::Fast));
3337 unsigned PtrByteSize = isPPC64 ? 8 : 4;
3338 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3339 unsigned ArgOffset = LinkageSize;
3340 // Area that is at least reserved in caller of this function.
3341 unsigned MinReservedArea = ArgOffset;
3342
3343 static const MCPhysReg GPR_32[] = { // 32-bit registers.
3344 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3345 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3346 };
3347 static const MCPhysReg GPR_64[] = { // 64-bit registers.
3348 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3349 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3350 };
3351 static const MCPhysReg VR[] = {
3352 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3353 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3354 };
3355
3356 const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
3357 const unsigned Num_FPR_Regs = 13;
3358 const unsigned Num_VR_Regs = array_lengthof(VR);
3359
3360 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3361
3362 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
3363
3364 // In 32-bit non-varargs functions, the stack space for vectors is after the
3365 // stack space for non-vectors. We do not use this space unless we have
3366 // too many vectors to fit in registers, something that only occurs in
3367 // constructed examples :), but we have to walk the arglist to figure
3368 // that out... for the pathological case, compute VecArgOffset as the
3369 // start of the vector parameter area. Computing VecArgOffset is the
3370 // entire point of the following loop.
3371 unsigned VecArgOffset = ArgOffset;
3372 if (!isVarArg && !isPPC64) {
3373 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
3374 ++ArgNo) {
3375 EVT ObjectVT = Ins[ArgNo].VT;
3376 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
3377
3378 if (Flags.isByVal()) {
3379 // ObjSize is the true size, ArgSize rounded up to multiple of regs.
3380 unsigned ObjSize = Flags.getByValSize();
3381 unsigned ArgSize =
3382 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3383 VecArgOffset += ArgSize;
3384 continue;
3385 }
3386
3387 switch(ObjectVT.getSimpleVT().SimpleTy) {
3388 default: llvm_unreachable("Unhandled argument type!");
3389 case MVT::i1:
3390 case MVT::i32:
3391 case MVT::f32:
3392 VecArgOffset += 4;
3393 break;
3394 case MVT::i64: // PPC64
3395 case MVT::f64:
3396 // FIXME: We are guaranteed to be !isPPC64 at this point.
3397 // Does MVT::i64 apply?
3398 VecArgOffset += 8;
3399 break;
3400 case MVT::v4f32:
3401 case MVT::v4i32:
3402 case MVT::v8i16:
3403 case MVT::v16i8:
3404 // Nothing to do, we're only looking at non-vector args here.
3405 break;
3406 }
3407 }
3408 }
3409 // We've found where the vector parameter area in memory is. Skip the
3410 // first 12 parameters; these don't use that memory.
3411 VecArgOffset = ((VecArgOffset+15)/16)*16;
3412 VecArgOffset += 12*16;
3413
3414 // Add DAG nodes to load the arguments or copy them out of registers. On
3415 // entry to a function on PPC, the arguments start after the linkage area,
3416 // although the first ones are often in registers.
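// (A hedged example: for a C function such as
//    void f(int a, double b, vector float c);
//  a arrives in R3, b in F1 while also consuming GPR space, and c in V2;
//  per the comments in the switch below, stack space is still reserved for
//  the scalar arguments after the linkage area.)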
3417 3418 SmallVector<SDValue, 8> MemOps; 3419 unsigned nAltivecParamsAtEnd = 0; 3420 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 3421 unsigned CurArgIdx = 0; 3422 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3423 SDValue ArgVal; 3424 bool needsLoad = false; 3425 EVT ObjectVT = Ins[ArgNo].VT; 3426 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 3427 unsigned ArgSize = ObjSize; 3428 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3429 if (Ins[ArgNo].isOrigArg()) { 3430 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 3431 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 3432 } 3433 unsigned CurArgOffset = ArgOffset; 3434 3435 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 3436 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 3437 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 3438 if (isVarArg || isPPC64) { 3439 MinReservedArea = ((MinReservedArea+15)/16)*16; 3440 MinReservedArea += CalculateStackSlotSize(ObjectVT, 3441 Flags, 3442 PtrByteSize); 3443 } else nAltivecParamsAtEnd++; 3444 } else 3445 // Calculate min reserved area. 3446 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 3447 Flags, 3448 PtrByteSize); 3449 3450 // FIXME the codegen can be much improved in some cases. 3451 // We do not have to keep everything in memory. 3452 if (Flags.isByVal()) { 3453 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 3454 3455 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3456 ObjSize = Flags.getByValSize(); 3457 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3458 // Objects of size 1 and 2 are right justified, everything else is 3459 // left justified. This means the memory address is adjusted forwards. 3460 if (ObjSize==1 || ObjSize==2) { 3461 CurArgOffset = CurArgOffset + (4 - ObjSize); 3462 } 3463 // The value of the object is its address. 3464 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, false, true); 3465 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3466 InVals.push_back(FIN); 3467 if (ObjSize==1 || ObjSize==2) { 3468 if (GPR_idx != Num_GPR_Regs) { 3469 unsigned VReg; 3470 if (isPPC64) 3471 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3472 else 3473 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3474 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3475 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16; 3476 SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 3477 MachinePointerInfo(FuncArg), 3478 ObjType, false, false, 0); 3479 MemOps.push_back(Store); 3480 ++GPR_idx; 3481 } 3482 3483 ArgOffset += PtrByteSize; 3484 3485 continue; 3486 } 3487 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3488 // Store whatever pieces of the object are in registers 3489 // to memory. ArgOffset will be the address of the beginning 3490 // of the object. 
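        // (Illustrative only: a 9-byte byval on a 32-bit target has
        //  ArgSize == 12, so up to three GPRs are written back to the stack
        //  here, one PtrByteSize piece per iteration.)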
3491 if (GPR_idx != Num_GPR_Regs) { 3492 unsigned VReg; 3493 if (isPPC64) 3494 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3495 else 3496 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3497 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 3498 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3499 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3500 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3501 MachinePointerInfo(FuncArg, j), 3502 false, false, 0); 3503 MemOps.push_back(Store); 3504 ++GPR_idx; 3505 ArgOffset += PtrByteSize; 3506 } else { 3507 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 3508 break; 3509 } 3510 } 3511 continue; 3512 } 3513 3514 switch (ObjectVT.getSimpleVT().SimpleTy) { 3515 default: llvm_unreachable("Unhandled argument type!"); 3516 case MVT::i1: 3517 case MVT::i32: 3518 if (!isPPC64) { 3519 if (GPR_idx != Num_GPR_Regs) { 3520 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3521 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 3522 3523 if (ObjectVT == MVT::i1) 3524 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal); 3525 3526 ++GPR_idx; 3527 } else { 3528 needsLoad = true; 3529 ArgSize = PtrByteSize; 3530 } 3531 // All int arguments reserve stack space in the Darwin ABI. 3532 ArgOffset += PtrByteSize; 3533 break; 3534 } 3535 // FALLTHROUGH 3536 case MVT::i64: // PPC64 3537 if (GPR_idx != Num_GPR_Regs) { 3538 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3539 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3540 3541 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3542 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3543 // value to MVT::i64 and then truncate to the correct register size. 3544 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3545 3546 ++GPR_idx; 3547 } else { 3548 needsLoad = true; 3549 ArgSize = PtrByteSize; 3550 } 3551 // All int arguments reserve stack space in the Darwin ABI. 3552 ArgOffset += 8; 3553 break; 3554 3555 case MVT::f32: 3556 case MVT::f64: 3557 // Every 4 bytes of argument space consumes one of the GPRs available for 3558 // argument passing. 3559 if (GPR_idx != Num_GPR_Regs) { 3560 ++GPR_idx; 3561 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 3562 ++GPR_idx; 3563 } 3564 if (FPR_idx != Num_FPR_Regs) { 3565 unsigned VReg; 3566 3567 if (ObjectVT == MVT::f32) 3568 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 3569 else 3570 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 3571 3572 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3573 ++FPR_idx; 3574 } else { 3575 needsLoad = true; 3576 } 3577 3578 // All FP arguments reserve stack space in the Darwin ABI. 3579 ArgOffset += isPPC64 ? 8 : ObjSize; 3580 break; 3581 case MVT::v4f32: 3582 case MVT::v4i32: 3583 case MVT::v8i16: 3584 case MVT::v16i8: 3585 // Note that vector arguments in registers don't reserve stack space, 3586 // except in varargs functions. 3587 if (VR_idx != Num_VR_Regs) { 3588 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 3589 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3590 if (isVarArg) { 3591 while ((ArgOffset % 16) != 0) { 3592 ArgOffset += PtrByteSize; 3593 if (GPR_idx != Num_GPR_Regs) 3594 GPR_idx++; 3595 } 3596 ArgOffset += 16; 3597 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 3598 } 3599 ++VR_idx; 3600 } else { 3601 if (!isVarArg && !isPPC64) { 3602 // Vectors go after all the nonvectors. 
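            // (That is, CurArgOffset jumps to the vector parameter area
            //  computed above, 12*16 bytes past the rounded-up non-vector
            //  area.)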
3603 CurArgOffset = VecArgOffset;
3604 VecArgOffset += 16;
3605 } else {
3606 // Vectors are aligned.
3607 ArgOffset = ((ArgOffset+15)/16)*16;
3608 CurArgOffset = ArgOffset;
3609 ArgOffset += 16;
3610 }
3611 needsLoad = true;
3612 }
3613 break;
3614 }
3615
3616 // We need to load the argument to a virtual register if we determined above
3617 // that we ran out of physical registers of the appropriate type.
3618 if (needsLoad) {
3619 int FI = MFI->CreateFixedObject(ObjSize,
3620 CurArgOffset + (ArgSize - ObjSize),
3621 isImmutable);
3622 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3623 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
3624 false, false, false, 0);
3625 }
3626
3627 InVals.push_back(ArgVal);
3628 }
3629
3630 // Allow for Altivec parameters at the end, if needed.
3631 if (nAltivecParamsAtEnd) {
3632 MinReservedArea = ((MinReservedArea+15)/16)*16;
3633 MinReservedArea += 16*nAltivecParamsAtEnd;
3634 }
3635
3636 // Area that is at least reserved in the caller of this function.
3637 MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
3638
3639 // Set the size that is at least reserved in caller of this function. Tail
3640 // call optimized functions' reserved stack space needs to be aligned so that
3641 // taking the difference between two stack areas will result in an aligned
3642 // stack.
3643 MinReservedArea =
3644 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3645 FuncInfo->setMinReservedArea(MinReservedArea);
3646
3647 // If the function takes a variable number of arguments, make a frame index
3648 // for the start of the first vararg value... for expansion of llvm.va_start.
3649 if (isVarArg) {
3650 int Depth = ArgOffset;
3651
3652 FuncInfo->setVarArgsFrameIndex(
3653 MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
3654 Depth, true));
3655 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3656
3657 // If this function is vararg, store any remaining integer argument regs
3658 // to their spots on the stack so that they may be loaded by dereferencing
3659 // the result of va_next.
3660 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
3661 unsigned VReg;
3662
3663 if (isPPC64)
3664 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3665 else
3666 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
3667
3668 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3669 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
3670 MachinePointerInfo(), false, false, 0);
3671 MemOps.push_back(Store);
3672 // Increment the address by the pointer size for the next argument to store
3673 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
3674 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3675 }
3676 }
3677
3678 if (!MemOps.empty())
3679 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3680
3681 return Chain;
3682 }
3683
3684 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
3685 /// adjusted to accommodate the arguments for the tailcall.
3686 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
3687 unsigned ParamSize) {
3688
3689 if (!isTailCall) return 0;
3690
3691 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
3692 unsigned CallerMinReservedArea = FI->getMinReservedArea();
3693 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
3694 // Remember only if the new adjustment is bigger.
3695 if (SPDiff < FI->getTailCallSPDelta())
3696 FI->setTailCallSPDelta(SPDiff);
3697
3698 return SPDiff;
3699 }
3700
3701 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3702 /// for tail call optimization. Targets which want to do tail call
3703 /// optimization should implement this function.
3704 bool
3705 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
3706 CallingConv::ID CalleeCC,
3707 bool isVarArg,
3708 const SmallVectorImpl<ISD::InputArg> &Ins,
3709 SelectionDAG& DAG) const {
3710 if (!getTargetMachine().Options.GuaranteedTailCallOpt)
3711 return false;
3712
3713 // Variable argument functions are not supported.
3714 if (isVarArg)
3715 return false;
3716
3717 MachineFunction &MF = DAG.getMachineFunction();
3718 CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
3719 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
3720 // Functions containing byval parameters are not supported.
3721 for (unsigned i = 0; i != Ins.size(); i++) {
3722 ISD::ArgFlagsTy Flags = Ins[i].Flags;
3723 if (Flags.isByVal()) return false;
3724 }
3725
3726 // Non-PIC/GOT tail calls are supported.
3727 if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
3728 return true;
3729
3730 // At the moment we can only do local tail calls (in same module, hidden
3731 // or protected) if we are generating PIC.
3732 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
3733 return G->getGlobal()->hasHiddenVisibility()
3734 || G->getGlobal()->hasProtectedVisibility();
3735 }
3736
3737 return false;
3738 }
3739
3740 /// isBLACompatibleAddress - Return the immediate to use if the specified
3741 /// 32-bit value is representable in the immediate field of a BxA instruction.
3742 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
3743 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
3744 if (!C) return nullptr;
3745
3746 int Addr = C->getZExtValue();
3747 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
3748 SignExtend32<26>(Addr) != Addr)
3749 return nullptr; // Top 6 bits have to be sext of immediate.
3750
3751 return DAG.getConstant((int)C->getZExtValue() >> 2, SDLoc(Op),
3752 DAG.getTargetLoweringInfo().getPointerTy()).getNode();
3753 }
3754
3755 namespace {
3756
3757 struct TailCallArgumentInfo {
3758 SDValue Arg;
3759 SDValue FrameIdxOp;
3760 int FrameIdx;
3761
3762 TailCallArgumentInfo() : FrameIdx(0) {}
3763 };
3764
3765 }
3766
3767 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
3768 static void
3769 StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG,
3770 SDValue Chain,
3771 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
3772 SmallVectorImpl<SDValue> &MemOpChains,
3773 SDLoc dl) {
3774 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
3775 SDValue Arg = TailCallArgs[i].Arg;
3776 SDValue FIN = TailCallArgs[i].FrameIdxOp;
3777 int FI = TailCallArgs[i].FrameIdx;
3778 // Store relative to framepointer.
3779 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, FIN,
3780 MachinePointerInfo::getFixedStack(FI),
3781 false, false, 0));
3782 }
3783 }
3784
3785 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
3786 /// the appropriate stack slot for the tail call optimized function call.
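/// (Illustrative example: with SPDiff == -32, the return address is re-stored
/// 32 bytes lower, at SPDiff + getReturnSaveOffset() relative to the adjusted
/// stack pointer.)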
3787 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
3788 MachineFunction &MF,
3789 SDValue Chain,
3790 SDValue OldRetAddr,
3791 SDValue OldFP,
3792 int SPDiff,
3793 bool isPPC64,
3794 bool isDarwinABI,
3795 SDLoc dl) {
3796 if (SPDiff) {
3797 // Calculate the new stack slot for the return address.
3798 int SlotSize = isPPC64 ? 8 : 4;
3799 const PPCFrameLowering *FL =
3800 MF.getSubtarget<PPCSubtarget>().getFrameLowering();
3801 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
3802 int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize,
3803 NewRetAddrLoc, true);
3804 EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
3805 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
3806 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
3807 MachinePointerInfo::getFixedStack(NewRetAddr),
3808 false, false, 0);
3809
3810 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack
3811 // slot as the FP is never overwritten.
3812 if (isDarwinABI) {
3813 int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset();
3814 int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc,
3815 true);
3816 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
3817 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
3818 MachinePointerInfo::getFixedStack(NewFPIdx),
3819 false, false, 0);
3820 }
3821 }
3822 return Chain;
3823 }
3824
3825 /// CalculateTailCallArgDest - Remember the argument for later processing.
3826 /// Calculate the position of the argument.
3827 static void
3828 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
3829 SDValue Arg, int SPDiff, unsigned ArgOffset,
3830 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
3831 int Offset = ArgOffset + SPDiff;
3832 uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8;
3833 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
3834 EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
3835 SDValue FIN = DAG.getFrameIndex(FI, VT);
3836 TailCallArgumentInfo Info;
3837 Info.Arg = Arg;
3838 Info.FrameIdxOp = FIN;
3839 Info.FrameIdx = FI;
3840 TailCallArguments.push_back(Info);
3841 }
3842
3843 /// EmitTailCallLoadFPAndRetAddr - Emit loads from the frame pointer and return
3844 /// address stack slots. Returns the chain as result and the loaded return
3845 /// address and frame pointer in LROpOut/FPOpOut. Used when tail calling.
3846 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
3847 int SPDiff,
3848 SDValue Chain,
3849 SDValue &LROpOut,
3850 SDValue &FPOpOut,
3851 bool isDarwinABI,
3852 SDLoc dl) const {
3853 if (SPDiff) {
3854 // Load the LR and FP stack slot for later adjusting.
3855 EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
3856 LROpOut = getReturnAddrFrameIndex(DAG);
3857 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(),
3858 false, false, false, 0);
3859 Chain = SDValue(LROpOut.getNode(), 1);
3860
3861 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
3862 // slot as the FP is never overwritten.
3863 if (isDarwinABI) {
3864 FPOpOut = getFramePointerFrameIndex(DAG);
3865 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(),
3866 false, false, false, 0);
3867 Chain = SDValue(FPOpOut.getNode(), 1);
3868 }
3869 }
3870 return Chain;
3871 }
3872
3873 /// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
3874 /// specified by "Src" to address "Dst" of size "Size". Alignment information
3875 /// is specified by the specific parameter attribute.
The copy will be passed as 3876 /// a byval function parameter. 3877 /// Sometimes what we are copying is the end of a larger object, the part that 3878 /// does not fit in registers. 3879 static SDValue 3880 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 3881 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 3882 SDLoc dl) { 3883 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32); 3884 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 3885 false, false, false, MachinePointerInfo(), 3886 MachinePointerInfo()); 3887 } 3888 3889 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 3890 /// tail calls. 3891 static void 3892 LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, 3893 SDValue Arg, SDValue PtrOff, int SPDiff, 3894 unsigned ArgOffset, bool isPPC64, bool isTailCall, 3895 bool isVector, SmallVectorImpl<SDValue> &MemOpChains, 3896 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, 3897 SDLoc dl) { 3898 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3899 if (!isTailCall) { 3900 if (isVector) { 3901 SDValue StackPtr; 3902 if (isPPC64) 3903 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 3904 else 3905 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 3906 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 3907 DAG.getConstant(ArgOffset, dl, PtrVT)); 3908 } 3909 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, 3910 MachinePointerInfo(), false, false, 0)); 3911 // Calculate and remember argument location. 3912 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 3913 TailCallArguments); 3914 } 3915 3916 static 3917 void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 3918 SDLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes, 3919 SDValue LROp, SDValue FPOp, bool isDarwinABI, 3920 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 3921 MachineFunction &MF = DAG.getMachineFunction(); 3922 3923 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 3924 // might overwrite each other in case of tail call optimization. 3925 SmallVector<SDValue, 8> MemOpChains2; 3926 // Do not flag preceding copytoreg stuff together with the following stuff. 3927 InFlag = SDValue(); 3928 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 3929 MemOpChains2, dl); 3930 if (!MemOpChains2.empty()) 3931 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); 3932 3933 // Store the return address to the appropriate stack slot. 3934 Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff, 3935 isPPC64, isDarwinABI, dl); 3936 3937 // Emit callseq_end just before tailcall node. 3938 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 3939 DAG.getIntPtrConstant(0, dl, true), InFlag, dl); 3940 InFlag = Chain.getValue(1); 3941 } 3942 3943 // Is this global address that of a function that can be called by name? (as 3944 // opposed to something that must hold a descriptor for an indirect call). 
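// For example, the callee of a direct "call void @foo()" qualifies, whereas a
// TLS address or a global of non-function type (such as a function-descriptor
// pointer) does not.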
3945 static bool isFunctionGlobalAddress(SDValue Callee) { 3946 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 3947 if (Callee.getOpcode() == ISD::GlobalTLSAddress || 3948 Callee.getOpcode() == ISD::TargetGlobalTLSAddress) 3949 return false; 3950 3951 return G->getGlobal()->getType()->getElementType()->isFunctionTy(); 3952 } 3953 3954 return false; 3955 } 3956 3957 static 3958 unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, 3959 SDValue &Chain, SDValue CallSeqStart, SDLoc dl, int SPDiff, 3960 bool isTailCall, bool IsPatchPoint, 3961 SmallVectorImpl<std::pair<unsigned, SDValue> > &RegsToPass, 3962 SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys, 3963 ImmutableCallSite *CS, const PPCSubtarget &Subtarget) { 3964 3965 bool isPPC64 = Subtarget.isPPC64(); 3966 bool isSVR4ABI = Subtarget.isSVR4ABI(); 3967 bool isELFv2ABI = Subtarget.isELFv2ABI(); 3968 3969 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3970 NodeTys.push_back(MVT::Other); // Returns a chain 3971 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. 3972 3973 unsigned CallOpc = PPCISD::CALL; 3974 3975 bool needIndirectCall = true; 3976 if (!isSVR4ABI || !isPPC64) 3977 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { 3978 // If this is an absolute destination address, use the munged value. 3979 Callee = SDValue(Dest, 0); 3980 needIndirectCall = false; 3981 } 3982 3983 if (isFunctionGlobalAddress(Callee)) { 3984 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee); 3985 // A call to a TLS address is actually an indirect call to a 3986 // thread-specific pointer. 3987 unsigned OpFlags = 0; 3988 if ((DAG.getTarget().getRelocationModel() != Reloc::Static && 3989 (Subtarget.getTargetTriple().isMacOSX() && 3990 Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5)) && 3991 (G->getGlobal()->isDeclaration() || 3992 G->getGlobal()->isWeakForLinker())) || 3993 (Subtarget.isTargetELF() && !isPPC64 && 3994 !G->getGlobal()->hasLocalLinkage() && 3995 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) { 3996 // PC-relative references to external symbols should go through $stub, 3997 // unless we're building with the leopard linker or later, which 3998 // automatically synthesizes these stubs. 3999 OpFlags = PPCII::MO_PLT_OR_STUB; 4000 } 4001 4002 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, 4003 // every direct call is) turn it into a TargetGlobalAddress / 4004 // TargetExternalSymbol node so that legalize doesn't hack it. 4005 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, 4006 Callee.getValueType(), 0, OpFlags); 4007 needIndirectCall = false; 4008 } 4009 4010 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 4011 unsigned char OpFlags = 0; 4012 4013 if ((DAG.getTarget().getRelocationModel() != Reloc::Static && 4014 (Subtarget.getTargetTriple().isMacOSX() && 4015 Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5))) || 4016 (Subtarget.isTargetELF() && !isPPC64 && 4017 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) { 4018 // PC-relative references to external symbols should go through $stub, 4019 // unless we're building with the leopard linker or later, which 4020 // automatically synthesizes these stubs. 
4021 OpFlags = PPCII::MO_PLT_OR_STUB; 4022 } 4023 4024 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(), 4025 OpFlags); 4026 needIndirectCall = false; 4027 } 4028 4029 if (IsPatchPoint) { 4030 // We'll form an invalid direct call when lowering a patchpoint; the full 4031 // sequence for an indirect call is complicated, and many of the 4032 // instructions introduced might have side effects (and, thus, can't be 4033 // removed later). The call itself will be removed as soon as the 4034 // argument/return lowering is complete, so the fact that it has the wrong 4035 // kind of operands should not really matter. 4036 needIndirectCall = false; 4037 } 4038 4039 if (needIndirectCall) { 4040 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 4041 // to do the call, we can't use PPCISD::CALL. 4042 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 4043 4044 if (isSVR4ABI && isPPC64 && !isELFv2ABI) { 4045 // Function pointers in the 64-bit SVR4 ABI do not point to the function 4046 // entry point, but to the function descriptor (the function entry point 4047 // address is part of the function descriptor though). 4048 // The function descriptor is a three doubleword structure with the 4049 // following fields: function entry point, TOC base address and 4050 // environment pointer. 4051 // Thus for a call through a function pointer, the following actions need 4052 // to be performed: 4053 // 1. Save the TOC of the caller in the TOC save area of its stack 4054 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). 4055 // 2. Load the address of the function entry point from the function 4056 // descriptor. 4057 // 3. Load the TOC of the callee from the function descriptor into r2. 4058 // 4. Load the environment pointer from the function descriptor into 4059 // r11. 4060 // 5. Branch to the function entry point address. 4061 // 6. On return of the callee, the TOC of the caller needs to be 4062 // restored (this is done in FinishCall()). 4063 // 4064 // The loads are scheduled at the beginning of the call sequence, and the 4065 // register copies are flagged together to ensure that no other 4066 // operations can be scheduled in between. E.g. without flagging the 4067 // copies together, a TOC access in the caller could be scheduled between 4068 // the assignment of the callee TOC and the branch to the callee, which 4069 // results in the TOC access going through the TOC of the callee instead 4070 // of going through the TOC of the caller, which leads to incorrect code. 4071 4072 // Load the address of the function entry point from the function 4073 // descriptor. 4074 SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1); 4075 if (LDChain.getValueType() == MVT::Glue) 4076 LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2); 4077 4078 bool LoadsInv = Subtarget.hasInvariantFunctionDescriptors(); 4079 4080 MachinePointerInfo MPI(CS ? CS->getCalledValue() : nullptr); 4081 SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI, 4082 false, false, LoadsInv, 8); 4083 4084 // Load environment pointer into r11. 
4085 SDValue PtrOff = DAG.getIntPtrConstant(16, dl); 4086 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 4087 SDValue LoadEnvPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, 4088 MPI.getWithOffset(16), false, false, 4089 LoadsInv, 8); 4090 4091 SDValue TOCOff = DAG.getIntPtrConstant(8, dl); 4092 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff); 4093 SDValue TOCPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, 4094 MPI.getWithOffset(8), false, false, 4095 LoadsInv, 8); 4096 4097 setUsesTOCBasePtr(DAG); 4098 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr, 4099 InFlag); 4100 Chain = TOCVal.getValue(0); 4101 InFlag = TOCVal.getValue(1); 4102 4103 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 4104 InFlag); 4105 4106 Chain = EnvVal.getValue(0); 4107 InFlag = EnvVal.getValue(1); 4108 4109 MTCTROps[0] = Chain; 4110 MTCTROps[1] = LoadFuncPtr; 4111 MTCTROps[2] = InFlag; 4112 } 4113 4114 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, 4115 makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2)); 4116 InFlag = Chain.getValue(1); 4117 4118 NodeTys.clear(); 4119 NodeTys.push_back(MVT::Other); 4120 NodeTys.push_back(MVT::Glue); 4121 Ops.push_back(Chain); 4122 CallOpc = PPCISD::BCTRL; 4123 Callee.setNode(nullptr); 4124 // Add use of X11 (holding environment pointer) 4125 if (isSVR4ABI && isPPC64 && !isELFv2ABI) 4126 Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); 4127 // Add CTR register as callee so a bctr can be emitted later. 4128 if (isTailCall) 4129 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 4130 } 4131 4132 // If this is a direct call, pass the chain and the callee. 4133 if (Callee.getNode()) { 4134 Ops.push_back(Chain); 4135 Ops.push_back(Callee); 4136 } 4137 // If this is a tail call add stack pointer delta. 4138 if (isTailCall) 4139 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32)); 4140 4141 // Add argument registers to the end of the list so that they are known live 4142 // into the call. 4143 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 4144 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 4145 RegsToPass[i].second.getValueType())); 4146 4147 // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live 4148 // into the call. 4149 if (isSVR4ABI && isPPC64 && !IsPatchPoint) { 4150 setUsesTOCBasePtr(DAG); 4151 Ops.push_back(DAG.getRegister(PPC::X2, PtrVT)); 4152 } 4153 4154 return CallOpc; 4155 } 4156 4157 static 4158 bool isLocalCall(const SDValue &Callee) 4159 { 4160 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 4161 return !G->getGlobal()->isDeclaration() && 4162 !G->getGlobal()->isWeakForLinker(); 4163 return false; 4164 } 4165 4166 SDValue 4167 PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 4168 CallingConv::ID CallConv, bool isVarArg, 4169 const SmallVectorImpl<ISD::InputArg> &Ins, 4170 SDLoc dl, SelectionDAG &DAG, 4171 SmallVectorImpl<SDValue> &InVals) const { 4172 4173 SmallVector<CCValAssign, 16> RVLocs; 4174 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 4175 *DAG.getContext()); 4176 CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC); 4177 4178 // Copy all of the result registers out of their specified physreg. 
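  // (For a value promoted to a wider location - e.g. an i1 or i8 result
  //  returned in an i32/i64 register - the AssertZext/AssertSext plus
  //  TRUNCATE below recover the original type without extra extension code.)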
4179 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
4180 CCValAssign &VA = RVLocs[i];
4181 assert(VA.isRegLoc() && "Can only return in registers!");
4182
4183 SDValue Val = DAG.getCopyFromReg(Chain, dl,
4184 VA.getLocReg(), VA.getLocVT(), InFlag);
4185 Chain = Val.getValue(1);
4186 InFlag = Val.getValue(2);
4187
4188 switch (VA.getLocInfo()) {
4189 default: llvm_unreachable("Unknown loc info!");
4190 case CCValAssign::Full: break;
4191 case CCValAssign::AExt:
4192 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4193 break;
4194 case CCValAssign::ZExt:
4195 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
4196 DAG.getValueType(VA.getValVT()));
4197 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4198 break;
4199 case CCValAssign::SExt:
4200 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
4201 DAG.getValueType(VA.getValVT()));
4202 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4203 break;
4204 }
4205
4206 InVals.push_back(Val);
4207 }
4208
4209 return Chain;
4210 }
4211
4212 SDValue
4213 PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl,
4214 bool isTailCall, bool isVarArg, bool IsPatchPoint,
4215 SelectionDAG &DAG,
4216 SmallVector<std::pair<unsigned, SDValue>, 8>
4217 &RegsToPass,
4218 SDValue InFlag, SDValue Chain,
4219 SDValue CallSeqStart, SDValue &Callee,
4220 int SPDiff, unsigned NumBytes,
4221 const SmallVectorImpl<ISD::InputArg> &Ins,
4222 SmallVectorImpl<SDValue> &InVals,
4223 ImmutableCallSite *CS) const {
4224
4225 std::vector<EVT> NodeTys;
4226 SmallVector<SDValue, 8> Ops;
4227 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl,
4228 SPDiff, isTailCall, IsPatchPoint, RegsToPass,
4229 Ops, NodeTys, CS, Subtarget);
4230
4231 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
4232 if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64())
4233 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
4234
4235 // When performing tail call optimization the callee pops its arguments off
4236 // the stack. Account for this here so these bytes can be pushed back on in
4237 // PPCFrameLowering::eliminateCallFramePseudoInstr.
4238 int BytesCalleePops =
4239 (CallConv == CallingConv::Fast &&
4240 getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0;
4241
4242 // Add a register mask operand representing the call-preserved registers.
4243 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
4244 const uint32_t *Mask =
4245 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
4246 assert(Mask && "Missing call preserved mask for calling convention");
4247 Ops.push_back(DAG.getRegisterMask(Mask));
4248
4249 if (InFlag.getNode())
4250 Ops.push_back(InFlag);
4251
4252 // Emit tail call.
4253 if (isTailCall) {
4254 assert(((Callee.getOpcode() == ISD::Register &&
4255 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
4256 Callee.getOpcode() == ISD::TargetExternalSymbol ||
4257 Callee.getOpcode() == ISD::TargetGlobalAddress ||
4258 isa<ConstantSDNode>(Callee)) &&
4259 "Expecting a global address, external symbol, absolute value or register");
4260
4261 DAG.getMachineFunction().getFrameInfo()->setHasTailCall();
4262 return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
4263 }
4264
4265 // Add a NOP immediately after the branch instruction when using the 64-bit
4266 // SVR4 ABI.
At link time, if caller and callee are in a different module and 4267 // thus have a different TOC, the call will be replaced with a call to a stub 4268 // function which saves the current TOC, loads the TOC of the callee and 4269 // branches to the callee. The NOP will be replaced with a load instruction 4270 // which restores the TOC of the caller from the TOC save slot of the current 4271 // stack frame. If caller and callee belong to the same module (and have the 4272 // same TOC), the NOP will remain unchanged. 4273 4274 if (!isTailCall && Subtarget.isSVR4ABI()&& Subtarget.isPPC64() && 4275 !IsPatchPoint) { 4276 if (CallOpc == PPCISD::BCTRL) { 4277 // This is a call through a function pointer. 4278 // Restore the caller TOC from the save area into R2. 4279 // See PrepareCall() for more information about calls through function 4280 // pointers in the 64-bit SVR4 ABI. 4281 // We are using a target-specific load with r2 hard coded, because the 4282 // result of a target-independent load would never go directly into r2, 4283 // since r2 is a reserved register (which prevents the register allocator 4284 // from allocating it), resulting in an additional register being 4285 // allocated and an unnecessary move instruction being generated. 4286 CallOpc = PPCISD::BCTRL_LOAD_TOC; 4287 4288 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 4289 SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT); 4290 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset(); 4291 SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl); 4292 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff); 4293 4294 // The address needs to go after the chain input but before the flag (or 4295 // any other variadic arguments). 4296 Ops.insert(std::next(Ops.begin()), AddTOC); 4297 } else if ((CallOpc == PPCISD::CALL) && 4298 (!isLocalCall(Callee) || 4299 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) 4300 // Otherwise insert NOP for non-local calls. 
4301 CallOpc = PPCISD::CALL_NOP;
4302 }
4303
4304 Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
4305 InFlag = Chain.getValue(1);
4306
4307 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
4308 DAG.getIntPtrConstant(BytesCalleePops, dl, true),
4309 InFlag, dl);
4310 if (!Ins.empty())
4311 InFlag = Chain.getValue(1);
4312
4313 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
4314 Ins, dl, DAG, InVals);
4315 }
4316
4317 SDValue
4318 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
4319 SmallVectorImpl<SDValue> &InVals) const {
4320 SelectionDAG &DAG = CLI.DAG;
4321 SDLoc &dl = CLI.DL;
4322 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
4323 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
4324 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
4325 SDValue Chain = CLI.Chain;
4326 SDValue Callee = CLI.Callee;
4327 bool &isTailCall = CLI.IsTailCall;
4328 CallingConv::ID CallConv = CLI.CallConv;
4329 bool isVarArg = CLI.IsVarArg;
4330 bool IsPatchPoint = CLI.IsPatchPoint;
4331 ImmutableCallSite *CS = CLI.CS;
4332
4333 if (isTailCall)
4334 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
4335 Ins, DAG);
4336
4337 if (!isTailCall && CS && CS->isMustTailCall())
4338 report_fatal_error("failed to perform tail call elimination on a call "
4339 "site marked musttail");
4340
4341 if (Subtarget.isSVR4ABI()) {
4342 if (Subtarget.isPPC64())
4343 return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
4344 isTailCall, IsPatchPoint, Outs, OutVals, Ins,
4345 dl, DAG, InVals, CS);
4346 else
4347 return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
4348 isTailCall, IsPatchPoint, Outs, OutVals, Ins,
4349 dl, DAG, InVals, CS);
4350 }
4351
4352 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
4353 isTailCall, IsPatchPoint, Outs, OutVals, Ins,
4354 dl, DAG, InVals, CS);
4355 }
4356
4357 SDValue
4358 PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
4359 CallingConv::ID CallConv, bool isVarArg,
4360 bool isTailCall, bool IsPatchPoint,
4361 const SmallVectorImpl<ISD::OutputArg> &Outs,
4362 const SmallVectorImpl<SDValue> &OutVals,
4363 const SmallVectorImpl<ISD::InputArg> &Ins,
4364 SDLoc dl, SelectionDAG &DAG,
4365 SmallVectorImpl<SDValue> &InVals,
4366 ImmutableCallSite *CS) const {
4367 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
4368 // of the 32-bit SVR4 ABI stack frame layout.
4369
4370 assert((CallConv == CallingConv::C ||
4371 CallConv == CallingConv::Fast) && "Unknown calling convention!");
4372
4373 unsigned PtrByteSize = 4;
4374
4375 MachineFunction &MF = DAG.getMachineFunction();
4376
4377 // Mark this function as potentially containing a function that contains a
4378 // tail call. As a consequence, the frame pointer will be used for dynamic
4379 // allocation and for restoring the caller's stack pointer in this function's
4380 // epilogue. This is done because, by tail calling, the called function might
4381 // overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
4382 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
4383 CallConv == CallingConv::Fast)
4384 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
4385
4386 // Count how many bytes are to be pushed on the stack, including the linkage
4387 // area, parameter list area and the part of the local variable space which
4388 // contains copies of aggregates which are passed by value.
4389
4390 // Assign locations to all of the outgoing arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // Reserve space for the linkage area on the stack.
  CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
                       PtrByteSize);

  if (isVarArg) {
    // Handle fixed and variable vector arguments differently.
    // Fixed vector arguments go into registers as long as registers are
    // available. Variable vector arguments always go into memory.
    unsigned NumArgs = Outs.size();

    for (unsigned i = 0; i != NumArgs; ++i) {
      MVT ArgVT = Outs[i].VT;
      ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
      bool Result;

      if (Outs[i].IsFixed) {
        Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
                               CCInfo);
      } else {
        Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
                                      ArgFlags, CCInfo);
      }

      if (Result) {
#ifndef NDEBUG
        errs() << "Call operand #" << i << " has unhandled type "
               << EVT(ArgVT).getEVTString() << "\n";
#endif
        llvm_unreachable(nullptr);
      }
    }
  } else {
    // All arguments are treated the same.
    CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
  }

  // Assign locations to all of the outgoing aggregate by value arguments.
  SmallVector<CCValAssign, 16> ByValArgLocs;
  CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                      ByValArgLocs, *DAG.getContext());

  // Reserve stack space for the allocations in CCInfo.
  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);

  CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);

  // Size of the linkage area, the parameter list area, and the part of the
  // local variable space where copies of aggregates which are passed by
  // value are stored.
  unsigned NumBytes = CCByValInfo.getNextStackOffset();

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                               dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false,
                                       dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
  SmallVector<SDValue, 8> MemOpChains;

  bool seenFloatArg = false;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, j = 0, e = ArgLocs.size();
       i != e;
       ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;

    if (Flags.isByVal()) {
      // Argument is an aggregate which is passed by value, thus we need to
      // create a copy of it in the local variable space of the current stack
      // frame (which is the stack frame of the caller) and pass the address of
      // this copy to the callee.
      assert((j < ByValArgLocs.size()) && "Index out of bounds!");
      CCValAssign &ByValVA = ByValArgLocs[j++];
      assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");

      // Memory reserved in the local variable space of the caller's stack
      // frame.
      unsigned LocMemOffset = ByValVA.getLocMemOffset();

      SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
      PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);

      // Create a copy of the argument in the local area of the current
      // stack frame.
      SDValue MemcpyCall =
        CreateCopyOfByValArgument(Arg, PtrOff,
                                  CallSeqStart.getNode()->getOperand(0),
                                  Flags, DAG, dl);

      // This must go outside the CALLSEQ_START..END.
      SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
                           CallSeqStart.getNode()->getOperand(1),
                           SDLoc(MemcpyCall));
      DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
                             NewCallSeqStart.getNode());
      Chain = CallSeqStart = NewCallSeqStart;

      // Pass the address of the aggregate copy on the stack either in a
      // physical register or in the parameter list area of the current stack
      // frame to the callee.
      Arg = PtrOff;
    }

    if (VA.isRegLoc()) {
      if (Arg.getValueType() == MVT::i1)
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg);

      seenFloatArg |= VA.getLocVT().isFloatingPoint();
      // Put argument in a physical register.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      // Put argument in the parameter list area of the current stack frame.
      assert(VA.isMemLoc());
      unsigned LocMemOffset = VA.getLocMemOffset();

      if (!isTailCall) {
        SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);

        MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
                                           MachinePointerInfo(),
                                           false, false, 0));
      } else {
        // Calculate and remember argument location.
        CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
                                 TailCallArguments);
      }
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // Set CR bit 6 to true if this is a vararg call with floating args passed in
  // registers.
  if (isVarArg) {
    SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Ops[] = { Chain, InFlag };

    Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
                        dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));

    InFlag = Chain.getValue(1);
  }

  if (isTailCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp,
                    false, TailCallArguments);

  return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint, DAG,
                    RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
                    NumBytes, Ins, InVals, CS);
}

// Copy an argument into memory, being careful to do this outside the
// call sequence for the call to which the argument belongs.
SDValue
PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
                                              SDValue CallSeqStart,
                                              ISD::ArgFlagsTy Flags,
                                              SelectionDAG &DAG,
                                              SDLoc dl) const {
  SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
                        CallSeqStart.getNode()->getOperand(0),
                        Flags, DAG, dl);
  // The MEMCPY must go outside the CALLSEQ_START..END.
  SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
                             CallSeqStart.getNode()->getOperand(1),
                             SDLoc(MemcpyCall));
  DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
                         NewCallSeqStart.getNode());
  return NewCallSeqStart;
}

SDValue
PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
                                    CallingConv::ID CallConv, bool isVarArg,
                                    bool isTailCall, bool IsPatchPoint,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SDLoc dl, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &InVals,
                                    ImmutableCallSite *CS) const {

  bool isELFv2ABI = Subtarget.isELFv2ABI();
  bool isLittleEndian = Subtarget.isLittleEndian();
  unsigned NumOps = Outs.size();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  unsigned PtrByteSize = 8;

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence the frame pointer will be used for dynamic
  // allocation and for restoring the caller's stack pointer in this
  // function's epilogue. This is done because the tail-called function might
  // overwrite the value in this function's (MF) stack pointer save slot
  // 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  assert(!(CallConv == CallingConv::Fast && isVarArg) &&
         "fastcc not supported on varargs functions");

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area and the parameter passing area. On ELFv1, the linkage area is 48
  // bytes reserved for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
  // area is 32 bytes reserved for [SP][CR][LR][TOC].
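  // A sketch of the two layouts, assuming the standard ELF ABI offsets from
  // the stack pointer:
  //   ELFv1 (48 bytes)             ELFv2 (32 bytes)
  //   SP+0   back chain            SP+0   back chain
  //   SP+8   CR save               SP+8   CR save
  //   SP+16  LR save               SP+16  LR save
  //   SP+24  reserved (compiler)   SP+24  TOC save
  //   SP+32  reserved (linker)
  //   SP+40  TOC save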
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned NumBytes = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  unsigned &QFPR_idx = FPR_idx;

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };
  static const MCPhysReg VSRH[] = {
    PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8,
    PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13
  };

  const unsigned NumGPRs = array_lengthof(GPR);
  const unsigned NumFPRs = 13;
  const unsigned NumVRs = array_lengthof(VR);
  const unsigned NumQFPRs = NumFPRs;

  // When using the fast calling convention, we don't provide backing for
  // arguments that will be in registers.
  unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;

  // Add up all the space actually used.
  for (unsigned i = 0; i != NumOps; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;

    if (CallConv == CallingConv::Fast) {
      if (Flags.isByVal())
        NumGPRsUsed += (Flags.getByValSize()+7)/8;
      else
        switch (ArgVT.getSimpleVT().SimpleTy) {
        default: llvm_unreachable("Unexpected ValueType for argument!");
        case MVT::i1:
        case MVT::i32:
        case MVT::i64:
          if (++NumGPRsUsed <= NumGPRs)
            continue;
          break;
        case MVT::v4i32:
        case MVT::v8i16:
        case MVT::v16i8:
        case MVT::v2f64:
        case MVT::v2i64:
        case MVT::v1i128:
          if (++NumVRsUsed <= NumVRs)
            continue;
          break;
        case MVT::v4f32:
          // When using QPX, this is handled like an FP register; otherwise,
          // it is an Altivec register.
          if (Subtarget.hasQPX()) {
            if (++NumFPRsUsed <= NumFPRs)
              continue;
          } else {
            if (++NumVRsUsed <= NumVRs)
              continue;
          }
          break;
        case MVT::f32:
        case MVT::f64:
        case MVT::v4f64: // QPX
        case MVT::v4i1:  // QPX
          if (++NumFPRsUsed <= NumFPRs)
            continue;
          break;
        }
    }

    /* Respect alignment of argument on the stack. */
    unsigned Align =
      CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
    NumBytes = ((NumBytes + Align - 1) / Align) * Align;

    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
    if (Flags.isInConsecutiveRegsLast())
      NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
  }

  unsigned NumBytesActuallyUsed = NumBytes;

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if the callee
  // is varargs. Because we cannot tell if this is needed on the caller side,
  // we have to conservatively assume that it is needed. As such, make sure we
  // have at least enough stack space for the caller to store the 8 GPRs.
  // FIXME: On ELFv2, it may be unnecessary to allocate the parameter area.
  NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);

  // Tail call needs the stack to be aligned.
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // To protect arguments on the stack from being clobbered in a tail call,
  // force all the loads to happen before doing any other lowering.
  if (isTailCall)
    Chain = DAG.getStackArgumentTokenFactor(Chain);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                               dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
                                       dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);

  // Figure out which arguments are going to go in registers, and which in
  // memory. Also, if this is a vararg function, floating-point arguments
  // must be stored to our stack, and loaded into integer regs as well, if
  // any integer regs are available for argument passing.
  unsigned ArgOffset = LinkageSize;

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;

  SmallVector<SDValue, 8> MemOpChains;
  for (unsigned i = 0; i != NumOps; ++i) {
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDValue PtrOff;

    // We re-align the argument offset for each argument, except when using the
    // fast calling convention, when we need to make sure we do that only when
    // we'll actually use a stack slot.
    auto ComputePtrOff = [&]() {
      /* Respect alignment of argument on the stack. */
      unsigned Align =
        CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
      ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;

      PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());

      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
    };

    if (CallConv != CallingConv::Fast) {
      ComputePtrOff();

      /* Compute GPR index associated with argument offset. */
      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, NumGPRs);
    }

    // Promote integers to 64-bit values.
    if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
      // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
      unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
      Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
    }

    // FIXME: memcpy is used way more than necessary. Correctness first.
    // Note: "by value" is code for passing a structure by value, not
    // basic types.
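    // For illustration (a sketch of the cases handled below): a byval
    //   struct S { short a; char b; };  // Size = 4 including padding
    // is loaded right-justified into a single GPR when one is free, while a
    // byval aggregate of 8 or more bytes is first memcpy'd to the parameter
    // save area and then the pieces that fit are loaded into GPRs.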
4810 if (Flags.isByVal()) { 4811 // Note: Size includes alignment padding, so 4812 // struct x { short a; char b; } 4813 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 4814 // These are the proper values we need for right-justifying the 4815 // aggregate in a parameter register. 4816 unsigned Size = Flags.getByValSize(); 4817 4818 // An empty aggregate parameter takes up no storage and no 4819 // registers. 4820 if (Size == 0) 4821 continue; 4822 4823 if (CallConv == CallingConv::Fast) 4824 ComputePtrOff(); 4825 4826 // All aggregates smaller than 8 bytes must be passed right-justified. 4827 if (Size==1 || Size==2 || Size==4) { 4828 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 4829 if (GPR_idx != NumGPRs) { 4830 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 4831 MachinePointerInfo(), VT, 4832 false, false, false, 0); 4833 MemOpChains.push_back(Load.getValue(1)); 4834 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4835 4836 ArgOffset += PtrByteSize; 4837 continue; 4838 } 4839 } 4840 4841 if (GPR_idx == NumGPRs && Size < 8) { 4842 SDValue AddPtr = PtrOff; 4843 if (!isLittleEndian) { 4844 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 4845 PtrOff.getValueType()); 4846 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 4847 } 4848 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 4849 CallSeqStart, 4850 Flags, DAG, dl); 4851 ArgOffset += PtrByteSize; 4852 continue; 4853 } 4854 // Copy entire object into memory. There are cases where gcc-generated 4855 // code assumes it is there, even if it could be put entirely into 4856 // registers. (This is not what the doc says.) 4857 4858 // FIXME: The above statement is likely due to a misunderstanding of the 4859 // documents. All arguments must be copied into the parameter area BY 4860 // THE CALLEE in the event that the callee takes the address of any 4861 // formal argument. That has not yet been implemented. However, it is 4862 // reasonable to use the stack area as a staging area for the register 4863 // load. 4864 4865 // Skip this for small aggregates, as we will use the same slot for a 4866 // right-justified copy, below. 4867 if (Size >= 8) 4868 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 4869 CallSeqStart, 4870 Flags, DAG, dl); 4871 4872 // When a register is available, pass a small aggregate right-justified. 4873 if (Size < 8 && GPR_idx != NumGPRs) { 4874 // The easiest way to get this right-justified in a register 4875 // is to copy the structure into the rightmost portion of a 4876 // local variable slot, then load the whole slot into the 4877 // register. 4878 // FIXME: The memcpy seems to produce pretty awful code for 4879 // small aggregates, particularly for packed ones. 4880 // FIXME: It would be preferable to use the slot in the 4881 // parameter save area instead of a new local variable. 4882 SDValue AddPtr = PtrOff; 4883 if (!isLittleEndian) { 4884 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType()); 4885 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 4886 } 4887 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 4888 CallSeqStart, 4889 Flags, DAG, dl); 4890 4891 // Load the slot into the register. 4892 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, PtrOff, 4893 MachinePointerInfo(), 4894 false, false, false, 0); 4895 MemOpChains.push_back(Load.getValue(1)); 4896 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4897 4898 // Done with this argument. 
        ArgOffset += PtrByteSize;
        continue;
      }

      // For aggregates larger than PtrByteSize, copy the pieces of the
      // object that fit into registers from the parameter save area.
      for (unsigned j=0; j<Size; j+=PtrByteSize) {
        SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
        SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
        if (GPR_idx != NumGPRs) {
          SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
                                     MachinePointerInfo(),
                                     false, false, false, 0);
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
          break;
        }
      }
      continue;
    }

    switch (Arg.getSimpleValueType().SimpleTy) {
    default: llvm_unreachable("Unexpected ValueType for argument!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      // These can be scalar arguments or elements of an integer array type
      // passed directly. Clang may use those instead of "byval" aggregate
      // types to avoid forcing arguments to memory unnecessarily.
      if (GPR_idx != NumGPRs) {
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
      } else {
        if (CallConv == CallingConv::Fast)
          ComputePtrOff();

        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);
        if (CallConv == CallingConv::Fast)
          ArgOffset += PtrByteSize;
      }
      if (CallConv != CallingConv::Fast)
        ArgOffset += PtrByteSize;
      break;
    case MVT::f32:
    case MVT::f64: {
      // These can be scalar arguments or elements of a float array type
      // passed directly. The latter are used to implement ELFv2 homogeneous
      // float aggregates.

      // Named arguments go into FPRs first, and once they overflow, the
      // remaining arguments go into GPRs and then the parameter save area.
      // Unnamed arguments for vararg functions always go to GPRs and
      // then the parameter save area. For now, put all arguments to vararg
      // routines always in both locations (FPR *and* GPR or stack slot).
      bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs;
      bool NeededLoad = false;

      // First load the argument into the next available FPR.
      if (FPR_idx != NumFPRs)
        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));

      // Next, load the argument into GPR or stack slot if needed.
      if (!NeedGPROrStack)
        ;
      else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) {
        // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
        // once we support fp <-> gpr moves.

        // In the non-vararg case, this can only ever happen in the
        // presence of f32 array types, since otherwise we never run
        // out of FPRs before running out of GPRs.
        SDValue ArgVal;

        // Double values are always passed in a single GPR.
        if (Arg.getValueType() != MVT::f32) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);

        // Non-array float values are extended and passed in a GPR.
        } else if (!Flags.isInConsecutiveRegs()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);

        // If we have an array of floats, we collect every odd element
        // together with its predecessor into one GPR.
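        // For example (a sketch): for a homogeneous float aggregate passed
        // in GPRs, elements 0 and 1 share the first GPR, elements 2 and 3
        // the second, and so on; on a big-endian target the earlier element
        // ends up in the high-order word of the doubleword (hence the swap
        // below).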
        } else if (ArgOffset % PtrByteSize != 0) {
          SDValue Lo, Hi;
          Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
          Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          if (!isLittleEndian)
            std::swap(Lo, Hi);
          ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);

        // The final element, if even, goes into the first half of a GPR.
        } else if (Flags.isInConsecutiveRegsLast()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
          if (!isLittleEndian)
            ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
                                 DAG.getConstant(32, dl, MVT::i32));

        // Non-final even elements are skipped; they will be handled
        // together with the subsequent argument on the next go-around.
        } else
          ArgVal = SDValue();

        if (ArgVal.getNode())
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
      } else {
        if (CallConv == CallingConv::Fast)
          ComputePtrOff();

        // Single-precision floating-point values are mapped to the
        // second (rightmost) word of the stack doubleword.
        if (Arg.getValueType() == MVT::f32 &&
            !isLittleEndian && !Flags.isInConsecutiveRegs()) {
          SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
          PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
        }

        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);

        NeededLoad = true;
      }
      // When passing an array of floats, the array occupies consecutive
      // space in the argument area; only round up to the next doubleword
      // at the end of the array. Otherwise, each float takes 8 bytes.
      if (CallConv != CallingConv::Fast || NeededLoad) {
        ArgOffset += (Arg.getValueType() == MVT::f32 &&
                      Flags.isInConsecutiveRegs()) ? 4 : 8;
        if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      }
      break;
    }
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
    case MVT::v2f64:
    case MVT::v2i64:
    case MVT::v1i128:
      if (!Subtarget.hasQPX()) {
        // These can be scalar arguments or elements of a vector array type
        // passed directly. The latter are used to implement ELFv2 homogeneous
        // vector aggregates.

        // For a varargs call, named arguments go into VRs or on the stack as
        // usual; unnamed arguments always go to the stack or the corresponding
        // GPRs when within range. For now, we always put the value in both
        // locations (or even all three).
        if (isVarArg) {
          // We could elide this store in the case where the object fits
          // entirely in R registers. Maybe later.
          SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
                                       MachinePointerInfo(), false, false, 0);
          MemOpChains.push_back(Store);
          if (VR_idx != NumVRs) {
            SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff,
                                       MachinePointerInfo(),
                                       false, false, false, 0);
            MemOpChains.push_back(Load.getValue(1));

            unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 ||
                             Arg.getSimpleValueType() == MVT::v2i64) ?
5069 VSRH[VR_idx] : VR[VR_idx]; 5070 ++VR_idx; 5071 5072 RegsToPass.push_back(std::make_pair(VReg, Load)); 5073 } 5074 ArgOffset += 16; 5075 for (unsigned i=0; i<16; i+=PtrByteSize) { 5076 if (GPR_idx == NumGPRs) 5077 break; 5078 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5079 DAG.getConstant(i, dl, PtrVT)); 5080 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 5081 false, false, false, 0); 5082 MemOpChains.push_back(Load.getValue(1)); 5083 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5084 } 5085 break; 5086 } 5087 5088 // Non-varargs Altivec params go into VRs or on the stack. 5089 if (VR_idx != NumVRs) { 5090 unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 || 5091 Arg.getSimpleValueType() == MVT::v2i64) ? 5092 VSRH[VR_idx] : VR[VR_idx]; 5093 ++VR_idx; 5094 5095 RegsToPass.push_back(std::make_pair(VReg, Arg)); 5096 } else { 5097 if (CallConv == CallingConv::Fast) 5098 ComputePtrOff(); 5099 5100 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5101 true, isTailCall, true, MemOpChains, 5102 TailCallArguments, dl); 5103 if (CallConv == CallingConv::Fast) 5104 ArgOffset += 16; 5105 } 5106 5107 if (CallConv != CallingConv::Fast) 5108 ArgOffset += 16; 5109 break; 5110 } // not QPX 5111 5112 assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 && 5113 "Invalid QPX parameter type"); 5114 5115 /* fall through */ 5116 case MVT::v4f64: 5117 case MVT::v4i1: { 5118 bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32; 5119 if (isVarArg) { 5120 // We could elide this store in the case where the object fits 5121 // entirely in R registers. Maybe later. 5122 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5123 MachinePointerInfo(), false, false, 0); 5124 MemOpChains.push_back(Store); 5125 if (QFPR_idx != NumQFPRs) { 5126 SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, 5127 Store, PtrOff, MachinePointerInfo(), 5128 false, false, false, 0); 5129 MemOpChains.push_back(Load.getValue(1)); 5130 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load)); 5131 } 5132 ArgOffset += (IsF32 ? 16 : 32); 5133 for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) { 5134 if (GPR_idx == NumGPRs) 5135 break; 5136 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5137 DAG.getConstant(i, dl, PtrVT)); 5138 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 5139 false, false, false, 0); 5140 MemOpChains.push_back(Load.getValue(1)); 5141 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5142 } 5143 break; 5144 } 5145 5146 // Non-varargs QPX params go into registers or on the stack. 5147 if (QFPR_idx != NumQFPRs) { 5148 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg)); 5149 } else { 5150 if (CallConv == CallingConv::Fast) 5151 ComputePtrOff(); 5152 5153 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5154 true, isTailCall, true, MemOpChains, 5155 TailCallArguments, dl); 5156 if (CallConv == CallingConv::Fast) 5157 ArgOffset += (IsF32 ? 16 : 32); 5158 } 5159 5160 if (CallConv != CallingConv::Fast) 5161 ArgOffset += (IsF32 ? 16 : 32); 5162 break; 5163 } 5164 } 5165 } 5166 5167 assert(NumBytesActuallyUsed == ArgOffset); 5168 (void)NumBytesActuallyUsed; 5169 5170 if (!MemOpChains.empty()) 5171 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5172 5173 // Check if this is an indirect call (MTCTR/BCTRL). 5174 // See PrepareCall() for more information about calls through function 5175 // pointers in the 64-bit SVR4 ABI. 
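  // As a sketch of what this emits (assuming the ELFv2 TOC save slot at
  // offset 24; ELFv1 uses 40): before an indirect call we produce
  //   std 2, 24(1)
  // so that the BCTRL_LOAD_TOC emitted by FinishCall can restore r2 with
  //   ld 2, 24(1)
  // after the callee returns.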
  if (!isTailCall && !IsPatchPoint &&
      !isFunctionGlobalAddress(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee)) {
    // Load r2 into a virtual register and store it to the TOC save area.
    setUsesTOCBasePtr(DAG);
    SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
    // TOC save area offset.
    unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
    SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
    SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
    Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
                         MachinePointerInfo::getStack(TOCSaveOffset),
                         false, false, 0);
    // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
    // This does not mean the MTCTR instruction must use R12; it's easier
    // to model this as an extra parameter, so do that.
    if (isELFv2ABI && !IsPatchPoint)
      RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (isTailCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, true, SPDiff, NumBytes, LROp,
                    FPOp, true, TailCallArguments);

  return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint, DAG,
                    RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
                    NumBytes, Ins, InVals, CS);
}

SDValue
PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
                                    CallingConv::ID CallConv, bool isVarArg,
                                    bool isTailCall, bool IsPatchPoint,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SDLoc dl, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &InVals,
                                    ImmutableCallSite *CS) const {

  unsigned NumOps = Outs.size();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;
  unsigned PtrByteSize = isPPC64 ? 8 : 4;

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence the frame pointer will be used for dynamic
  // allocation and for restoring the caller's stack pointer in this
  // function's epilogue. This is done because the tail-called function might
  // overwrite the value in this function's (MF) stack pointer save slot
  // 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area and the parameter passing area. We start with 24/48 bytes, which is
  // pre-reserved space for [SP][CR][LR][3 x unused].
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned NumBytes = LinkageSize;

  // Add up all the space actually used.
  // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
  // they all go in registers, but we must reserve stack space for them for
  // possible use by the caller. In varargs or 64-bit calls, parameters are
  // assigned stack space in order, with padding so Altivec parameters are
  // 16-byte aligned.
  unsigned nAltivecParamsAtEnd = 0;
  for (unsigned i = 0; i != NumOps; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    // Varargs Altivec parameters are padded to a 16-byte boundary.
    if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
        ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
        ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
      if (!isVarArg && !isPPC64) {
        // Non-varargs Altivec parameters go after all the non-Altivec
        // parameters; handle those later so we know how much padding we need.
        nAltivecParamsAtEnd++;
        continue;
      }
      // Varargs and 64-bit Altivec parameters are padded to a 16-byte
      // boundary.
      NumBytes = ((NumBytes+15)/16)*16;
    }
    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
  }

  // Allow for Altivec parameters at the end, if needed.
  if (nAltivecParamsAtEnd) {
    NumBytes = ((NumBytes+15)/16)*16;
    NumBytes += 16*nAltivecParamsAtEnd;
  }

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if the callee
  // is varargs. Because we cannot tell if this is needed on the caller side,
  // we have to conservatively assume that it is needed. As such, make sure we
  // have at least enough stack space for the caller to store the 8 GPRs.
  NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);

  // Tail call needs the stack to be aligned.
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // To protect arguments on the stack from being clobbered in a tail call,
  // force all the loads to happen before doing any other lowering.
  if (isTailCall)
    Chain = DAG.getStackArgumentTokenFactor(Chain);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                               dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
                                       dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr;
  if (isPPC64)
    StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
  else
    StackPtr = DAG.getRegister(PPC::R1, MVT::i32);

  // Figure out which arguments are going to go in registers, and which in
  // memory. Also, if this is a vararg function, floating-point arguments
  // must be stored to our stack, and loaded into integer regs as well, if
  // any integer regs are available for argument passing.
  unsigned ArgOffset = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  static const MCPhysReg GPR_32[] = { // 32-bit registers.
5330 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 5331 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 5332 }; 5333 static const MCPhysReg GPR_64[] = { // 64-bit registers. 5334 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 5335 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 5336 }; 5337 static const MCPhysReg VR[] = { 5338 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 5339 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 5340 }; 5341 const unsigned NumGPRs = array_lengthof(GPR_32); 5342 const unsigned NumFPRs = 13; 5343 const unsigned NumVRs = array_lengthof(VR); 5344 5345 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 5346 5347 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 5348 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 5349 5350 SmallVector<SDValue, 8> MemOpChains; 5351 for (unsigned i = 0; i != NumOps; ++i) { 5352 SDValue Arg = OutVals[i]; 5353 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5354 5355 // PtrOff will be used to store the current argument to the stack if a 5356 // register cannot be found for it. 5357 SDValue PtrOff; 5358 5359 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 5360 5361 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 5362 5363 // On PPC64, promote integers to 64-bit values. 5364 if (isPPC64 && Arg.getValueType() == MVT::i32) { 5365 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 5366 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 5367 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 5368 } 5369 5370 // FIXME memcpy is used way more than necessary. Correctness first. 5371 // Note: "by value" is code for passing a structure by value, not 5372 // basic types. 5373 if (Flags.isByVal()) { 5374 unsigned Size = Flags.getByValSize(); 5375 // Very small objects are passed right-justified. Everything else is 5376 // passed left-justified. 5377 if (Size==1 || Size==2) { 5378 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 5379 if (GPR_idx != NumGPRs) { 5380 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 5381 MachinePointerInfo(), VT, 5382 false, false, false, 0); 5383 MemOpChains.push_back(Load.getValue(1)); 5384 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5385 5386 ArgOffset += PtrByteSize; 5387 } else { 5388 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 5389 PtrOff.getValueType()); 5390 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5391 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5392 CallSeqStart, 5393 Flags, DAG, dl); 5394 ArgOffset += PtrByteSize; 5395 } 5396 continue; 5397 } 5398 // Copy entire object into memory. There are cases where gcc-generated 5399 // code assumes it is there, even if it could be put entirely into 5400 // registers. (This is not what the doc says.) 5401 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5402 CallSeqStart, 5403 Flags, DAG, dl); 5404 5405 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 5406 // copy the pieces of the object that fit into registers from the 5407 // parameter save area. 
      for (unsigned j=0; j<Size; j+=PtrByteSize) {
        SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
        SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
        if (GPR_idx != NumGPRs) {
          SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
                                     MachinePointerInfo(),
                                     false, false, false, 0);
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
          break;
        }
      }
      continue;
    }

    switch (Arg.getSimpleValueType().SimpleTy) {
    default: llvm_unreachable("Unexpected ValueType for argument!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (GPR_idx != NumGPRs) {
        if (Arg.getValueType() == MVT::i1)
          Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);

        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
      } else {
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         isPPC64, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);
      }
      ArgOffset += PtrByteSize;
      break;
    case MVT::f32:
    case MVT::f64:
      if (FPR_idx != NumFPRs) {
        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));

        if (isVarArg) {
          SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
                                       MachinePointerInfo(), false, false, 0);
          MemOpChains.push_back(Store);

          // Float varargs are always shadowed in available integer registers.
          if (GPR_idx != NumGPRs) {
            SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
                                       MachinePointerInfo(), false, false,
                                       false, 0);
            MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          }
          if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
            SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
            PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
            SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
                                       MachinePointerInfo(),
                                       false, false, false, 0);
            MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          }
        } else {
          // If we have any FPRs remaining, we may also have GPRs remaining.
          // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
          // GPRs.
          if (GPR_idx != NumGPRs)
            ++GPR_idx;
          if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
              !isPPC64) // PPC64 has 64-bit GPRs, obviously. :)
            ++GPR_idx;
        }
      } else
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         isPPC64, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);
      if (isPPC64)
        ArgOffset += 8;
      else
        ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      if (isVarArg) {
        // These go aligned on the stack, or in the corresponding R registers
        // when within range. The Darwin PPC ABI doc claims they also go in
        // V registers; in fact gcc does this only for arguments that are
        // prototyped, not for those that match the "...". We do this for all
        // arguments; it seems to work.
        while (ArgOffset % 16 != 0) {
          ArgOffset += PtrByteSize;
          if (GPR_idx != NumGPRs)
            GPR_idx++;
        }
        // We could elide this store in the case where the object fits
        // entirely in R registers. Maybe later.
        PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
                             DAG.getConstant(ArgOffset, dl, PtrVT));
        SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
                                     MachinePointerInfo(), false, false, 0);
        MemOpChains.push_back(Store);
        if (VR_idx != NumVRs) {
          SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff,
                                     MachinePointerInfo(),
                                     false, false, false, 0);
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
        }
        ArgOffset += 16;
        for (unsigned i=0; i<16; i+=PtrByteSize) {
          if (GPR_idx == NumGPRs)
            break;
          SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
                                   DAG.getConstant(i, dl, PtrVT));
          SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(),
                                     false, false, false, 0);
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        }
        break;
      }

      // Non-varargs Altivec params generally go in registers, but have
      // stack space allocated at the end.
      if (VR_idx != NumVRs) {
        // Doesn't have GPR space allocated.
        RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
      } else if (nAltivecParamsAtEnd==0) {
        // We are emitting Altivec params in order.
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         isPPC64, isTailCall, true, MemOpChains,
                         TailCallArguments, dl);
        ArgOffset += 16;
      }
      break;
    }
  }
  // If all Altivec parameters fit in registers, as they usually do,
  // they get stack space following the non-Altivec parameters. We
  // don't track this here because nobody below needs it.
  // If there are more Altivec parameters than fit in registers, emit
  // the stores here.
  if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
    unsigned j = 0;
    // Offset is aligned; skip the first 12 params, which go in V registers.
    ArgOffset = ((ArgOffset+15)/16)*16;
    ArgOffset += 12*16;
    for (unsigned i = 0; i != NumOps; ++i) {
      SDValue Arg = OutVals[i];
      EVT ArgType = Outs[i].VT;
      if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
          ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
        if (++j > NumVRs) {
          SDValue PtrOff;
          // We are emitting Altivec params in order.
          LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                           isPPC64, isTailCall, true, MemOpChains,
                           TailCallArguments, dl);
          ArgOffset += 16;
        }
      }
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // On Darwin, R12 must contain the address of an indirect callee. This does
  // not mean the MTCTR instruction must use R12; it's easier to model this as
  // an extra parameter, so do that.
  if (!isTailCall &&
      !isFunctionGlobalAddress(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee) &&
      !isBLACompatibleAddress(Callee, DAG))
    RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
                                                   PPC::R12), Callee));

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (isTailCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp,
                    FPOp, true, TailCallArguments);

  return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint, DAG,
                    RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
                    NumBytes, Ins, InVals, CS);
}

bool
PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                  MachineFunction &MF, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_PPC);
}

SDValue
PPCTargetLowering::LowerReturn(SDValue Chain,
                               CallingConv::ID CallConv, bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               SDLoc dl, SelectionDAG &DAG) const {

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC_PPC);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[i];

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
}

SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
                                      const PPCSubtarget &Subtarget) const {
  // When we pop the dynamic allocation we need to restore the SP link.
  SDLoc dl(Op);

  // Get the correct type for pointers.
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  // Construct the stack pointer operand.
  bool isPPC64 = Subtarget.isPPC64();
  unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
  SDValue StackPtr = DAG.getRegister(SP, PtrVT);

  // Get the operands for the STACKRESTORE.
  SDValue Chain = Op.getOperand(0);
  SDValue SaveSP = Op.getOperand(1);

  // Load the old link SP.
  SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr,
                                   MachinePointerInfo(),
                                   false, false, false, 0);

  // Restore the stack pointer.
  Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);

  // Store the old link SP.
  return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo(),
                      false, false, 0);
}

SDValue
PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = Subtarget.isPPC64();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  // Get the current return address save index.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int RASI = FI->getReturnAddrSaveIndex();

  // If the return address save index hasn't been defined yet.
  if (!RASI) {
    // Find out the fixed offset of the return address save area.
    int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
    // Allocate the frame index for the return address save area.
    RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
    // Save the result.
    FI->setReturnAddrSaveIndex(RASI);
  }
  return DAG.getFrameIndex(RASI, PtrVT);
}

SDValue
PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = Subtarget.isPPC64();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  // Get the current frame pointer save index. The users of this index will
  // be primarily DYNALLOC instructions.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int FPSI = FI->getFramePointerSaveIndex();

  // If the frame pointer save index hasn't been defined yet.
  if (!FPSI) {
    // Find out the fixed offset of the frame pointer save area.
    int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
    // Allocate the frame index for the frame pointer save area.
    FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
    // Save the result.
    FI->setFramePointerSaveIndex(FPSI);
  }
  return DAG.getFrameIndex(FPSI, PtrVT);
}

SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                         SelectionDAG &DAG,
                                         const PPCSubtarget &Subtarget) const {
  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDLoc dl(Op);

  // Get the correct type for pointers.
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  // Negate the size.
  SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
                                DAG.getConstant(0, dl, PtrVT), Size);
  // Construct a node for the frame pointer save index.
  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
  // Build a DYNALLOC node.
  SDValue Ops[3] = { Chain, NegSize, FPSIdx };
  SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
  return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
}

SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
                     DAG.getVTList(MVT::i32, MVT::Other),
                     Op.getOperand(0), Op.getOperand(1));
}

SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
                     Op.getOperand(0), Op.getOperand(1));
}

SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getValueType().isVector())
    return LowerVectorLoad(Op, DAG);

  assert(Op.getValueType() == MVT::i1 &&
         "Custom lowering only for i1 loads");

  // First, load 8 bits into 32 bits, then truncate to 1 bit.

  SDLoc dl(Op);
  LoadSDNode *LD = cast<LoadSDNode>(Op);

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  MachineMemOperand *MMO = LD->getMemOperand();

  SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(), Chain,
                                 BasePtr, MVT::i8, MMO);
  SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);

  SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
  return DAG.getMergeValues(Ops, dl);
}

SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getOperand(1).getValueType().isVector())
    return LowerVectorStore(Op, DAG);

  assert(Op.getOperand(1).getValueType() == MVT::i1 &&
         "Custom lowering only for i1 stores");

  // First, zero extend to 32 bits, then use a truncating store to 8 bits.

  SDLoc dl(Op);
  StoreSDNode *ST = cast<StoreSDNode>(Op);

  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  MachineMemOperand *MMO = ST->getMemOperand();

  Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(), Value);
  return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
}

// FIXME: Remove this once the ANDI glue bug is fixed:
SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::i1 &&
         "Custom lowering only for i1 results");

  SDLoc DL(Op);
  return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1,
                     Op.getOperand(0));
}

/// LowerSELECT_CC - Lower floating-point select_cc's into an fsel instruction
/// when possible.
SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  // Not FP? Not an fsel.
  if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
      !Op.getOperand(2).getValueType().isFloatingPoint())
    return Op;

  // We might be able to do better than this under some circumstances, but in
  // general, fsel-based lowering of select is a finite-math-only optimization.
  // For more information, see section F.3 of the 2.06 ISA specification.
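  // As a reminder (a sketch of the ISA semantics): fsel computes
  //   fsel FRT, FRA, FRC, FRB  =>  FRT = (FRA >= 0.0) ? FRC : FRB
  // and a NaN in FRA takes the "else" arm, so comparisons lowered this way
  // are only faithful when NaNs and infinities can be ignored.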
5845 if (!DAG.getTarget().Options.NoInfsFPMath || 5846 !DAG.getTarget().Options.NoNaNsFPMath) 5847 return Op; 5848 5849 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 5850 5851 EVT ResVT = Op.getValueType(); 5852 EVT CmpVT = Op.getOperand(0).getValueType(); 5853 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 5854 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3); 5855 SDLoc dl(Op); 5856 5857 // If the RHS of the comparison is a 0.0, we don't need to do the 5858 // subtraction at all. 5859 SDValue Sel1; 5860 if (isFloatingPointZero(RHS)) 5861 switch (CC) { 5862 default: break; // SETUO etc aren't handled by fsel. 5863 case ISD::SETNE: 5864 std::swap(TV, FV); 5865 case ISD::SETEQ: 5866 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 5867 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 5868 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 5869 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 5870 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 5871 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 5872 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV); 5873 case ISD::SETULT: 5874 case ISD::SETLT: 5875 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 5876 case ISD::SETOGE: 5877 case ISD::SETGE: 5878 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 5879 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 5880 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 5881 case ISD::SETUGT: 5882 case ISD::SETGT: 5883 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 5884 case ISD::SETOLE: 5885 case ISD::SETLE: 5886 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 5887 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 5888 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 5889 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV); 5890 } 5891 5892 SDValue Cmp; 5893 switch (CC) { 5894 default: break; // SETUO etc aren't handled by fsel. 
5895 case ISD::SETNE: 5896 std::swap(TV, FV); 5897 case ISD::SETEQ: 5898 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 5899 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 5900 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 5901 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 5902 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 5903 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 5904 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 5905 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 5906 case ISD::SETULT: 5907 case ISD::SETLT: 5908 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 5909 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 5910 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 5911 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 5912 case ISD::SETOGE: 5913 case ISD::SETGE: 5914 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 5915 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 5916 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 5917 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 5918 case ISD::SETUGT: 5919 case ISD::SETGT: 5920 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 5921 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 5922 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 5923 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 5924 case ISD::SETOLE: 5925 case ISD::SETLE: 5926 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 5927 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 5928 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 5929 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 5930 } 5931 return Op; 5932 } 5933 5934 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI, 5935 SelectionDAG &DAG, 5936 SDLoc dl) const { 5937 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 5938 SDValue Src = Op.getOperand(0); 5939 if (Src.getValueType() == MVT::f32) 5940 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 5941 5942 SDValue Tmp; 5943 switch (Op.getSimpleValueType().SimpleTy) { 5944 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 5945 case MVT::i32: 5946 Tmp = DAG.getNode( 5947 Op.getOpcode() == ISD::FP_TO_SINT 5948 ? PPCISD::FCTIWZ 5949 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 5950 dl, MVT::f64, Src); 5951 break; 5952 case MVT::i64: 5953 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 5954 "i64 FP_TO_UINT is supported only with FPCVT"); 5955 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 5956 PPCISD::FCTIDUZ, 5957 dl, MVT::f64, Src); 5958 break; 5959 } 5960 5961 // Convert the FP value to an int value through memory. 5962 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 5963 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()); 5964 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 5965 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 5966 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(FI); 5967 5968 // Emit a store to the stack slot. 
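  // For example, on a big-endian subtarget without STFIWX, an f64->i32
  // fp_to_sint comes out roughly as:
  //   fctiwz f0, f1          # convert; result lives in the low word of f0
  //   stfd   f0, disp(r1)    # spill the full 8-byte FPR image
  //   lwz    r3, disp+4(r1)  # reload just the integer word (the +4 bias)
  // When STFIWX is available, the store below narrows to 4 bytes and no
  // bias is needed.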
5969 SDValue Chain; 5970 if (i32Stack) { 5971 MachineFunction &MF = DAG.getMachineFunction(); 5972 MachineMemOperand *MMO = 5973 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 5974 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 5975 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 5976 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 5977 } else 5978 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, 5979 MPI, false, false, 0); 5980 5981 // Result is a load from the stack slot. If loading 4 bytes, make sure to 5982 // add in a bias. 5983 if (Op.getValueType() == MVT::i32 && !i32Stack) { 5984 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 5985 DAG.getConstant(4, dl, FIPtr.getValueType())); 5986 MPI = MPI.getWithOffset(4); 5987 } 5988 5989 RLI.Chain = Chain; 5990 RLI.Ptr = FIPtr; 5991 RLI.MPI = MPI; 5992 } 5993 5994 /// \brief Custom lowers floating point to integer conversions to use 5995 /// the direct move instructions available in ISA 2.07 to avoid the 5996 /// need for load/store combinations. 5997 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 5998 SelectionDAG &DAG, 5999 SDLoc dl) const { 6000 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6001 SDValue Src = Op.getOperand(0); 6002 6003 if (Src.getValueType() == MVT::f32) 6004 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6005 6006 SDValue Tmp; 6007 switch (Op.getSimpleValueType().SimpleTy) { 6008 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6009 case MVT::i32: 6010 Tmp = DAG.getNode( 6011 Op.getOpcode() == ISD::FP_TO_SINT 6012 ? PPCISD::FCTIWZ 6013 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6014 dl, MVT::f64, Src); 6015 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp); 6016 break; 6017 case MVT::i64: 6018 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6019 "i64 FP_TO_UINT is supported only with FPCVT"); 6020 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 6021 PPCISD::FCTIDUZ, 6022 dl, MVT::f64, Src); 6023 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp); 6024 break; 6025 } 6026 return Tmp; 6027 } 6028 6029 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 6030 SDLoc dl) const { 6031 if (Subtarget.hasDirectMove() && Subtarget.isPPC64()) 6032 return LowerFP_TO_INTDirectMove(Op, DAG, dl); 6033 6034 ReuseLoadInfo RLI; 6035 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6036 6037 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, false, 6038 false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo, 6039 RLI.Ranges); 6040 } 6041 6042 // We're trying to insert a regular store, S, and then a load, L. If the 6043 // incoming value, O, is a load, we might just be able to have our load use the 6044 // address used by O. However, we don't know if anything else will store to 6045 // that address before we can load from it. To prevent this situation, we need 6046 // to insert our load, L, into the chain as a peer of O. To do this, we give L 6047 // the same chain operand as O, we create a token factor from the chain results 6048 // of O and L, and we replace all uses of O's chain result with that token 6049 // factor (see spliceIntoChain below for this last part). 
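// Schematically, for an incoming load O whose chain result feeds users
// U1..Un:
//
//   before:  O.chain ---------------------> U1 ... Un
//   after:   TokenFactor(O.chain, L.chain) -> U1 ... Un
//
// L is given O's input chain, so L and O become peers, and anything that
// was ordered after O is now also ordered after L.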
6050 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT, 6051 ReuseLoadInfo &RLI, 6052 SelectionDAG &DAG, 6053 ISD::LoadExtType ET) const { 6054 SDLoc dl(Op); 6055 if (ET == ISD::NON_EXTLOAD && 6056 (Op.getOpcode() == ISD::FP_TO_UINT || 6057 Op.getOpcode() == ISD::FP_TO_SINT) && 6058 isOperationLegalOrCustom(Op.getOpcode(), 6059 Op.getOperand(0).getValueType())) { 6060 6061 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6062 return true; 6063 } 6064 6065 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op); 6066 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() || 6067 LD->isNonTemporal()) 6068 return false; 6069 if (LD->getMemoryVT() != MemVT) 6070 return false; 6071 6072 RLI.Ptr = LD->getBasePtr(); 6073 if (LD->isIndexed() && LD->getOffset().getOpcode() != ISD::UNDEF) { 6074 assert(LD->getAddressingMode() == ISD::PRE_INC && 6075 "Non-pre-inc AM on PPC?"); 6076 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr, 6077 LD->getOffset()); 6078 } 6079 6080 RLI.Chain = LD->getChain(); 6081 RLI.MPI = LD->getPointerInfo(); 6082 RLI.IsInvariant = LD->isInvariant(); 6083 RLI.Alignment = LD->getAlignment(); 6084 RLI.AAInfo = LD->getAAInfo(); 6085 RLI.Ranges = LD->getRanges(); 6086 6087 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1); 6088 return true; 6089 } 6090 6091 // Given the head of the old chain, ResChain, insert a token factor containing 6092 // it and NewResChain, and make users of ResChain now be users of that token 6093 // factor. 6094 void PPCTargetLowering::spliceIntoChain(SDValue ResChain, 6095 SDValue NewResChain, 6096 SelectionDAG &DAG) const { 6097 if (!ResChain) 6098 return; 6099 6100 SDLoc dl(NewResChain); 6101 6102 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 6103 NewResChain, DAG.getUNDEF(MVT::Other)); 6104 assert(TF.getNode() != NewResChain.getNode() && 6105 "A new TF really is required here"); 6106 6107 DAG.ReplaceAllUsesOfValueWith(ResChain, TF); 6108 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain); 6109 } 6110 6111 /// \brief Custom lowers integer to floating point conversions to use 6112 /// the direct move instructions available in ISA 2.07 to avoid the 6113 /// need for load/store combinations. 6114 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op, 6115 SelectionDAG &DAG, 6116 SDLoc dl) const { 6117 assert((Op.getValueType() == MVT::f32 || 6118 Op.getValueType() == MVT::f64) && 6119 "Invalid floating point type as target of conversion"); 6120 assert(Subtarget.hasFPCVT() && 6121 "Int to FP conversions with direct moves require FPCVT"); 6122 SDValue FP; 6123 SDValue Src = Op.getOperand(0); 6124 bool SinglePrec = Op.getValueType() == MVT::f32; 6125 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32; 6126 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP; 6127 unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) : 6128 (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU); 6129 6130 if (WordInt) { 6131 FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ, 6132 dl, MVT::f64, Src); 6133 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 6134 } 6135 else { 6136 FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src); 6137 FP = DAG.getNode(ConvOp, dl, SinglePrec ? 
MVT::f32 : MVT::f64, FP);
6138   }
6139
6140   return FP;
6141 }
6142
6143 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
6144                                           SelectionDAG &DAG) const {
6145   SDLoc dl(Op);
6146
6147   if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) {
6148     if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64)
6149       return SDValue();
6150
6151     SDValue Value = Op.getOperand(0);
6152     // The values are now known to be -1 (false) or 1 (true). To convert this
6153     // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
6154     // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
6155     Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
6156
6157     SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::f64);
6158     FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64,
6159                           FPHalfs, FPHalfs, FPHalfs, FPHalfs);
6160
6161     Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
6162
6163     if (Op.getValueType() != MVT::v4f64)
6164       Value = DAG.getNode(ISD::FP_ROUND, dl,
6165                           Op.getValueType(), Value,
6166                           DAG.getIntPtrConstant(1, dl));
6167     return Value;
6168   }
6169
6170   // Don't handle ppc_fp128 here; let it be lowered to a libcall.
6171   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
6172     return SDValue();
6173
6174   if (Op.getOperand(0).getValueType() == MVT::i1)
6175     return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
6176                        DAG.getConstantFP(1.0, dl, Op.getValueType()),
6177                        DAG.getConstantFP(0.0, dl, Op.getValueType()));
6178
6179   // If we have direct moves, we can do the conversion entirely in registers,
6180   // skipping the store/load; however, without FPCVT we can't do most conversions.
6181   if (Subtarget.hasDirectMove() && Subtarget.isPPC64() && Subtarget.hasFPCVT())
6182     return LowerINT_TO_FPDirectMove(Op, DAG, dl);
6183
6184   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
6185          "UINT_TO_FP is supported only with FPCVT");
6186
6187   // If we have FCFIDS, then use it when converting to single-precision.
6188   // Otherwise, convert to double-precision and then round.
6189   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
6190                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
6191                                                             : PPCISD::FCFIDS)
6192                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
6193                                                             : PPCISD::FCFID);
6194   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
6195                   ? MVT::f32
6196                   : MVT::f64;
6197
6198   if (Op.getOperand(0).getValueType() == MVT::i64) {
6199     SDValue SINT = Op.getOperand(0);
6200     // When converting to single-precision, we actually need to convert
6201     // to double-precision first and then round to single-precision.
6202     // To avoid double-rounding effects during that operation, we have
6203     // to prepare the input operand.  Bits that might be truncated when
6204     // converting to double-precision are replaced by a bit that won't
6205     // be lost at this stage, but is below the single-precision rounding
6206     // position.
6207     //
6208     // However, if -enable-unsafe-fp-math is in effect, accept double
6209     // rounding to avoid the extra overhead.
6210     if (Op.getValueType() == MVT::f32 &&
6211         !Subtarget.hasFPCVT() &&
6212         !DAG.getTarget().Options.UnsafeFPMath) {
6213
6214       // Twiddle input to make sure the low 11 bits are zero.  (If this
6215       // is the case, we are guaranteed the value will fit into the 53 bit
6216       // mantissa of an IEEE double-precision value without rounding.)
6217 // If any of those low 11 bits were not zero originally, make sure 6218 // bit 12 (value 2048) is set instead, so that the final rounding 6219 // to single-precision gets the correct result. 6220 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 6221 SINT, DAG.getConstant(2047, dl, MVT::i64)); 6222 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 6223 Round, DAG.getConstant(2047, dl, MVT::i64)); 6224 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 6225 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 6226 Round, DAG.getConstant(-2048, dl, MVT::i64)); 6227 6228 // However, we cannot use that value unconditionally: if the magnitude 6229 // of the input value is small, the bit-twiddling we did above might 6230 // end up visibly changing the output. Fortunately, in that case, we 6231 // don't need to twiddle bits since the original input will convert 6232 // exactly to double-precision floating-point already. Therefore, 6233 // construct a conditional to use the original value if the top 11 6234 // bits are all sign-bit copies, and use the rounded value computed 6235 // above otherwise. 6236 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 6237 SINT, DAG.getConstant(53, dl, MVT::i32)); 6238 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 6239 Cond, DAG.getConstant(1, dl, MVT::i64)); 6240 Cond = DAG.getSetCC(dl, MVT::i32, 6241 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT); 6242 6243 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 6244 } 6245 6246 ReuseLoadInfo RLI; 6247 SDValue Bits; 6248 6249 MachineFunction &MF = DAG.getMachineFunction(); 6250 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 6251 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, false, 6252 false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo, 6253 RLI.Ranges); 6254 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6255 } else if (Subtarget.hasLFIWAX() && 6256 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) { 6257 MachineMemOperand *MMO = 6258 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6259 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6260 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6261 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl, 6262 DAG.getVTList(MVT::f64, MVT::Other), 6263 Ops, MVT::i32, MMO); 6264 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6265 } else if (Subtarget.hasFPCVT() && 6266 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) { 6267 MachineMemOperand *MMO = 6268 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6269 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6270 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6271 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl, 6272 DAG.getVTList(MVT::f64, MVT::Other), 6273 Ops, MVT::i32, MMO); 6274 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6275 } else if (((Subtarget.hasLFIWAX() && 6276 SINT.getOpcode() == ISD::SIGN_EXTEND) || 6277 (Subtarget.hasFPCVT() && 6278 SINT.getOpcode() == ISD::ZERO_EXTEND)) && 6279 SINT.getOperand(0).getValueType() == MVT::i32) { 6280 MachineFrameInfo *FrameInfo = MF.getFrameInfo(); 6281 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 6282 6283 int FrameIdx = FrameInfo->CreateStackObject(4, 4, false); 6284 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6285 6286 SDValue Store = 6287 DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx, 6288 MachinePointerInfo::getFixedStack(FrameIdx), 6289 false, false, 0); 6290 6291 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 6292 "Expected an i32 store"); 6293 6294 
RLI.Ptr = FIdx; 6295 RLI.Chain = Store; 6296 RLI.MPI = MachinePointerInfo::getFixedStack(FrameIdx); 6297 RLI.Alignment = 4; 6298 6299 MachineMemOperand *MMO = 6300 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6301 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6302 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6303 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ? 6304 PPCISD::LFIWZX : PPCISD::LFIWAX, 6305 dl, DAG.getVTList(MVT::f64, MVT::Other), 6306 Ops, MVT::i32, MMO); 6307 } else 6308 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 6309 6310 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 6311 6312 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 6313 FP = DAG.getNode(ISD::FP_ROUND, dl, 6314 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 6315 return FP; 6316 } 6317 6318 assert(Op.getOperand(0).getValueType() == MVT::i32 && 6319 "Unhandled INT_TO_FP type in custom expander!"); 6320 // Since we only generate this in 64-bit mode, we can take advantage of 6321 // 64-bit registers. In particular, sign extend the input value into the 6322 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 6323 // then lfd it and fcfid it. 6324 MachineFunction &MF = DAG.getMachineFunction(); 6325 MachineFrameInfo *FrameInfo = MF.getFrameInfo(); 6326 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 6327 6328 SDValue Ld; 6329 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 6330 ReuseLoadInfo RLI; 6331 bool ReusingLoad; 6332 if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI, 6333 DAG))) { 6334 int FrameIdx = FrameInfo->CreateStackObject(4, 4, false); 6335 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6336 6337 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 6338 MachinePointerInfo::getFixedStack(FrameIdx), 6339 false, false, 0); 6340 6341 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 6342 "Expected an i32 store"); 6343 6344 RLI.Ptr = FIdx; 6345 RLI.Chain = Store; 6346 RLI.MPI = MachinePointerInfo::getFixedStack(FrameIdx); 6347 RLI.Alignment = 4; 6348 } 6349 6350 MachineMemOperand *MMO = 6351 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6352 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6353 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6354 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 6355 PPCISD::LFIWZX : PPCISD::LFIWAX, 6356 dl, DAG.getVTList(MVT::f64, MVT::Other), 6357 Ops, MVT::i32, MMO); 6358 if (ReusingLoad) 6359 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG); 6360 } else { 6361 assert(Subtarget.isPPC64() && 6362 "i32->FP without LFIWAX supported only on PPC64"); 6363 6364 int FrameIdx = FrameInfo->CreateStackObject(8, 8, false); 6365 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6366 6367 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, 6368 Op.getOperand(0)); 6369 6370 // STD the extended value into the stack slot. 6371 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Ext64, FIdx, 6372 MachinePointerInfo::getFixedStack(FrameIdx), 6373 false, false, 0); 6374 6375 // Load the value as a double. 6376 Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx, 6377 MachinePointerInfo::getFixedStack(FrameIdx), 6378 false, false, false, 0); 6379 } 6380 6381 // FCFID it and return it. 
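  // Without LFIWAX/LFIWZX, the classic 64-bit sequence produced here is:
  //   extsw rT, r3         # sign-extend the i32 to 64 bits
  //   std   rT, disp(r1)   # store the whole doubleword
  //   lfd   f1, disp(r1)   # reload it into an FPR
  //   fcfid f1, f1         # convert to double (plus frsp if f32 is needed)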
6382   SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
6383   if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
6384     FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
6385                      DAG.getIntPtrConstant(0, dl));
6386   return FP;
6387 }
6388
6389 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
6390                                             SelectionDAG &DAG) const {
6391   SDLoc dl(Op);
6392   /*
6393    The rounding mode is in bits 30:31 of the FPSCR, and has the following
6394    settings:
6395      00 Round to nearest
6396      01 Round to 0
6397      10 Round to +inf
6398      11 Round to -inf
6399
6400   FLT_ROUNDS, on the other hand, expects the following:
6401     -1 Undefined
6402      0 Round to 0
6403      1 Round to nearest
6404      2 Round to +inf
6405      3 Round to -inf
6406
6407   To perform the conversion, we do:
6408     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
6409   */
6410
6411   MachineFunction &MF = DAG.getMachineFunction();
6412   EVT VT = Op.getValueType();
6413   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
6414
6415   // Save FP Control Word to register
6416   EVT NodeTys[] = {
6417     MVT::f64,    // return register
6418     MVT::Glue    // unused in this context
6419   };
6420   SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);
6421
6422   // Save FP register to stack slot
6423   int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false);
6424   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
6425   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain,
6426                                StackSlot, MachinePointerInfo(), false, false, 0);
6427
6428   // Load FP Control Word from low 32 bits of stack slot.
6429   SDValue Four = DAG.getConstant(4, dl, PtrVT);
6430   SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
6431   SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(),
6432                             false, false, false, 0);
6433
6434   // Transform as necessary
6435   SDValue CWD1 =
6436     DAG.getNode(ISD::AND, dl, MVT::i32,
6437                 CWD, DAG.getConstant(3, dl, MVT::i32));
6438   SDValue CWD2 =
6439     DAG.getNode(ISD::SRL, dl, MVT::i32,
6440                 DAG.getNode(ISD::AND, dl, MVT::i32,
6441                             DAG.getNode(ISD::XOR, dl, MVT::i32,
6442                                         CWD, DAG.getConstant(3, dl, MVT::i32)),
6443                             DAG.getConstant(3, dl, MVT::i32)),
6444                 DAG.getConstant(1, dl, MVT::i32));
6445
6446   SDValue RetVal =
6447     DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
6448
6449   return DAG.getNode((VT.getSizeInBits() < 16 ?
6450                       ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
6451 }
6452
6453 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
6454   EVT VT = Op.getValueType();
6455   unsigned BitWidth = VT.getSizeInBits();
6456   SDLoc dl(Op);
6457   assert(Op.getNumOperands() == 3 &&
6458          VT == Op.getOperand(1).getValueType() &&
6459          "Unexpected SHL!");
6460
6461   // Expand into a bunch of logical ops.  Note that these ops
6462   // depend on the PPC behavior for oversized shift amounts.
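  // The PPCISD::SHL/SRL nodes used below return zero (rather than being
  // undefined) when the shift amount is >= BitWidth, so for Amt in
  // [0, 2*BitWidth):
  //   OutLo = Lo << Amt
  //   OutHi = (Hi << Amt) | (Lo >> (BitWidth - Amt)) | (Lo << (Amt - BitWidth))
  // and at most one of the two Lo terms is nonzero for any given Amt.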
6463 SDValue Lo = Op.getOperand(0); 6464 SDValue Hi = Op.getOperand(1); 6465 SDValue Amt = Op.getOperand(2); 6466 EVT AmtVT = Amt.getValueType(); 6467 6468 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6469 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6470 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 6471 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 6472 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 6473 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6474 DAG.getConstant(-BitWidth, dl, AmtVT)); 6475 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 6476 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 6477 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 6478 SDValue OutOps[] = { OutLo, OutHi }; 6479 return DAG.getMergeValues(OutOps, dl); 6480 } 6481 6482 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 6483 EVT VT = Op.getValueType(); 6484 SDLoc dl(Op); 6485 unsigned BitWidth = VT.getSizeInBits(); 6486 assert(Op.getNumOperands() == 3 && 6487 VT == Op.getOperand(1).getValueType() && 6488 "Unexpected SRL!"); 6489 6490 // Expand into a bunch of logical ops. Note that these ops 6491 // depend on the PPC behavior for oversized shift amounts. 6492 SDValue Lo = Op.getOperand(0); 6493 SDValue Hi = Op.getOperand(1); 6494 SDValue Amt = Op.getOperand(2); 6495 EVT AmtVT = Amt.getValueType(); 6496 6497 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6498 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6499 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 6500 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 6501 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 6502 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6503 DAG.getConstant(-BitWidth, dl, AmtVT)); 6504 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 6505 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 6506 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 6507 SDValue OutOps[] = { OutLo, OutHi }; 6508 return DAG.getMergeValues(OutOps, dl); 6509 } 6510 6511 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 6512 SDLoc dl(Op); 6513 EVT VT = Op.getValueType(); 6514 unsigned BitWidth = VT.getSizeInBits(); 6515 assert(Op.getNumOperands() == 3 && 6516 VT == Op.getOperand(1).getValueType() && 6517 "Unexpected SRA!"); 6518 6519 // Expand into a bunch of logical ops, followed by a select_cc. 6520 SDValue Lo = Op.getOperand(0); 6521 SDValue Hi = Op.getOperand(1); 6522 SDValue Amt = Op.getOperand(2); 6523 EVT AmtVT = Amt.getValueType(); 6524 6525 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6526 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6527 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 6528 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 6529 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 6530 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6531 DAG.getConstant(-BitWidth, dl, AmtVT)); 6532 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 6533 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 6534 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT), 6535 Tmp4, Tmp6, ISD::SETLE); 6536 SDValue OutOps[] = { OutLo, OutHi }; 6537 return DAG.getMergeValues(OutOps, dl); 6538 } 6539 6540 //===----------------------------------------------------------------------===// 6541 // Vector related lowering. 
6542 // 6543 6544 /// BuildSplatI - Build a canonical splati of Val with an element size of 6545 /// SplatSize. Cast the result to VT. 6546 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 6547 SelectionDAG &DAG, SDLoc dl) { 6548 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 6549 6550 static const MVT VTys[] = { // canonical VT to use for each size. 6551 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 6552 }; 6553 6554 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 6555 6556 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 6557 if (Val == -1) 6558 SplatSize = 1; 6559 6560 EVT CanonicalVT = VTys[SplatSize-1]; 6561 6562 // Build a canonical splat for this value. 6563 SDValue Elt = DAG.getConstant(Val, dl, MVT::i32); 6564 SmallVector<SDValue, 8> Ops; 6565 Ops.assign(CanonicalVT.getVectorNumElements(), Elt); 6566 SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, Ops); 6567 return DAG.getNode(ISD::BITCAST, dl, ReqVT, Res); 6568 } 6569 6570 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 6571 /// specified intrinsic ID. 6572 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, 6573 SelectionDAG &DAG, SDLoc dl, 6574 EVT DestVT = MVT::Other) { 6575 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 6576 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 6577 DAG.getConstant(IID, dl, MVT::i32), Op); 6578 } 6579 6580 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 6581 /// specified intrinsic ID. 6582 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 6583 SelectionDAG &DAG, SDLoc dl, 6584 EVT DestVT = MVT::Other) { 6585 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 6586 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 6587 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS); 6588 } 6589 6590 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 6591 /// specified intrinsic ID. 6592 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 6593 SDValue Op2, SelectionDAG &DAG, 6594 SDLoc dl, EVT DestVT = MVT::Other) { 6595 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 6596 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 6597 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2); 6598 } 6599 6600 6601 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 6602 /// amount. The result has the specified value type. 6603 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, 6604 EVT VT, SelectionDAG &DAG, SDLoc dl) { 6605 // Force LHS/RHS to be the right type. 6606 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 6607 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 6608 6609 int Ops[16]; 6610 for (unsigned i = 0; i != 16; ++i) 6611 Ops[i] = i + Amt; 6612 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 6613 return DAG.getNode(ISD::BITCAST, dl, VT, T); 6614 } 6615 6616 // If this is a case we can't handle, return null and let the default 6617 // expansion code take care of it. If we CAN select this case, and if it 6618 // selects to a single instruction, return Op. Otherwise, if we can codegen 6619 // this case more efficiently than a constant pool load, lower it to the 6620 // sequence of ops that should be used. 
6621 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 6622 SelectionDAG &DAG) const { 6623 SDLoc dl(Op); 6624 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 6625 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 6626 6627 if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) { 6628 // We first build an i32 vector, load it into a QPX register, 6629 // then convert it to a floating-point vector and compare it 6630 // to a zero vector to get the boolean result. 6631 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 6632 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 6633 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FrameIdx); 6634 EVT PtrVT = getPointerTy(); 6635 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6636 6637 assert(BVN->getNumOperands() == 4 && 6638 "BUILD_VECTOR for v4i1 does not have 4 operands"); 6639 6640 bool IsConst = true; 6641 for (unsigned i = 0; i < 4; ++i) { 6642 if (BVN->getOperand(i).getOpcode() == ISD::UNDEF) continue; 6643 if (!isa<ConstantSDNode>(BVN->getOperand(i))) { 6644 IsConst = false; 6645 break; 6646 } 6647 } 6648 6649 if (IsConst) { 6650 Constant *One = 6651 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0); 6652 Constant *NegOne = 6653 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0); 6654 6655 SmallVector<Constant*, 4> CV(4, NegOne); 6656 for (unsigned i = 0; i < 4; ++i) { 6657 if (BVN->getOperand(i).getOpcode() == ISD::UNDEF) 6658 CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext())); 6659 else if (cast<ConstantSDNode>(BVN->getOperand(i))-> 6660 getConstantIntValue()->isZero()) 6661 continue; 6662 else 6663 CV[i] = One; 6664 } 6665 6666 Constant *CP = ConstantVector::get(CV); 6667 SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(), 6668 16 /* alignment */); 6669 6670 SmallVector<SDValue, 2> Ops; 6671 Ops.push_back(DAG.getEntryNode()); 6672 Ops.push_back(CPIdx); 6673 6674 SmallVector<EVT, 2> ValueVTs; 6675 ValueVTs.push_back(MVT::v4i1); 6676 ValueVTs.push_back(MVT::Other); // chain 6677 SDVTList VTs = DAG.getVTList(ValueVTs); 6678 6679 return DAG.getMemIntrinsicNode(PPCISD::QVLFSb, 6680 dl, VTs, Ops, MVT::v4f32, 6681 MachinePointerInfo::getConstantPool()); 6682 } 6683 6684 SmallVector<SDValue, 4> Stores; 6685 for (unsigned i = 0; i < 4; ++i) { 6686 if (BVN->getOperand(i).getOpcode() == ISD::UNDEF) continue; 6687 6688 unsigned Offset = 4*i; 6689 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 6690 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 6691 6692 unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize(); 6693 if (StoreSize > 4) { 6694 Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl, 6695 BVN->getOperand(i), Idx, 6696 PtrInfo.getWithOffset(Offset), 6697 MVT::i32, false, false, 0)); 6698 } else { 6699 SDValue StoreValue = BVN->getOperand(i); 6700 if (StoreSize < 4) 6701 StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue); 6702 6703 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, 6704 StoreValue, Idx, 6705 PtrInfo.getWithOffset(Offset), 6706 false, false, 0)); 6707 } 6708 } 6709 6710 SDValue StoreChain; 6711 if (!Stores.empty()) 6712 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 6713 else 6714 StoreChain = DAG.getEntryNode(); 6715 6716 // Now load from v4i32 into the QPX register; this will extend it to 6717 // v4i64 but not yet convert it to a floating point. 
Nevertheless, this 6718 // is typed as v4f64 because the QPX register integer states are not 6719 // explicitly represented. 6720 6721 SmallVector<SDValue, 2> Ops; 6722 Ops.push_back(StoreChain); 6723 Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32)); 6724 Ops.push_back(FIdx); 6725 6726 SmallVector<EVT, 2> ValueVTs; 6727 ValueVTs.push_back(MVT::v4f64); 6728 ValueVTs.push_back(MVT::Other); // chain 6729 SDVTList VTs = DAG.getVTList(ValueVTs); 6730 6731 SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, 6732 dl, VTs, Ops, MVT::v4i32, PtrInfo); 6733 LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 6734 DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32), 6735 LoadedVect); 6736 6737 SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::f64); 6738 FPZeros = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64, 6739 FPZeros, FPZeros, FPZeros, FPZeros); 6740 6741 return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ); 6742 } 6743 6744 // All other QPX vectors are handled by generic code. 6745 if (Subtarget.hasQPX()) 6746 return SDValue(); 6747 6748 // Check if this is a splat of a constant value. 6749 APInt APSplatBits, APSplatUndef; 6750 unsigned SplatBitSize; 6751 bool HasAnyUndefs; 6752 if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize, 6753 HasAnyUndefs, 0, !Subtarget.isLittleEndian()) || 6754 SplatBitSize > 32) 6755 return SDValue(); 6756 6757 unsigned SplatBits = APSplatBits.getZExtValue(); 6758 unsigned SplatUndef = APSplatUndef.getZExtValue(); 6759 unsigned SplatSize = SplatBitSize / 8; 6760 6761 // First, handle single instruction cases. 6762 6763 // All zeros? 6764 if (SplatBits == 0) { 6765 // Canonicalize all zero vectors to be v4i32. 6766 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 6767 SDValue Z = DAG.getConstant(0, dl, MVT::i32); 6768 Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z); 6769 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z); 6770 } 6771 return Op; 6772 } 6773 6774 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 6775 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >> 6776 (32-SplatBitSize)); 6777 if (SextVal >= -16 && SextVal <= 15) 6778 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl); 6779 6780 6781 // Two instruction sequences. 6782 6783 // If this value is in the range [-32,30] and is even, use: 6784 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2) 6785 // If this value is in the range [17,31] and is odd, use: 6786 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16) 6787 // If this value is in the range [-31,-17] and is odd, use: 6788 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16) 6789 // Note the last two are three-instruction sequences. 6790 if (SextVal >= -32 && SextVal <= 31) { 6791 // To avoid having these optimizations undone by constant folding, 6792 // we convert to a pseudo that will be expanded later into one of 6793 // the above forms. 6794 SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32); 6795 EVT VT = (SplatSize == 1 ? MVT::v16i8 : 6796 (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32)); 6797 SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32); 6798 SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize); 6799 if (VT == Op.getValueType()) 6800 return RetVal; 6801 else 6802 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal); 6803 } 6804 6805 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is 6806 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). 
This is important
6807   // for fneg/fabs.
6808   if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
6809     // Make -1 and vspltisw -1:
6810     SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);
6811
6812     // Make the VSLW intrinsic, computing 0x8000_0000.
6813     SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
6814                                    OnesV, DAG, dl);
6815
6816     // xor by OnesV to invert it.
6817     Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
6818     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
6819   }
6820
6821   // Check to see if this is a wide variety of vsplti*, binop self cases.
6822   static const signed char SplatCsts[] = {
6823     -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
6824     -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
6825   };
6826
6827   for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
6828     // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
6829     // cases which are ambiguous (e.g. formation of 0x8000_0000).
6830     int i = SplatCsts[idx];
6831
6832     // Figure out what shift amount will be used by altivec if shifted by i in
6833     // this splat size.
6834     unsigned TypeShiftAmt = i & (SplatBitSize-1);
6835
6836     // vsplti + shl self.
6837     if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
6838       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
6839       static const unsigned IIDs[] = { // Intrinsic to use for each size.
6840         Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
6841         Intrinsic::ppc_altivec_vslw
6842       };
6843       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
6844       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
6845     }
6846
6847     // vsplti + srl self.
6848     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
6849       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
6850       static const unsigned IIDs[] = { // Intrinsic to use for each size.
6851         Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
6852         Intrinsic::ppc_altivec_vsrw
6853       };
6854       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
6855       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
6856     }
6857
6858     // vsplti + sra self (note: an arithmetic shift, unlike the srl case).
6859     if (SextVal == (i >> TypeShiftAmt)) {
6860       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
6861       static const unsigned IIDs[] = { // Intrinsic to use for each size.
6862         Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
6863         Intrinsic::ppc_altivec_vsraw
6864       };
6865       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
6866       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
6867     }
6868
6869     // vsplti + rol self.
6870     if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
6871                          ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
6872       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
6873       static const unsigned IIDs[] = { // Intrinsic to use for each size.
6874         Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
6875         Intrinsic::ppc_altivec_vrlw
6876       };
6877       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
6878       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
6879     }
6880
6881     // t = vsplti c, result = vsldoi t, t, 1
6882     if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
6883       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
6884       return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG, dl);
6885     }
6886     // t = vsplti c, result = vsldoi t, t, 2
6887     if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ?
0xFFFF : 0))) { 6888 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 6889 return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG, dl); 6890 } 6891 // t = vsplti c, result = vsldoi t, t, 3 6892 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) { 6893 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 6894 return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl); 6895 } 6896 } 6897 6898 return SDValue(); 6899 } 6900 6901 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 6902 /// the specified operations to build the shuffle. 6903 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 6904 SDValue RHS, SelectionDAG &DAG, 6905 SDLoc dl) { 6906 unsigned OpNum = (PFEntry >> 26) & 0x0F; 6907 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 6908 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 6909 6910 enum { 6911 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 6912 OP_VMRGHW, 6913 OP_VMRGLW, 6914 OP_VSPLTISW0, 6915 OP_VSPLTISW1, 6916 OP_VSPLTISW2, 6917 OP_VSPLTISW3, 6918 OP_VSLDOI4, 6919 OP_VSLDOI8, 6920 OP_VSLDOI12 6921 }; 6922 6923 if (OpNum == OP_COPY) { 6924 if (LHSID == (1*9+2)*9+3) return LHS; 6925 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 6926 return RHS; 6927 } 6928 6929 SDValue OpLHS, OpRHS; 6930 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 6931 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 6932 6933 int ShufIdxs[16]; 6934 switch (OpNum) { 6935 default: llvm_unreachable("Unknown i32 permute!"); 6936 case OP_VMRGHW: 6937 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 6938 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 6939 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 6940 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 6941 break; 6942 case OP_VMRGLW: 6943 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 6944 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 6945 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 6946 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 6947 break; 6948 case OP_VSPLTISW0: 6949 for (unsigned i = 0; i != 16; ++i) 6950 ShufIdxs[i] = (i&3)+0; 6951 break; 6952 case OP_VSPLTISW1: 6953 for (unsigned i = 0; i != 16; ++i) 6954 ShufIdxs[i] = (i&3)+4; 6955 break; 6956 case OP_VSPLTISW2: 6957 for (unsigned i = 0; i != 16; ++i) 6958 ShufIdxs[i] = (i&3)+8; 6959 break; 6960 case OP_VSPLTISW3: 6961 for (unsigned i = 0; i != 16; ++i) 6962 ShufIdxs[i] = (i&3)+12; 6963 break; 6964 case OP_VSLDOI4: 6965 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl); 6966 case OP_VSLDOI8: 6967 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl); 6968 case OP_VSLDOI12: 6969 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl); 6970 } 6971 EVT VT = OpLHS.getValueType(); 6972 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS); 6973 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS); 6974 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs); 6975 return DAG.getNode(ISD::BITCAST, dl, VT, T); 6976 } 6977 6978 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 6979 /// is a shuffle we can handle in a single instruction, return it. Otherwise, 6980 /// return the code it can be lowered into. 
Worst case, it can always be 6981 /// lowered into a vperm. 6982 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 6983 SelectionDAG &DAG) const { 6984 SDLoc dl(Op); 6985 SDValue V1 = Op.getOperand(0); 6986 SDValue V2 = Op.getOperand(1); 6987 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 6988 EVT VT = Op.getValueType(); 6989 bool isLittleEndian = Subtarget.isLittleEndian(); 6990 6991 if (Subtarget.hasQPX()) { 6992 if (VT.getVectorNumElements() != 4) 6993 return SDValue(); 6994 6995 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 6996 6997 int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp); 6998 if (AlignIdx != -1) { 6999 return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2, 7000 DAG.getConstant(AlignIdx, dl, MVT::i32)); 7001 } else if (SVOp->isSplat()) { 7002 int SplatIdx = SVOp->getSplatIndex(); 7003 if (SplatIdx >= 4) { 7004 std::swap(V1, V2); 7005 SplatIdx -= 4; 7006 } 7007 7008 // FIXME: If SplatIdx == 0 and the input came from a load, then there is 7009 // nothing to do. 7010 7011 return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1, 7012 DAG.getConstant(SplatIdx, dl, MVT::i32)); 7013 } 7014 7015 // Lower this into a qvgpci/qvfperm pair. 7016 7017 // Compute the qvgpci literal 7018 unsigned idx = 0; 7019 for (unsigned i = 0; i < 4; ++i) { 7020 int m = SVOp->getMaskElt(i); 7021 unsigned mm = m >= 0 ? (unsigned) m : i; 7022 idx |= mm << (3-i)*3; 7023 } 7024 7025 SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64, 7026 DAG.getConstant(idx, dl, MVT::i32)); 7027 return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3); 7028 } 7029 7030 // Cases that are handled by instructions that take permute immediates 7031 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 7032 // selected by the instruction selector. 7033 if (V2.getOpcode() == ISD::UNDEF) { 7034 if (PPC::isSplatShuffleMask(SVOp, 1) || 7035 PPC::isSplatShuffleMask(SVOp, 2) || 7036 PPC::isSplatShuffleMask(SVOp, 4) || 7037 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) || 7038 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) || 7039 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) || 7040 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 || 7041 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) || 7042 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) || 7043 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) || 7044 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) || 7045 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) || 7046 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG)) { 7047 return Op; 7048 } 7049 } 7050 7051 // Altivec has a variety of "shuffle immediates" that take two vector inputs 7052 // and produce a fixed permutation. If any of these match, do not lower to 7053 // VPERM. 7054 unsigned int ShuffleKind = isLittleEndian ? 2 : 0; 7055 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) || 7056 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) || 7057 PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) || 7058 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 || 7059 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) || 7060 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) || 7061 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) || 7062 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) || 7063 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) || 7064 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG)) 7065 return Op; 7066 7067 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 7068 // perfect shuffle table to emit an optimal matching sequence. 
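  // Each 32-bit PFEntry, as decoded in GeneratePerfectShuffle above, packs:
  //   bits 31:30  cost of the sequence, in instructions
  //   bits 29:26  operation (OP_COPY, OP_VMRGHW, OP_VSLDOI4, ...)
  //   bits 25:13  table index of the left operand
  //   bits 12:0   table index of the right operand
  // Each PFIndexes[i] computed below is in [0, 8] (source element, or 8 for
  // undef), so the four of them together form a base-9 table index.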
7069 ArrayRef<int> PermMask = SVOp->getMask(); 7070 7071 unsigned PFIndexes[4]; 7072 bool isFourElementShuffle = true; 7073 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 7074 unsigned EltNo = 8; // Start out undef. 7075 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 7076 if (PermMask[i*4+j] < 0) 7077 continue; // Undef, ignore it. 7078 7079 unsigned ByteSource = PermMask[i*4+j]; 7080 if ((ByteSource & 3) != j) { 7081 isFourElementShuffle = false; 7082 break; 7083 } 7084 7085 if (EltNo == 8) { 7086 EltNo = ByteSource/4; 7087 } else if (EltNo != ByteSource/4) { 7088 isFourElementShuffle = false; 7089 break; 7090 } 7091 } 7092 PFIndexes[i] = EltNo; 7093 } 7094 7095 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 7096 // perfect shuffle vector to determine if it is cost effective to do this as 7097 // discrete instructions, or whether we should use a vperm. 7098 // For now, we skip this for little endian until such time as we have a 7099 // little-endian perfect shuffle table. 7100 if (isFourElementShuffle && !isLittleEndian) { 7101 // Compute the index in the perfect shuffle table. 7102 unsigned PFTableIndex = 7103 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 7104 7105 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 7106 unsigned Cost = (PFEntry >> 30); 7107 7108 // Determining when to avoid vperm is tricky. Many things affect the cost 7109 // of vperm, particularly how many times the perm mask needs to be computed. 7110 // For example, if the perm mask can be hoisted out of a loop or is already 7111 // used (perhaps because there are multiple permutes with the same shuffle 7112 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 7113 // the loop requires an extra register. 7114 // 7115 // As a compromise, we only emit discrete instructions if the shuffle can be 7116 // generated in 3 or fewer operations. When we have loop information 7117 // available, if this block is within a loop, we should avoid using vperm 7118 // for 3-operation perms and use a constant pool load instead. 7119 if (Cost < 3) 7120 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 7121 } 7122 7123 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 7124 // vector that will get spilled to the constant pool. 7125 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 7126 7127 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 7128 // that it is in input element units, not in bytes. Convert now. 7129 7130 // For little endian, the order of the input vectors is reversed, and 7131 // the permutation mask is complemented with respect to 31. This is 7132 // necessary to produce proper semantics with the big-endian-biased vperm 7133 // instruction. 7134 EVT EltVT = V1.getValueType().getVectorElementType(); 7135 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 7136 7137 SmallVector<SDValue, 16> ResultMask; 7138 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 7139 unsigned SrcElt = PermMask[i] < 0 ? 
0 : PermMask[i]; 7140 7141 for (unsigned j = 0; j != BytesPerElement; ++j) 7142 if (isLittleEndian) 7143 ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j), 7144 dl, MVT::i32)); 7145 else 7146 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl, 7147 MVT::i32)); 7148 } 7149 7150 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, 7151 ResultMask); 7152 if (isLittleEndian) 7153 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 7154 V2, V1, VPermMask); 7155 else 7156 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 7157 V1, V2, VPermMask); 7158 } 7159 7160 /// getAltivecCompareInfo - Given an intrinsic, return false if it is not an 7161 /// altivec comparison. If it is, return true and fill in Opc/isDot with 7162 /// information about the intrinsic. 7163 static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, 7164 bool &isDot, const PPCSubtarget &Subtarget) { 7165 unsigned IntrinsicID = 7166 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 7167 CompareOpc = -1; 7168 isDot = false; 7169 switch (IntrinsicID) { 7170 default: return false; 7171 // Comparison predicates. 7172 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; 7173 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; 7174 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; 7175 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; 7176 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; 7177 case Intrinsic::ppc_altivec_vcmpequd_p: 7178 if (Subtarget.hasP8Altivec()) { 7179 CompareOpc = 199; 7180 isDot = 1; 7181 } 7182 else 7183 return false; 7184 7185 break; 7186 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; 7187 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; 7188 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; 7189 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; 7190 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; 7191 case Intrinsic::ppc_altivec_vcmpgtsd_p: 7192 if (Subtarget.hasP8Altivec()) { 7193 CompareOpc = 967; 7194 isDot = 1; 7195 } 7196 else 7197 return false; 7198 7199 break; 7200 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; 7201 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; 7202 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; 7203 case Intrinsic::ppc_altivec_vcmpgtud_p: 7204 if (Subtarget.hasP8Altivec()) { 7205 CompareOpc = 711; 7206 isDot = 1; 7207 } 7208 else 7209 return false; 7210 7211 break; 7212 7213 // Normal Comparisons. 
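  // As above, the CompareOpc values are the extended-opcode fields of the
  // corresponding VC-form AltiVec instructions (6 for vcmpequb, 198 for
  // vcmpeqfp, and so on); the predicate forms above use the same encodings
  // with the record (dot) bit set.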
7214 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 7215 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 7216 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 7217 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 7218 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 7219 case Intrinsic::ppc_altivec_vcmpequd: 7220 if (Subtarget.hasP8Altivec()) { 7221 CompareOpc = 199; 7222 isDot = 0; 7223 } 7224 else 7225 return false; 7226 7227 break; 7228 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 7229 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 7230 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 7231 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 7232 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 7233 case Intrinsic::ppc_altivec_vcmpgtsd: 7234 if (Subtarget.hasP8Altivec()) { 7235 CompareOpc = 967; 7236 isDot = 0; 7237 } 7238 else 7239 return false; 7240 7241 break; 7242 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 7243 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 7244 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 7245 case Intrinsic::ppc_altivec_vcmpgtud: 7246 if (Subtarget.hasP8Altivec()) { 7247 CompareOpc = 711; 7248 isDot = 0; 7249 } 7250 else 7251 return false; 7252 7253 break; 7254 } 7255 return true; 7256 } 7257 7258 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 7259 /// lower, do it, otherwise return null. 7260 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 7261 SelectionDAG &DAG) const { 7262 // If this is a lowered altivec predicate compare, CompareOpc is set to the 7263 // opcode number of the comparison. 7264 SDLoc dl(Op); 7265 int CompareOpc; 7266 bool isDot; 7267 if (!getAltivecCompareInfo(Op, CompareOpc, isDot, Subtarget)) 7268 return SDValue(); // Don't custom lower most intrinsics. 7269 7270 // If this is a non-dot comparison, make the VCMP node and we are done. 7271 if (!isDot) { 7272 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 7273 Op.getOperand(1), Op.getOperand(2), 7274 DAG.getConstant(CompareOpc, dl, MVT::i32)); 7275 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 7276 } 7277 7278 // Create the PPCISD altivec 'dot' comparison node. 7279 SDValue Ops[] = { 7280 Op.getOperand(2), // LHS 7281 Op.getOperand(3), // RHS 7282 DAG.getConstant(CompareOpc, dl, MVT::i32) 7283 }; 7284 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue }; 7285 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 7286 7287 // Now that we have the comparison, emit a copy from the CR to a GPR. 7288 // This is flagged to the above dot comparison. 7289 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32, 7290 DAG.getRegister(PPC::CR6, MVT::i32), 7291 CompNode.getValue(1)); 7292 7293 // Unpack the result based on how the target uses it. 7294 unsigned BitNo; // Bit # of CR6. 7295 bool InvertBit; // Invert result? 7296 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 7297 default: // Can't happen, don't crash on invalid number though. 7298 case 0: // Return the value of the EQ bit of CR6. 7299 BitNo = 0; InvertBit = false; 7300 break; 7301 case 1: // Return the inverted value of the EQ bit of CR6. 
7302     BitNo = 0; InvertBit = true;
7303     break;
7304   case 2:   // Return the value of the LT bit of CR6.
7305     BitNo = 2; InvertBit = false;
7306     break;
7307   case 3:   // Return the inverted value of the LT bit of CR6.
7308     BitNo = 2; InvertBit = true;
7309     break;
7310   }
7311
7312   // Shift the bit into the low position.
7313   Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
7314                       DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
7315   // Isolate the bit.
7316   Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
7317                       DAG.getConstant(1, dl, MVT::i32));
7318
7319   // If we are supposed to, toggle the bit.
7320   if (InvertBit)
7321     Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
7322                         DAG.getConstant(1, dl, MVT::i32));
7323   return Flags;
7324 }
7325
7326 SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
7327                                                   SelectionDAG &DAG) const {
7328   SDLoc dl(Op);
7329   // For v2i64 (VSX), we can pattern match the v2i32 case (using fp <-> int
7330   // instructions), but for smaller types, we need to first extend up to v2i32
7331   // before going any further.
7332   if (Op.getValueType() == MVT::v2i64) {
7333     EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
7334     if (ExtVT != MVT::v2i32) {
7335       Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0));
7336       Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op,
7337                        DAG.getValueType(EVT::getVectorVT(*DAG.getContext(),
7338                                         ExtVT.getVectorElementType(), 4)));
7339       Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op);
7340       Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op,
7341                        DAG.getValueType(MVT::v2i32));
7342     }
7343
7344     return Op;
7345   }
7346
7347   return SDValue();
7348 }
7349
7350 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
7351                                                  SelectionDAG &DAG) const {
7352   SDLoc dl(Op);
7353   // Create a stack slot that is 16-byte aligned.
7354   MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
7355   int FrameIdx = FrameInfo->CreateStackObject(16, 16, false);
7356   EVT PtrVT = getPointerTy();
7357   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
7358
7359   // Store the input value into Value#0 of the stack slot.
7360   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
7361                                Op.getOperand(0), FIdx, MachinePointerInfo(),
7362                                false, false, 0);
7363   // Load it out.
7364   return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo(),
7365                      false, false, false, 0);
7366 }
7367
7368 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
7369                                                    SelectionDAG &DAG) const {
7370   SDLoc dl(Op);
7371   SDNode *N = Op.getNode();
7372
7373   assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
7374          "Unknown extract_vector_elt type");
7375
7376   SDValue Value = N->getOperand(0);
7377
7378   // The first part of this is like the store lowering except that we don't
7379   // need to track the chain.
7380
7381   // The values are now known to be -1 (false) or 1 (true). To convert this
7382   // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
7383   // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
7384   Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
7385
7386   // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
7387   // understand how to form the extending load.
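  // Lane-wise, the FMA below computes Value * 0.5 + 0.5, i.e.
  //   -1.0 * 0.5 + 0.5 = 0.0   (false)
  //    1.0 * 0.5 + 0.5 = 1.0   (true)
  // which is the integer encoding we want to store out.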
7388 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::f64); 7389 FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64, 7390 FPHalfs, FPHalfs, FPHalfs, FPHalfs); 7391 7392 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 7393 7394 // Now convert to an integer and store. 7395 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 7396 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 7397 Value); 7398 7399 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 7400 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 7401 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FrameIdx); 7402 EVT PtrVT = getPointerTy(); 7403 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7404 7405 SDValue StoreChain = DAG.getEntryNode(); 7406 SmallVector<SDValue, 2> Ops; 7407 Ops.push_back(StoreChain); 7408 Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32)); 7409 Ops.push_back(Value); 7410 Ops.push_back(FIdx); 7411 7412 SmallVector<EVT, 2> ValueVTs; 7413 ValueVTs.push_back(MVT::Other); // chain 7414 SDVTList VTs = DAG.getVTList(ValueVTs); 7415 7416 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 7417 dl, VTs, Ops, MVT::v4i32, PtrInfo); 7418 7419 // Extract the value requested. 7420 unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 7421 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 7422 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 7423 7424 SDValue IntVal = DAG.getLoad(MVT::i32, dl, StoreChain, Idx, 7425 PtrInfo.getWithOffset(Offset), 7426 false, false, false, 0); 7427 7428 if (!Subtarget.useCRBits()) 7429 return IntVal; 7430 7431 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal); 7432 } 7433 7434 /// Lowering for QPX v4i1 loads 7435 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op, 7436 SelectionDAG &DAG) const { 7437 SDLoc dl(Op); 7438 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode()); 7439 SDValue LoadChain = LN->getChain(); 7440 SDValue BasePtr = LN->getBasePtr(); 7441 7442 if (Op.getValueType() == MVT::v4f64 || 7443 Op.getValueType() == MVT::v4f32) { 7444 EVT MemVT = LN->getMemoryVT(); 7445 unsigned Alignment = LN->getAlignment(); 7446 7447 // If this load is properly aligned, then it is legal. 
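  // For example (illustrative): a v4f64 load has a 32-byte store size, so any
  // alignment below 32 bytes takes the expansion below, which issues four
  // scalar loads Stride bytes apart (8 for f64 elements) and recombines them
  // with a BUILD_VECTOR.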
7448 if (Alignment >= MemVT.getStoreSize()) 7449 return Op; 7450 7451 EVT ScalarVT = Op.getValueType().getScalarType(), 7452 ScalarMemVT = MemVT.getScalarType(); 7453 unsigned Stride = ScalarMemVT.getStoreSize(); 7454 7455 SmallVector<SDValue, 8> Vals, LoadChains; 7456 for (unsigned Idx = 0; Idx < 4; ++Idx) { 7457 SDValue Load; 7458 if (ScalarVT != ScalarMemVT) 7459 Load = 7460 DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain, 7461 BasePtr, 7462 LN->getPointerInfo().getWithOffset(Idx*Stride), 7463 ScalarMemVT, LN->isVolatile(), LN->isNonTemporal(), 7464 LN->isInvariant(), MinAlign(Alignment, Idx*Stride), 7465 LN->getAAInfo()); 7466 else 7467 Load = 7468 DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr, 7469 LN->getPointerInfo().getWithOffset(Idx*Stride), 7470 LN->isVolatile(), LN->isNonTemporal(), 7471 LN->isInvariant(), MinAlign(Alignment, Idx*Stride), 7472 LN->getAAInfo()); 7473 7474 if (Idx == 0 && LN->isIndexed()) { 7475 assert(LN->getAddressingMode() == ISD::PRE_INC && 7476 "Unknown addressing mode on vector load"); 7477 Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(), 7478 LN->getAddressingMode()); 7479 } 7480 7481 Vals.push_back(Load); 7482 LoadChains.push_back(Load.getValue(1)); 7483 7484 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 7485 DAG.getConstant(Stride, dl, 7486 BasePtr.getValueType())); 7487 } 7488 7489 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 7490 SDValue Value = DAG.getNode(ISD::BUILD_VECTOR, dl, 7491 Op.getValueType(), Vals); 7492 7493 if (LN->isIndexed()) { 7494 SDValue RetOps[] = { Value, Vals[0].getValue(1), TF }; 7495 return DAG.getMergeValues(RetOps, dl); 7496 } 7497 7498 SDValue RetOps[] = { Value, TF }; 7499 return DAG.getMergeValues(RetOps, dl); 7500 } 7501 7502 assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower"); 7503 assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported"); 7504 7505 // To lower v4i1 from a byte array, we load the byte elements of the 7506 // vector and then reuse the BUILD_VECTOR logic. 7507 7508 SmallVector<SDValue, 4> VectElmts, VectElmtChains; 7509 for (unsigned i = 0; i < 4; ++i) { 7510 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 7511 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 7512 7513 VectElmts.push_back(DAG.getExtLoad(ISD::EXTLOAD, 7514 dl, MVT::i32, LoadChain, Idx, 7515 LN->getPointerInfo().getWithOffset(i), 7516 MVT::i8 /* memory type */, 7517 LN->isVolatile(), LN->isNonTemporal(), 7518 LN->isInvariant(), 7519 1 /* alignment */, LN->getAAInfo())); 7520 VectElmtChains.push_back(VectElmts[i].getValue(1)); 7521 } 7522 7523 LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains); 7524 SDValue Value = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i1, VectElmts); 7525 7526 SDValue RVals[] = { Value, LoadChain }; 7527 return DAG.getMergeValues(RVals, dl); 7528 } 7529 7530 /// Lowering for QPX v4i1 stores 7531 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op, 7532 SelectionDAG &DAG) const { 7533 SDLoc dl(Op); 7534 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode()); 7535 SDValue StoreChain = SN->getChain(); 7536 SDValue BasePtr = SN->getBasePtr(); 7537 SDValue Value = SN->getValue(); 7538 7539 if (Value.getValueType() == MVT::v4f64 || 7540 Value.getValueType() == MVT::v4f32) { 7541 EVT MemVT = SN->getMemoryVT(); 7542 unsigned Alignment = SN->getAlignment(); 7543 7544 // If this store is properly aligned, then it is legal. 
7545 if (Alignment >= MemVT.getStoreSize()) 7546 return Op; 7547 7548 EVT ScalarVT = Value.getValueType().getScalarType(), 7549 ScalarMemVT = MemVT.getScalarType(); 7550 unsigned Stride = ScalarMemVT.getStoreSize(); 7551 7552 SmallVector<SDValue, 8> Stores; 7553 for (unsigned Idx = 0; Idx < 4; ++Idx) { 7554 SDValue Ex = 7555 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value, 7556 DAG.getConstant(Idx, dl, getVectorIdxTy())); 7557 SDValue Store; 7558 if (ScalarVT != ScalarMemVT) 7559 Store = 7560 DAG.getTruncStore(StoreChain, dl, Ex, BasePtr, 7561 SN->getPointerInfo().getWithOffset(Idx*Stride), 7562 ScalarMemVT, SN->isVolatile(), SN->isNonTemporal(), 7563 MinAlign(Alignment, Idx*Stride), SN->getAAInfo()); 7564 else 7565 Store = 7566 DAG.getStore(StoreChain, dl, Ex, BasePtr, 7567 SN->getPointerInfo().getWithOffset(Idx*Stride), 7568 SN->isVolatile(), SN->isNonTemporal(), 7569 MinAlign(Alignment, Idx*Stride), SN->getAAInfo()); 7570 7571 if (Idx == 0 && SN->isIndexed()) { 7572 assert(SN->getAddressingMode() == ISD::PRE_INC && 7573 "Unknown addressing mode on vector store"); 7574 Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(), 7575 SN->getAddressingMode()); 7576 } 7577 7578 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 7579 DAG.getConstant(Stride, dl, 7580 BasePtr.getValueType())); 7581 Stores.push_back(Store); 7582 } 7583 7584 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 7585 7586 if (SN->isIndexed()) { 7587 SDValue RetOps[] = { TF, Stores[0].getValue(1) }; 7588 return DAG.getMergeValues(RetOps, dl); 7589 } 7590 7591 return TF; 7592 } 7593 7594 assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported"); 7595 assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower"); 7596 7597 // The values are now known to be -1 (false) or 1 (true). To convert this 7598 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 7599 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 7600 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 7601 7602 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 7603 // understand how to form the extending load. 7604 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::f64); 7605 FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64, 7606 FPHalfs, FPHalfs, FPHalfs, FPHalfs); 7607 7608 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 7609 7610 // Now convert to an integer and store. 7611 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 7612 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 7613 Value); 7614 7615 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 7616 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 7617 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FrameIdx); 7618 EVT PtrVT = getPointerTy(); 7619 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7620 7621 SmallVector<SDValue, 2> Ops; 7622 Ops.push_back(StoreChain); 7623 Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32)); 7624 Ops.push_back(Value); 7625 Ops.push_back(FIdx); 7626 7627 SmallVector<EVT, 2> ValueVTs; 7628 ValueVTs.push_back(MVT::Other); // chain 7629 SDVTList VTs = DAG.getVTList(ValueVTs); 7630 7631 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 7632 dl, VTs, Ops, MVT::v4i32, PtrInfo); 7633 7634 // Move data into the byte array. 
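  // In other words (a sketch of the sequence emitted below, not extra code):
  // the qvstfiw above wrote the four converted words into the 16-byte stack
  // slot; the first loop below reloads each 32-bit word, and the second loop
  // truncating-stores its low byte into the in-memory v4i1 byte array.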
7635   SmallVector<SDValue, 4> Loads, LoadChains;
7636   for (unsigned i = 0; i < 4; ++i) {
7637     unsigned Offset = 4*i;
7638     SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
7639     Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
7640 
7641     Loads.push_back(DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
7642                                 PtrInfo.getWithOffset(Offset),
7643                                 false, false, false, 0));
7644     LoadChains.push_back(Loads[i].getValue(1));
7645   }
7646 
7647   StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
7648 
7649   SmallVector<SDValue, 4> Stores;
7650   for (unsigned i = 0; i < 4; ++i) {
7651     SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
7652     Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
7653 
7654     Stores.push_back(DAG.getTruncStore(StoreChain, dl, Loads[i], Idx,
7655                                        SN->getPointerInfo().getWithOffset(i),
7656                                        MVT::i8 /* memory type */,
7657                                        SN->isVolatile(), SN->isNonTemporal(),
7658                                        1 /* alignment */, SN->getAAInfo()));
7659   }
7660 
7661   StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
7662 
7663   return StoreChain;
7664 }
7665 
7666 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
7667   SDLoc dl(Op);
7668   if (Op.getValueType() == MVT::v4i32) {
7669     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
7670 
7671     SDValue Zero  = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl);
7672     SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl); // +16 as shift amt.
7673 
7674     SDValue RHSSwap =   // = vrlw RHS, 16
7675       BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
7676 
7677     // Shrinkify inputs to v8i16.
7678     LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
7679     RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
7680     RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
7681 
7682     // Low parts multiplied together, generating 32-bit results (we ignore the
7683     // top parts).
7684     SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
7685                                       LHS, RHS, DAG, dl, MVT::v4i32);
7686 
7687     SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
7688                                       LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
7689     // Shift the high parts up 16 bits.
7690     HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
7691                               Neg16, DAG, dl);
7692     return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
7693   } else if (Op.getValueType() == MVT::v8i16) {
7694     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
7695 
7696     SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);
7697 
7698     return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
7699                             LHS, RHS, Zero, DAG, dl);
7700   } else if (Op.getValueType() == MVT::v16i8) {
7701     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
7702     bool isLittleEndian = Subtarget.isLittleEndian();
7703 
7704     // Multiply the even 8-bit parts, producing 16-bit products.
7705     SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
7706                                          LHS, RHS, DAG, dl, MVT::v8i16);
7707     EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
7708 
7709     // Multiply the odd 8-bit parts, producing 16-bit products.
7710     SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
7711                                         LHS, RHS, DAG, dl, MVT::v8i16);
7712     OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
7713 
7714     // Merge the results together. Because vmuleub and vmuloub are
7715     // instructions with a big-endian bias, we must reverse the
7716     // element numbering and reverse the meaning of "odd" and "even"
7717     // when generating little endian code.
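    // Concretely (illustrative): viewing each 16-bit product as two bytes,
    // the masks built below interleave the low-order bytes of the products:
    //   big endian:    Ops = { 1, 17, 3, 19, ... }   (odd byte indices, since
    //                  BE keeps a product's low byte at the odd position)
    //   little endian: Ops = { 0, 16, 2, 18, ... }   with the even- and
    //                  odd-multiply results swapped.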
7718 int Ops[16]; 7719 for (unsigned i = 0; i != 8; ++i) { 7720 if (isLittleEndian) { 7721 Ops[i*2 ] = 2*i; 7722 Ops[i*2+1] = 2*i+16; 7723 } else { 7724 Ops[i*2 ] = 2*i+1; 7725 Ops[i*2+1] = 2*i+1+16; 7726 } 7727 } 7728 if (isLittleEndian) 7729 return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops); 7730 else 7731 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 7732 } else { 7733 llvm_unreachable("Unknown mul to lower!"); 7734 } 7735 } 7736 7737 /// LowerOperation - Provide custom lowering hooks for some operations. 7738 /// 7739 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 7740 switch (Op.getOpcode()) { 7741 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 7742 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 7743 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 7744 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 7745 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 7746 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 7747 case ISD::SETCC: return LowerSETCC(Op, DAG); 7748 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 7749 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 7750 case ISD::VASTART: 7751 return LowerVASTART(Op, DAG, Subtarget); 7752 7753 case ISD::VAARG: 7754 return LowerVAARG(Op, DAG, Subtarget); 7755 7756 case ISD::VACOPY: 7757 return LowerVACOPY(Op, DAG, Subtarget); 7758 7759 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, Subtarget); 7760 case ISD::DYNAMIC_STACKALLOC: 7761 return LowerDYNAMIC_STACKALLOC(Op, DAG, Subtarget); 7762 7763 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 7764 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 7765 7766 case ISD::LOAD: return LowerLOAD(Op, DAG); 7767 case ISD::STORE: return LowerSTORE(Op, DAG); 7768 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); 7769 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 7770 case ISD::FP_TO_UINT: 7771 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, 7772 SDLoc(Op)); 7773 case ISD::UINT_TO_FP: 7774 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 7775 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 7776 7777 // Lower 64-bit shifts. 7778 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 7779 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 7780 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 7781 7782 // Vector-related lowering. 7783 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 7784 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 7785 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 7786 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 7787 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG); 7788 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 7789 case ISD::MUL: return LowerMUL(Op, DAG); 7790 7791 // For counter-based loop handling. 7792 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 7793 7794 // Frame & Return address. 
7795 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 7796 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 7797 } 7798 } 7799 7800 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 7801 SmallVectorImpl<SDValue>&Results, 7802 SelectionDAG &DAG) const { 7803 SDLoc dl(N); 7804 switch (N->getOpcode()) { 7805 default: 7806 llvm_unreachable("Do not know how to custom type legalize this operation!"); 7807 case ISD::READCYCLECOUNTER: { 7808 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 7809 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0)); 7810 7811 Results.push_back(RTB); 7812 Results.push_back(RTB.getValue(1)); 7813 Results.push_back(RTB.getValue(2)); 7814 break; 7815 } 7816 case ISD::INTRINSIC_W_CHAIN: { 7817 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 7818 Intrinsic::ppc_is_decremented_ctr_nonzero) 7819 break; 7820 7821 assert(N->getValueType(0) == MVT::i1 && 7822 "Unexpected result type for CTR decrement intrinsic"); 7823 EVT SVT = getSetCCResultType(*DAG.getContext(), N->getValueType(0)); 7824 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 7825 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 7826 N->getOperand(1)); 7827 7828 Results.push_back(NewInt); 7829 Results.push_back(NewInt.getValue(1)); 7830 break; 7831 } 7832 case ISD::VAARG: { 7833 if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64()) 7834 return; 7835 7836 EVT VT = N->getValueType(0); 7837 7838 if (VT == MVT::i64) { 7839 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG, Subtarget); 7840 7841 Results.push_back(NewNode); 7842 Results.push_back(NewNode.getValue(1)); 7843 } 7844 return; 7845 } 7846 case ISD::FP_ROUND_INREG: { 7847 assert(N->getValueType(0) == MVT::ppcf128); 7848 assert(N->getOperand(0).getValueType() == MVT::ppcf128); 7849 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 7850 MVT::f64, N->getOperand(0), 7851 DAG.getIntPtrConstant(0, dl)); 7852 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 7853 MVT::f64, N->getOperand(0), 7854 DAG.getIntPtrConstant(1, dl)); 7855 7856 // Add the two halves of the long double in round-to-zero mode. 7857 SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 7858 7859 // We know the low half is about to be thrown away, so just use something 7860 // convenient. 7861 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128, 7862 FPreg, FPreg)); 7863 return; 7864 } 7865 case ISD::FP_TO_SINT: 7866 case ISD::FP_TO_UINT: 7867 // LowerFP_TO_INT() can only handle f32 and f64. 
7868     if (N->getOperand(0).getValueType() == MVT::ppcf128)
7869       return;
7870     Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
7871     return;
7872   }
7873 }
7874 
7875 
7876 //===----------------------------------------------------------------------===//
7877 // Other Lowering Code
7878 //===----------------------------------------------------------------------===//
7879 
7880 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
7881   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
7882   Function *Func = Intrinsic::getDeclaration(M, Id);
7883   return Builder.CreateCall(Func, {});
7884 }
7885 
7886 // The mappings for emitLeadingFence/emitTrailingFence are taken from
7887 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
7888 Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
7889                                                  AtomicOrdering Ord, bool IsStore,
7890                                                  bool IsLoad) const {
7891   if (Ord == SequentiallyConsistent)
7892     return callIntrinsic(Builder, Intrinsic::ppc_sync);
7893   if (isAtLeastRelease(Ord))
7894     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
7895   return nullptr;
7896 }
7897 
7898 Instruction* PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
7899                                                   AtomicOrdering Ord, bool IsStore,
7900                                                   bool IsLoad) const {
7901   if (IsLoad && isAtLeastAcquire(Ord))
7902     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
7903   // FIXME: this is too conservative, a dependent branch + isync is enough.
7904   // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
7905   // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
7906   // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
7907   return nullptr;
7908 }
7909 
7910 MachineBasicBlock *
7911 PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
7912                                     unsigned AtomicSize,
7913                                     unsigned BinOpcode) const {
7914   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
7915   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
7916 
7917   auto LoadMnemonic = PPC::LDARX;
7918   auto StoreMnemonic = PPC::STDCX;
7919   switch (AtomicSize) {
7920   default:
7921     llvm_unreachable("Unexpected size of atomic entity");
7922   case 1:
7923     LoadMnemonic = PPC::LBARX;
7924     StoreMnemonic = PPC::STBCX;
7925     assert(Subtarget.hasPartwordAtomics() && "Partword atomics not supported; call this only with size >= 4");
7926     break;
7927   case 2:
7928     LoadMnemonic = PPC::LHARX;
7929     StoreMnemonic = PPC::STHCX;
7930     assert(Subtarget.hasPartwordAtomics() && "Partword atomics not supported; call this only with size >= 4");
7931     break;
7932   case 4:
7933     LoadMnemonic = PPC::LWARX;
7934     StoreMnemonic = PPC::STWCX;
7935     break;
7936   case 8:
7937     LoadMnemonic = PPC::LDARX;
7938     StoreMnemonic = PPC::STDCX;
7939     break;
7940   }
7941 
7942   const BasicBlock *LLVM_BB = BB->getBasicBlock();
7943   MachineFunction *F = BB->getParent();
7944   MachineFunction::iterator It = BB;
7945   ++It;
7946 
7947   unsigned dest = MI->getOperand(0).getReg();
7948   unsigned ptrA = MI->getOperand(1).getReg();
7949   unsigned ptrB = MI->getOperand(2).getReg();
7950   unsigned incr = MI->getOperand(3).getReg();
7951   DebugLoc dl = MI->getDebugLoc();
7952 
7953   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
7954   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
7955   F->insert(It, loopMBB);
7956   F->insert(It, exitMBB);
7957   exitMBB->splice(exitMBB->begin(), BB,
7958                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
7959   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
7960 
7961   MachineRegisterInfo &RegInfo = F->getRegInfo();
7962   unsigned TmpReg = (!BinOpcode) ?
incr : 7963 RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass 7964 : &PPC::GPRCRegClass); 7965 7966 // thisMBB: 7967 // ... 7968 // fallthrough --> loopMBB 7969 BB->addSuccessor(loopMBB); 7970 7971 // loopMBB: 7972 // l[wd]arx dest, ptr 7973 // add r0, dest, incr 7974 // st[wd]cx. r0, ptr 7975 // bne- loopMBB 7976 // fallthrough --> exitMBB 7977 BB = loopMBB; 7978 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 7979 .addReg(ptrA).addReg(ptrB); 7980 if (BinOpcode) 7981 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 7982 BuildMI(BB, dl, TII->get(StoreMnemonic)) 7983 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 7984 BuildMI(BB, dl, TII->get(PPC::BCC)) 7985 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 7986 BB->addSuccessor(loopMBB); 7987 BB->addSuccessor(exitMBB); 7988 7989 // exitMBB: 7990 // ... 7991 BB = exitMBB; 7992 return BB; 7993 } 7994 7995 MachineBasicBlock * 7996 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI, 7997 MachineBasicBlock *BB, 7998 bool is8bit, // operation 7999 unsigned BinOpcode) const { 8000 // If we support part-word atomic mnemonics, just use them 8001 if (Subtarget.hasPartwordAtomics()) 8002 return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode); 8003 8004 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 8005 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8006 // In 64 bit mode we have to use 64 bits for addresses, even though the 8007 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 8008 // registers without caring whether they're 32 or 64, but here we're 8009 // doing actual arithmetic on the addresses. 8010 bool is64bit = Subtarget.isPPC64(); 8011 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 8012 8013 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8014 MachineFunction *F = BB->getParent(); 8015 MachineFunction::iterator It = BB; 8016 ++It; 8017 8018 unsigned dest = MI->getOperand(0).getReg(); 8019 unsigned ptrA = MI->getOperand(1).getReg(); 8020 unsigned ptrB = MI->getOperand(2).getReg(); 8021 unsigned incr = MI->getOperand(3).getReg(); 8022 DebugLoc dl = MI->getDebugLoc(); 8023 8024 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 8025 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 8026 F->insert(It, loopMBB); 8027 F->insert(It, exitMBB); 8028 exitMBB->splice(exitMBB->begin(), BB, 8029 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8030 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 8031 8032 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8033 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 8034 : &PPC::GPRCRegClass; 8035 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 8036 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 8037 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 8038 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC); 8039 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 8040 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 8041 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 8042 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 8043 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC); 8044 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 8045 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 8046 unsigned Ptr1Reg; 8047 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC); 8048 8049 // thisMBB: 8050 // ... 
8051 // fallthrough --> loopMBB 8052 BB->addSuccessor(loopMBB); 8053 8054 // The 4-byte load must be aligned, while a char or short may be 8055 // anywhere in the word. Hence all this nasty bookkeeping code. 8056 // add ptr1, ptrA, ptrB [copy if ptrA==0] 8057 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 8058 // xori shift, shift1, 24 [16] 8059 // rlwinm ptr, ptr1, 0, 0, 29 8060 // slw incr2, incr, shift 8061 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 8062 // slw mask, mask2, shift 8063 // loopMBB: 8064 // lwarx tmpDest, ptr 8065 // add tmp, tmpDest, incr2 8066 // andc tmp2, tmpDest, mask 8067 // and tmp3, tmp, mask 8068 // or tmp4, tmp3, tmp2 8069 // stwcx. tmp4, ptr 8070 // bne- loopMBB 8071 // fallthrough --> exitMBB 8072 // srw dest, tmpDest, shift 8073 if (ptrA != ZeroReg) { 8074 Ptr1Reg = RegInfo.createVirtualRegister(RC); 8075 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 8076 .addReg(ptrA).addReg(ptrB); 8077 } else { 8078 Ptr1Reg = ptrB; 8079 } 8080 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 8081 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 8082 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 8083 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 8084 if (is64bit) 8085 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 8086 .addReg(Ptr1Reg).addImm(0).addImm(61); 8087 else 8088 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 8089 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 8090 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg) 8091 .addReg(incr).addReg(ShiftReg); 8092 if (is8bit) 8093 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 8094 else { 8095 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 8096 BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535); 8097 } 8098 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 8099 .addReg(Mask2Reg).addReg(ShiftReg); 8100 8101 BB = loopMBB; 8102 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 8103 .addReg(ZeroReg).addReg(PtrReg); 8104 if (BinOpcode) 8105 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 8106 .addReg(Incr2Reg).addReg(TmpDestReg); 8107 BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg) 8108 .addReg(TmpDestReg).addReg(MaskReg); 8109 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg) 8110 .addReg(TmpReg).addReg(MaskReg); 8111 BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg) 8112 .addReg(Tmp3Reg).addReg(Tmp2Reg); 8113 BuildMI(BB, dl, TII->get(PPC::STWCX)) 8114 .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg); 8115 BuildMI(BB, dl, TII->get(PPC::BCC)) 8116 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 8117 BB->addSuccessor(loopMBB); 8118 BB->addSuccessor(exitMBB); 8119 8120 // exitMBB: 8121 // ... 
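  // One worked case (illustrative): for a byte operand at a word-aligned
  // address on a big-endian target, the rlwinm/xori above produce shift = 24,
  // the update happens in the word's most-significant byte, and the final
  // srw below moves that byte back down into the low bits of dest.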
8122 BB = exitMBB; 8123 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg) 8124 .addReg(ShiftReg); 8125 return BB; 8126 } 8127 8128 llvm::MachineBasicBlock* 8129 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI, 8130 MachineBasicBlock *MBB) const { 8131 DebugLoc DL = MI->getDebugLoc(); 8132 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8133 8134 MachineFunction *MF = MBB->getParent(); 8135 MachineRegisterInfo &MRI = MF->getRegInfo(); 8136 8137 const BasicBlock *BB = MBB->getBasicBlock(); 8138 MachineFunction::iterator I = MBB; 8139 ++I; 8140 8141 // Memory Reference 8142 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 8143 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 8144 8145 unsigned DstReg = MI->getOperand(0).getReg(); 8146 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 8147 assert(RC->hasType(MVT::i32) && "Invalid destination!"); 8148 unsigned mainDstReg = MRI.createVirtualRegister(RC); 8149 unsigned restoreDstReg = MRI.createVirtualRegister(RC); 8150 8151 MVT PVT = getPointerTy(); 8152 assert((PVT == MVT::i64 || PVT == MVT::i32) && 8153 "Invalid Pointer Size!"); 8154 // For v = setjmp(buf), we generate 8155 // 8156 // thisMBB: 8157 // SjLjSetup mainMBB 8158 // bl mainMBB 8159 // v_restore = 1 8160 // b sinkMBB 8161 // 8162 // mainMBB: 8163 // buf[LabelOffset] = LR 8164 // v_main = 0 8165 // 8166 // sinkMBB: 8167 // v = phi(main, restore) 8168 // 8169 8170 MachineBasicBlock *thisMBB = MBB; 8171 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 8172 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 8173 MF->insert(I, mainMBB); 8174 MF->insert(I, sinkMBB); 8175 8176 MachineInstrBuilder MIB; 8177 8178 // Transfer the remainder of BB and its successor edges to sinkMBB. 8179 sinkMBB->splice(sinkMBB->begin(), MBB, 8180 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 8181 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 8182 8183 // Note that the structure of the jmp_buf used here is not compatible 8184 // with that used by libc, and is not designed to be. Specifically, it 8185 // stores only those 'reserved' registers that LLVM does not otherwise 8186 // understand how to spill. Also, by convention, by the time this 8187 // intrinsic is called, Clang has already stored the frame address in the 8188 // first slot of the buffer and stack address in the third. Following the 8189 // X86 target code, we'll store the jump address in the second slot. We also 8190 // need to save the TOC pointer (R2) to handle jumps between shared 8191 // libraries, and that will be stored in the fourth slot. The thread 8192 // identifier (R13) is not affected. 8193 8194 // thisMBB: 8195 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 8196 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 8197 const int64_t BPOffset = 4 * PVT.getStoreSize(); 8198 8199 // Prepare IP either in reg. 8200 const TargetRegisterClass *PtrRC = getRegClassFor(PVT); 8201 unsigned LabelReg = MRI.createVirtualRegister(PtrRC); 8202 unsigned BufReg = MI->getOperand(1).getReg(); 8203 8204 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) { 8205 setUsesTOCBasePtr(*MBB->getParent()); 8206 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD)) 8207 .addReg(PPC::X2) 8208 .addImm(TOCOffset) 8209 .addReg(BufReg); 8210 MIB.setMemRefs(MMOBegin, MMOEnd); 8211 } 8212 8213 // Naked functions never have a base pointer, and so we use r1. For all 8214 // other functions, this decision must be delayed until during PEI. 
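  // For instance (illustrative, 64-bit): PVT is i64, so the offsets above are
  // 8 (IP), 24 (TOC) and 32 (BP); a naked function stores X1 at offset 32,
  // while any other function stores the BP8 pseudo-register that is resolved
  // during PEI.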
8215 unsigned BaseReg; 8216 if (MF->getFunction()->hasFnAttribute(Attribute::Naked)) 8217 BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1; 8218 else 8219 BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP; 8220 8221 MIB = BuildMI(*thisMBB, MI, DL, 8222 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW)) 8223 .addReg(BaseReg) 8224 .addImm(BPOffset) 8225 .addReg(BufReg); 8226 MIB.setMemRefs(MMOBegin, MMOEnd); 8227 8228 // Setup 8229 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB); 8230 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 8231 MIB.addRegMask(TRI->getNoPreservedMask()); 8232 8233 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1); 8234 8235 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup)) 8236 .addMBB(mainMBB); 8237 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB); 8238 8239 thisMBB->addSuccessor(mainMBB, /* weight */ 0); 8240 thisMBB->addSuccessor(sinkMBB, /* weight */ 1); 8241 8242 // mainMBB: 8243 // mainDstReg = 0 8244 MIB = 8245 BuildMI(mainMBB, DL, 8246 TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg); 8247 8248 // Store IP 8249 if (Subtarget.isPPC64()) { 8250 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD)) 8251 .addReg(LabelReg) 8252 .addImm(LabelOffset) 8253 .addReg(BufReg); 8254 } else { 8255 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW)) 8256 .addReg(LabelReg) 8257 .addImm(LabelOffset) 8258 .addReg(BufReg); 8259 } 8260 8261 MIB.setMemRefs(MMOBegin, MMOEnd); 8262 8263 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0); 8264 mainMBB->addSuccessor(sinkMBB); 8265 8266 // sinkMBB: 8267 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 8268 TII->get(PPC::PHI), DstReg) 8269 .addReg(mainDstReg).addMBB(mainMBB) 8270 .addReg(restoreDstReg).addMBB(thisMBB); 8271 8272 MI->eraseFromParent(); 8273 return sinkMBB; 8274 } 8275 8276 MachineBasicBlock * 8277 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI, 8278 MachineBasicBlock *MBB) const { 8279 DebugLoc DL = MI->getDebugLoc(); 8280 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8281 8282 MachineFunction *MF = MBB->getParent(); 8283 MachineRegisterInfo &MRI = MF->getRegInfo(); 8284 8285 // Memory Reference 8286 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 8287 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 8288 8289 MVT PVT = getPointerTy(); 8290 assert((PVT == MVT::i64 || PVT == MVT::i32) && 8291 "Invalid Pointer Size!"); 8292 8293 const TargetRegisterClass *RC = 8294 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 8295 unsigned Tmp = MRI.createVirtualRegister(RC); 8296 // Since FP is only updated here but NOT referenced, it's treated as GPR. 8297 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; 8298 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; 8299 unsigned BP = 8300 (PVT == MVT::i64) 8301 ? PPC::X30 8302 : (Subtarget.isSVR4ABI() && 8303 MF->getTarget().getRelocationModel() == Reloc::PIC_ 8304 ? PPC::R29 8305 : PPC::R30); 8306 8307 MachineInstrBuilder MIB; 8308 8309 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 8310 const int64_t SPOffset = 2 * PVT.getStoreSize(); 8311 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 8312 const int64_t BPOffset = 4 * PVT.getStoreSize(); 8313 8314 unsigned BufReg = MI->getOperand(0).getReg(); 8315 8316 // Reload FP (the jumped-to function may not have had a 8317 // frame pointer, and if so, then its r31 will be restored 8318 // as necessary). 
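  // (Offset 0 is the first jmp_buf slot, in which Clang stored the frame
  // address at setjmp time; see the buffer-layout notes in emitEHSjLjSetJmp
  // above.)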
8319 if (PVT == MVT::i64) { 8320 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP) 8321 .addImm(0) 8322 .addReg(BufReg); 8323 } else { 8324 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP) 8325 .addImm(0) 8326 .addReg(BufReg); 8327 } 8328 MIB.setMemRefs(MMOBegin, MMOEnd); 8329 8330 // Reload IP 8331 if (PVT == MVT::i64) { 8332 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp) 8333 .addImm(LabelOffset) 8334 .addReg(BufReg); 8335 } else { 8336 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp) 8337 .addImm(LabelOffset) 8338 .addReg(BufReg); 8339 } 8340 MIB.setMemRefs(MMOBegin, MMOEnd); 8341 8342 // Reload SP 8343 if (PVT == MVT::i64) { 8344 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP) 8345 .addImm(SPOffset) 8346 .addReg(BufReg); 8347 } else { 8348 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP) 8349 .addImm(SPOffset) 8350 .addReg(BufReg); 8351 } 8352 MIB.setMemRefs(MMOBegin, MMOEnd); 8353 8354 // Reload BP 8355 if (PVT == MVT::i64) { 8356 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP) 8357 .addImm(BPOffset) 8358 .addReg(BufReg); 8359 } else { 8360 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP) 8361 .addImm(BPOffset) 8362 .addReg(BufReg); 8363 } 8364 MIB.setMemRefs(MMOBegin, MMOEnd); 8365 8366 // Reload TOC 8367 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) { 8368 setUsesTOCBasePtr(*MBB->getParent()); 8369 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) 8370 .addImm(TOCOffset) 8371 .addReg(BufReg); 8372 8373 MIB.setMemRefs(MMOBegin, MMOEnd); 8374 } 8375 8376 // Jump 8377 BuildMI(*MBB, MI, DL, 8378 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp); 8379 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR)); 8380 8381 MI->eraseFromParent(); 8382 return MBB; 8383 } 8384 8385 MachineBasicBlock * 8386 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 8387 MachineBasicBlock *BB) const { 8388 if (MI->getOpcode() == TargetOpcode::STACKMAP || 8389 MI->getOpcode() == TargetOpcode::PATCHPOINT) { 8390 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() && 8391 MI->getOpcode() == TargetOpcode::PATCHPOINT) { 8392 // Call lowering should have added an r2 operand to indicate a dependence 8393 // on the TOC base pointer value. It can't however, because there is no 8394 // way to mark the dependence as implicit there, and so the stackmap code 8395 // will confuse it with a regular operand. Instead, add the dependence 8396 // here. 8397 setUsesTOCBasePtr(*BB->getParent()); 8398 MI->addOperand(MachineOperand::CreateReg(PPC::X2, false, true)); 8399 } 8400 8401 return emitPatchPoint(MI, BB); 8402 } 8403 8404 if (MI->getOpcode() == PPC::EH_SjLj_SetJmp32 || 8405 MI->getOpcode() == PPC::EH_SjLj_SetJmp64) { 8406 return emitEHSjLjSetJmp(MI, BB); 8407 } else if (MI->getOpcode() == PPC::EH_SjLj_LongJmp32 || 8408 MI->getOpcode() == PPC::EH_SjLj_LongJmp64) { 8409 return emitEHSjLjLongJmp(MI, BB); 8410 } 8411 8412 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8413 8414 // To "insert" these instructions we actually have to insert their 8415 // control-flow patterns. 
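  // For instance, a SELECT_CC pseudo has no single-instruction expansion
  // without ISEL, so below it becomes a compare-and-branch diamond
  // (thisMBB -> {copy0MBB, sinkMBB}) whose PHI in sinkMBB picks the value.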
8416 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8417 MachineFunction::iterator It = BB; 8418 ++It; 8419 8420 MachineFunction *F = BB->getParent(); 8421 8422 if (Subtarget.hasISEL() && (MI->getOpcode() == PPC::SELECT_CC_I4 || 8423 MI->getOpcode() == PPC::SELECT_CC_I8 || 8424 MI->getOpcode() == PPC::SELECT_I4 || 8425 MI->getOpcode() == PPC::SELECT_I8)) { 8426 SmallVector<MachineOperand, 2> Cond; 8427 if (MI->getOpcode() == PPC::SELECT_CC_I4 || 8428 MI->getOpcode() == PPC::SELECT_CC_I8) 8429 Cond.push_back(MI->getOperand(4)); 8430 else 8431 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET)); 8432 Cond.push_back(MI->getOperand(1)); 8433 8434 DebugLoc dl = MI->getDebugLoc(); 8435 TII->insertSelect(*BB, MI, dl, MI->getOperand(0).getReg(), 8436 Cond, MI->getOperand(2).getReg(), 8437 MI->getOperand(3).getReg()); 8438 } else if (MI->getOpcode() == PPC::SELECT_CC_I4 || 8439 MI->getOpcode() == PPC::SELECT_CC_I8 || 8440 MI->getOpcode() == PPC::SELECT_CC_F4 || 8441 MI->getOpcode() == PPC::SELECT_CC_F8 || 8442 MI->getOpcode() == PPC::SELECT_CC_QFRC || 8443 MI->getOpcode() == PPC::SELECT_CC_QSRC || 8444 MI->getOpcode() == PPC::SELECT_CC_QBRC || 8445 MI->getOpcode() == PPC::SELECT_CC_VRRC || 8446 MI->getOpcode() == PPC::SELECT_CC_VSFRC || 8447 MI->getOpcode() == PPC::SELECT_CC_VSSRC || 8448 MI->getOpcode() == PPC::SELECT_CC_VSRC || 8449 MI->getOpcode() == PPC::SELECT_I4 || 8450 MI->getOpcode() == PPC::SELECT_I8 || 8451 MI->getOpcode() == PPC::SELECT_F4 || 8452 MI->getOpcode() == PPC::SELECT_F8 || 8453 MI->getOpcode() == PPC::SELECT_QFRC || 8454 MI->getOpcode() == PPC::SELECT_QSRC || 8455 MI->getOpcode() == PPC::SELECT_QBRC || 8456 MI->getOpcode() == PPC::SELECT_VRRC || 8457 MI->getOpcode() == PPC::SELECT_VSFRC || 8458 MI->getOpcode() == PPC::SELECT_VSSRC || 8459 MI->getOpcode() == PPC::SELECT_VSRC) { 8460 // The incoming instruction knows the destination vreg to set, the 8461 // condition code register to branch on, the true/false values to 8462 // select between, and a branch opcode to use. 8463 8464 // thisMBB: 8465 // ... 8466 // TrueVal = ... 8467 // cmpTY ccX, r1, r2 8468 // bCC copy1MBB 8469 // fallthrough --> copy0MBB 8470 MachineBasicBlock *thisMBB = BB; 8471 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 8472 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 8473 DebugLoc dl = MI->getDebugLoc(); 8474 F->insert(It, copy0MBB); 8475 F->insert(It, sinkMBB); 8476 8477 // Transfer the remainder of BB and its successor edges to sinkMBB. 8478 sinkMBB->splice(sinkMBB->begin(), BB, 8479 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8480 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 8481 8482 // Next, add the true and fallthrough blocks as its successors. 
8483     BB->addSuccessor(copy0MBB);
8484     BB->addSuccessor(sinkMBB);
8485 
8486     if (MI->getOpcode() == PPC::SELECT_I4 ||
8487         MI->getOpcode() == PPC::SELECT_I8 ||
8488         MI->getOpcode() == PPC::SELECT_F4 ||
8489         MI->getOpcode() == PPC::SELECT_F8 ||
8490         MI->getOpcode() == PPC::SELECT_QFRC ||
8491         MI->getOpcode() == PPC::SELECT_QSRC ||
8492         MI->getOpcode() == PPC::SELECT_QBRC ||
8493         MI->getOpcode() == PPC::SELECT_VRRC ||
8494         MI->getOpcode() == PPC::SELECT_VSFRC ||
8495         MI->getOpcode() == PPC::SELECT_VSSRC ||
8496         MI->getOpcode() == PPC::SELECT_VSRC) {
8497       BuildMI(BB, dl, TII->get(PPC::BC))
8498         .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
8499     } else {
8500       unsigned SelectPred = MI->getOperand(4).getImm();
8501       BuildMI(BB, dl, TII->get(PPC::BCC))
8502         .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
8503     }
8504 
8505     // copy0MBB:
8506     //  %FalseValue = ...
8507     //  # fallthrough to sinkMBB
8508     BB = copy0MBB;
8509 
8510     // Update machine-CFG edges
8511     BB->addSuccessor(sinkMBB);
8512 
8513     // sinkMBB:
8514     //  %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
8515     //  ...
8516     BB = sinkMBB;
8517     BuildMI(*BB, BB->begin(), dl,
8518             TII->get(PPC::PHI), MI->getOperand(0).getReg())
8519       .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
8520       .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
8521   } else if (MI->getOpcode() == PPC::ReadTB) {
8522     // To read the 64-bit time-base register on a 32-bit target, we read the
8523     // two halves. Should the counter have wrapped while it was being read, we
8524     // need to try again.
8525     // ...
8526     // readLoop:
8527     // mfspr Rx,TBU # load from TBU
8528     // mfspr Ry,TB # load from TB
8529     // mfspr Rz,TBU # load from TBU
8530     // cmpw crX,Rx,Rz # check if 'old' == 'new'
8531     // bne readLoop # branch if they're not equal
8532     // ...
8533 
8534     MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
8535     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
8536     DebugLoc dl = MI->getDebugLoc();
8537     F->insert(It, readMBB);
8538     F->insert(It, sinkMBB);
8539 
8540     // Transfer the remainder of BB and its successor edges to sinkMBB.
8541 sinkMBB->splice(sinkMBB->begin(), BB, 8542 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8543 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 8544 8545 BB->addSuccessor(readMBB); 8546 BB = readMBB; 8547 8548 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8549 unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 8550 unsigned LoReg = MI->getOperand(0).getReg(); 8551 unsigned HiReg = MI->getOperand(1).getReg(); 8552 8553 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269); 8554 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268); 8555 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269); 8556 8557 unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 8558 8559 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg) 8560 .addReg(HiReg).addReg(ReadAgainReg); 8561 BuildMI(BB, dl, TII->get(PPC::BCC)) 8562 .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB); 8563 8564 BB->addSuccessor(readMBB); 8565 BB->addSuccessor(sinkMBB); 8566 } 8567 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 8568 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 8569 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 8570 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 8571 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 8572 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4); 8573 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 8574 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8); 8575 8576 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 8577 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 8578 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 8579 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 8580 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 8581 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND); 8582 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 8583 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8); 8584 8585 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 8586 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 8587 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 8588 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 8589 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 8590 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR); 8591 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 8592 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8); 8593 8594 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 8595 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 8596 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 8597 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 8598 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 8599 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR); 8600 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 8601 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8); 8602 8603 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 8604 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND); 8605 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 8606 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND); 8607 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 8608 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND); 8609 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 8610 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8); 8611 8612 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 8613 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 8614 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 8615 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 8616 else 
if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
8617     BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
8618   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
8619     BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
8620 
8621   else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8)
8622     BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
8623   else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16)
8624     BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
8625   else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32)
8626     BB = EmitAtomicBinary(MI, BB, 4, 0);
8627   else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64)
8628     BB = EmitAtomicBinary(MI, BB, 8, 0);
8629 
8630   else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
8631            MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
8632            (Subtarget.hasPartwordAtomics() &&
8633             MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
8634            (Subtarget.hasPartwordAtomics() &&
8635             MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
8636     bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
8637 
8638     auto LoadMnemonic = PPC::LDARX;
8639     auto StoreMnemonic = PPC::STDCX;
8640     switch (MI->getOpcode()) {
8641     default:
8642       llvm_unreachable("Compare and swap of unknown size");
8643     case PPC::ATOMIC_CMP_SWAP_I8:
8644       LoadMnemonic = PPC::LBARX;
8645       StoreMnemonic = PPC::STBCX;
8646       assert(Subtarget.hasPartwordAtomics() && "Partword atomics not supported.");
8647       break;
8648     case PPC::ATOMIC_CMP_SWAP_I16:
8649       LoadMnemonic = PPC::LHARX;
8650       StoreMnemonic = PPC::STHCX;
8651       assert(Subtarget.hasPartwordAtomics() && "Partword atomics not supported.");
8652       break;
8653     case PPC::ATOMIC_CMP_SWAP_I32:
8654       LoadMnemonic = PPC::LWARX;
8655       StoreMnemonic = PPC::STWCX;
8656       break;
8657     case PPC::ATOMIC_CMP_SWAP_I64:
8658       LoadMnemonic = PPC::LDARX;
8659       StoreMnemonic = PPC::STDCX;
8660       break;
8661     }
8662     unsigned dest = MI->getOperand(0).getReg();
8663     unsigned ptrA = MI->getOperand(1).getReg();
8664     unsigned ptrB = MI->getOperand(2).getReg();
8665     unsigned oldval = MI->getOperand(3).getReg();
8666     unsigned newval = MI->getOperand(4).getReg();
8667     DebugLoc dl = MI->getDebugLoc();
8668 
8669     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
8670     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
8671     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
8672     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
8673     F->insert(It, loop1MBB);
8674     F->insert(It, loop2MBB);
8675     F->insert(It, midMBB);
8676     F->insert(It, exitMBB);
8677     exitMBB->splice(exitMBB->begin(), BB,
8678                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
8679     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
8680 
8681     // thisMBB:
8682     //   ...
8683     //   fallthrough --> loop1MBB
8684     BB->addSuccessor(loop1MBB);
8685 
8686     // loop1MBB:
8687     //   l[bhwd]arx dest, ptr
8688     //   cmp[wd] dest, oldval
8689     //   bne- midMBB
8690     // loop2MBB:
8691     //   st[bhwd]cx. newval, ptr
8692     //   bne- loop1MBB
8693     //   b exitMBB
8694     // midMBB:
8695     //   st[bhwd]cx. dest, ptr
8696     // exitMBB:
8697     BB = loop1MBB;
8698     BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
8699       .addReg(ptrA).addReg(ptrB);
8700     BuildMI(BB, dl, TII->get(is64bit ?
PPC::CMPD : PPC::CMPW), PPC::CR0) 8701 .addReg(oldval).addReg(dest); 8702 BuildMI(BB, dl, TII->get(PPC::BCC)) 8703 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 8704 BB->addSuccessor(loop2MBB); 8705 BB->addSuccessor(midMBB); 8706 8707 BB = loop2MBB; 8708 BuildMI(BB, dl, TII->get(StoreMnemonic)) 8709 .addReg(newval).addReg(ptrA).addReg(ptrB); 8710 BuildMI(BB, dl, TII->get(PPC::BCC)) 8711 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 8712 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 8713 BB->addSuccessor(loop1MBB); 8714 BB->addSuccessor(exitMBB); 8715 8716 BB = midMBB; 8717 BuildMI(BB, dl, TII->get(StoreMnemonic)) 8718 .addReg(dest).addReg(ptrA).addReg(ptrB); 8719 BB->addSuccessor(exitMBB); 8720 8721 // exitMBB: 8722 // ... 8723 BB = exitMBB; 8724 } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 8725 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 8726 // We must use 64-bit registers for addresses when targeting 64-bit, 8727 // since we're actually doing arithmetic on them. Other registers 8728 // can be 32-bit. 8729 bool is64bit = Subtarget.isPPC64(); 8730 bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 8731 8732 unsigned dest = MI->getOperand(0).getReg(); 8733 unsigned ptrA = MI->getOperand(1).getReg(); 8734 unsigned ptrB = MI->getOperand(2).getReg(); 8735 unsigned oldval = MI->getOperand(3).getReg(); 8736 unsigned newval = MI->getOperand(4).getReg(); 8737 DebugLoc dl = MI->getDebugLoc(); 8738 8739 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 8740 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 8741 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 8742 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 8743 F->insert(It, loop1MBB); 8744 F->insert(It, loop2MBB); 8745 F->insert(It, midMBB); 8746 F->insert(It, exitMBB); 8747 exitMBB->splice(exitMBB->begin(), BB, 8748 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8749 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 8750 8751 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8752 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 8753 : &PPC::GPRCRegClass; 8754 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 8755 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 8756 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 8757 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC); 8758 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC); 8759 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC); 8760 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC); 8761 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 8762 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 8763 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 8764 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 8765 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 8766 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 8767 unsigned Ptr1Reg; 8768 unsigned TmpReg = RegInfo.createVirtualRegister(RC); 8769 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 8770 // thisMBB: 8771 // ... 8772 // fallthrough --> loopMBB 8773 BB->addSuccessor(loop1MBB); 8774 8775 // The 4-byte load must be aligned, while a char or short may be 8776 // anywhere in the word. Hence all this nasty bookkeeping code. 
8777 // add ptr1, ptrA, ptrB [copy if ptrA==0] 8778 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 8779 // xori shift, shift1, 24 [16] 8780 // rlwinm ptr, ptr1, 0, 0, 29 8781 // slw newval2, newval, shift 8782 // slw oldval2, oldval,shift 8783 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 8784 // slw mask, mask2, shift 8785 // and newval3, newval2, mask 8786 // and oldval3, oldval2, mask 8787 // loop1MBB: 8788 // lwarx tmpDest, ptr 8789 // and tmp, tmpDest, mask 8790 // cmpw tmp, oldval3 8791 // bne- midMBB 8792 // loop2MBB: 8793 // andc tmp2, tmpDest, mask 8794 // or tmp4, tmp2, newval3 8795 // stwcx. tmp4, ptr 8796 // bne- loop1MBB 8797 // b exitBB 8798 // midMBB: 8799 // stwcx. tmpDest, ptr 8800 // exitBB: 8801 // srw dest, tmpDest, shift 8802 if (ptrA != ZeroReg) { 8803 Ptr1Reg = RegInfo.createVirtualRegister(RC); 8804 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 8805 .addReg(ptrA).addReg(ptrB); 8806 } else { 8807 Ptr1Reg = ptrB; 8808 } 8809 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 8810 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 8811 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 8812 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 8813 if (is64bit) 8814 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 8815 .addReg(Ptr1Reg).addImm(0).addImm(61); 8816 else 8817 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 8818 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 8819 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 8820 .addReg(newval).addReg(ShiftReg); 8821 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 8822 .addReg(oldval).addReg(ShiftReg); 8823 if (is8bit) 8824 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 8825 else { 8826 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 8827 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 8828 .addReg(Mask3Reg).addImm(65535); 8829 } 8830 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 8831 .addReg(Mask2Reg).addReg(ShiftReg); 8832 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 8833 .addReg(NewVal2Reg).addReg(MaskReg); 8834 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 8835 .addReg(OldVal2Reg).addReg(MaskReg); 8836 8837 BB = loop1MBB; 8838 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 8839 .addReg(ZeroReg).addReg(PtrReg); 8840 BuildMI(BB, dl, TII->get(PPC::AND),TmpReg) 8841 .addReg(TmpDestReg).addReg(MaskReg); 8842 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 8843 .addReg(TmpReg).addReg(OldVal3Reg); 8844 BuildMI(BB, dl, TII->get(PPC::BCC)) 8845 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 8846 BB->addSuccessor(loop2MBB); 8847 BB->addSuccessor(midMBB); 8848 8849 BB = loop2MBB; 8850 BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg) 8851 .addReg(TmpDestReg).addReg(MaskReg); 8852 BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg) 8853 .addReg(Tmp2Reg).addReg(NewVal3Reg); 8854 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg) 8855 .addReg(ZeroReg).addReg(PtrReg); 8856 BuildMI(BB, dl, TII->get(PPC::BCC)) 8857 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 8858 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 8859 BB->addSuccessor(loop1MBB); 8860 BB->addSuccessor(exitMBB); 8861 8862 BB = midMBB; 8863 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg) 8864 .addReg(ZeroReg).addReg(PtrReg); 8865 BB->addSuccessor(exitMBB); 8866 8867 // exitMBB: 8868 // ... 
8869 BB = exitMBB; 8870 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg) 8871 .addReg(ShiftReg); 8872 } else if (MI->getOpcode() == PPC::FADDrtz) { 8873 // This pseudo performs an FADD with rounding mode temporarily forced 8874 // to round-to-zero. We emit this via custom inserter since the FPSCR 8875 // is not modeled at the SelectionDAG level. 8876 unsigned Dest = MI->getOperand(0).getReg(); 8877 unsigned Src1 = MI->getOperand(1).getReg(); 8878 unsigned Src2 = MI->getOperand(2).getReg(); 8879 DebugLoc dl = MI->getDebugLoc(); 8880 8881 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8882 unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 8883 8884 // Save FPSCR value. 8885 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg); 8886 8887 // Set rounding mode to round-to-zero. 8888 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31); 8889 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30); 8890 8891 // Perform addition. 8892 BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2); 8893 8894 // Restore FPSCR value. 8895 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg); 8896 } else if (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT || 8897 MI->getOpcode() == PPC::ANDIo_1_GT_BIT || 8898 MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 8899 MI->getOpcode() == PPC::ANDIo_1_GT_BIT8) { 8900 unsigned Opcode = (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 8901 MI->getOpcode() == PPC::ANDIo_1_GT_BIT8) ? 8902 PPC::ANDIo8 : PPC::ANDIo; 8903 bool isEQ = (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT || 8904 MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8); 8905 8906 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8907 unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ? 8908 &PPC::GPRCRegClass : 8909 &PPC::G8RCRegClass); 8910 8911 DebugLoc dl = MI->getDebugLoc(); 8912 BuildMI(*BB, MI, dl, TII->get(Opcode), Dest) 8913 .addReg(MI->getOperand(1).getReg()).addImm(1); 8914 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), 8915 MI->getOperand(0).getReg()) 8916 .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT); 8917 } else if (MI->getOpcode() == PPC::TCHECK_RET) { 8918 DebugLoc Dl = MI->getDebugLoc(); 8919 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8920 unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 8921 BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg); 8922 return BB; 8923 } else { 8924 llvm_unreachable("Unexpected instr type to insert"); 8925 } 8926 8927 MI->eraseFromParent(); // The pseudo instruction is gone now. 8928 return BB; 8929 } 8930 8931 //===----------------------------------------------------------------------===// 8932 // Target Optimization Hooks 8933 //===----------------------------------------------------------------------===// 8934 8935 SDValue PPCTargetLowering::getRsqrtEstimate(SDValue Operand, 8936 DAGCombinerInfo &DCI, 8937 unsigned &RefinementSteps, 8938 bool &UseOneConstNR) const { 8939 EVT VT = Operand.getValueType(); 8940 if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) || 8941 (VT == MVT::f64 && Subtarget.hasFRSQRTE()) || 8942 (VT == MVT::v4f32 && Subtarget.hasAltivec()) || 8943 (VT == MVT::v2f64 && Subtarget.hasVSX()) || 8944 (VT == MVT::v4f32 && Subtarget.hasQPX()) || 8945 (VT == MVT::v4f64 && Subtarget.hasQPX())) { 8946 // Convergence is quadratic, so we essentially double the number of digits 8947 // correct after every iteration. For both FRE and FRSQRTE, the minimum 8948 // architected relative accuracy is 2^-5. When hasRecipPrec(), this is 8949 // 2^-14. 
IEEE float has 23 digits and double has 52 digits.
8950     RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
8951     if (VT.getScalarType() == MVT::f64)
8952       ++RefinementSteps;
8953     UseOneConstNR = true;
8954     return DCI.DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
8955   }
8956   return SDValue();
8957 }
8958 
8959 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand,
8960                                             DAGCombinerInfo &DCI,
8961                                             unsigned &RefinementSteps) const {
8962   EVT VT = Operand.getValueType();
8963   if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
8964       (VT == MVT::f64 && Subtarget.hasFRE()) ||
8965       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
8966       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
8967       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
8968       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
8969     // Convergence is quadratic, so we essentially double the number of digits
8970     // correct after every iteration. For both FRE and FRSQRTE, the minimum
8971     // architected relative accuracy is 2^-5. When hasRecipPrec(), this is
8972     // 2^-14. IEEE float has 23 digits and double has 52 digits.
8973     RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
8974     if (VT.getScalarType() == MVT::f64)
8975       ++RefinementSteps;
8976     return DCI.DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
8977   }
8978   return SDValue();
8979 }
8980 
8981 bool PPCTargetLowering::combineRepeatedFPDivisors(unsigned NumUsers) const {
8982   // Note: This functionality is used only when unsafe-fp-math is enabled, and
8983   // on cores with reciprocal estimates (which are used when unsafe-fp-math is
8984   // enabled for division), this functionality is redundant with the default
8985   // combiner logic (once the division -> reciprocal/multiply transformation
8986   // has taken place). As a result, this matters more for older cores than for
8987   // newer ones.
8988 
8989   // Combine multiple FDIVs with the same divisor into multiple FMULs by the
8990   // reciprocal if there are two or more FDIVs (for embedded cores with only
8991   // one FP pipeline) or three or more FDIVs (for generic OOO cores).
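  // For example, given x/d, y/d, and z/d, the combiner can materialize
  // r = 1.0/d once and rewrite the divisions as x*r, y*r, and z*r, trading
  // three expensive divides for one divide and three multiplies. The
  // per-core thresholds below decide how many users make that profitable.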
8992 switch (Subtarget.getDarwinDirective()) { 8993 default: 8994 return NumUsers > 2; 8995 case PPC::DIR_440: 8996 case PPC::DIR_A2: 8997 case PPC::DIR_E500mc: 8998 case PPC::DIR_E5500: 8999 return NumUsers > 1; 9000 } 9001 } 9002 9003 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, 9004 unsigned Bytes, int Dist, 9005 SelectionDAG &DAG) { 9006 if (VT.getSizeInBits() / 8 != Bytes) 9007 return false; 9008 9009 SDValue BaseLoc = Base->getBasePtr(); 9010 if (Loc.getOpcode() == ISD::FrameIndex) { 9011 if (BaseLoc.getOpcode() != ISD::FrameIndex) 9012 return false; 9013 const MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 9014 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 9015 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 9016 int FS = MFI->getObjectSize(FI); 9017 int BFS = MFI->getObjectSize(BFI); 9018 if (FS != BFS || FS != (int)Bytes) return false; 9019 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes); 9020 } 9021 9022 // Handle X+C 9023 if (DAG.isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc && 9024 cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes) 9025 return true; 9026 9027 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9028 const GlobalValue *GV1 = nullptr; 9029 const GlobalValue *GV2 = nullptr; 9030 int64_t Offset1 = 0; 9031 int64_t Offset2 = 0; 9032 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1); 9033 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2); 9034 if (isGA1 && isGA2 && GV1 == GV2) 9035 return Offset1 == (Offset2 + Dist*Bytes); 9036 return false; 9037 } 9038 9039 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does 9040 // not enforce equality of the chain operands. 
9041 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
9042                             unsigned Bytes, int Dist,
9043                             SelectionDAG &DAG) {
9044   if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
9045     EVT VT = LS->getMemoryVT();
9046     SDValue Loc = LS->getBasePtr();
9047     return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
9048   }
9049 
9050   if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
9051     EVT VT;
9052     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
9053     default: return false;
9054     case Intrinsic::ppc_qpx_qvlfd:
9055     case Intrinsic::ppc_qpx_qvlfda:
9056       VT = MVT::v4f64;
9057       break;
9058     case Intrinsic::ppc_qpx_qvlfs:
9059     case Intrinsic::ppc_qpx_qvlfsa:
9060       VT = MVT::v4f32;
9061       break;
9062     case Intrinsic::ppc_qpx_qvlfcd:
9063     case Intrinsic::ppc_qpx_qvlfcda:
9064       VT = MVT::v2f64;
9065       break;
9066     case Intrinsic::ppc_qpx_qvlfcs:
9067     case Intrinsic::ppc_qpx_qvlfcsa:
9068       VT = MVT::v2f32;
9069       break;
9070     case Intrinsic::ppc_qpx_qvlfiwa:
9071     case Intrinsic::ppc_qpx_qvlfiwz:
9072     case Intrinsic::ppc_altivec_lvx:
9073     case Intrinsic::ppc_altivec_lvxl:
9074     case Intrinsic::ppc_vsx_lxvw4x:
9075       VT = MVT::v4i32;
9076       break;
9077     case Intrinsic::ppc_vsx_lxvd2x:
9078       VT = MVT::v2f64;
9079       break;
9080     case Intrinsic::ppc_altivec_lvebx:
9081       VT = MVT::i8;
9082       break;
9083     case Intrinsic::ppc_altivec_lvehx:
9084       VT = MVT::i16;
9085       break;
9086     case Intrinsic::ppc_altivec_lvewx:
9087       VT = MVT::i32;
9088       break;
9089     }
9090 
9091     return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
9092   }
9093 
9094   if (N->getOpcode() == ISD::INTRINSIC_VOID) {
9095     EVT VT;
9096     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
9097     default: return false;
9098     case Intrinsic::ppc_qpx_qvstfd:
9099     case Intrinsic::ppc_qpx_qvstfda:
9100       VT = MVT::v4f64;
9101       break;
9102     case Intrinsic::ppc_qpx_qvstfs:
9103     case Intrinsic::ppc_qpx_qvstfsa:
9104       VT = MVT::v4f32;
9105       break;
9106     case Intrinsic::ppc_qpx_qvstfcd:
9107     case Intrinsic::ppc_qpx_qvstfcda:
9108       VT = MVT::v2f64;
9109       break;
9110     case Intrinsic::ppc_qpx_qvstfcs:
9111     case Intrinsic::ppc_qpx_qvstfcsa:
9112       VT = MVT::v2f32;
9113       break;
9114     case Intrinsic::ppc_qpx_qvstfiw:
9115     case Intrinsic::ppc_qpx_qvstfiwa:
9116     case Intrinsic::ppc_altivec_stvx:
9117     case Intrinsic::ppc_altivec_stvxl:
9118     case Intrinsic::ppc_vsx_stxvw4x:
9119       VT = MVT::v4i32;
9120       break;
9121     case Intrinsic::ppc_vsx_stxvd2x:
9122       VT = MVT::v2f64;
9123       break;
9124     case Intrinsic::ppc_altivec_stvebx:
9125       VT = MVT::i8;
9126       break;
9127     case Intrinsic::ppc_altivec_stvehx:
9128       VT = MVT::i16;
9129       break;
9130     case Intrinsic::ppc_altivec_stvewx:
9131       VT = MVT::i32;
9132       break;
9133     }
9134 
9135     return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
9136   }
9137 
9138   return false;
9139 }
9140 
9141 // Return true if there is a nearby consecutive load to the one provided
9142 // (regardless of alignment). We search up and down the chain, looking through
9143 // token factors and other loads (but nothing else). A true return value
9144 // therefore indicates that it is safe to create a new consecutive load
9145 // adjacent to the load provided.
9146 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
9147   SDValue Chain = LD->getChain();
9148   EVT VT = LD->getMemoryVT();
9149 
9150   SmallSet<SDNode *, 16> LoadRoots;
9151   SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
9152   SmallSet<SDNode *, 16> Visited;
9153 
9154   // First, search up the chain, branching to follow all token-factor operands.
9155   // If we find a consecutive load, then we're done; otherwise, record all
9156   // nodes just above the top-level loads and token factors.
9157   while (!Queue.empty()) {
9158     SDNode *ChainNext = Queue.pop_back_val();
9159     if (!Visited.insert(ChainNext).second)
9160       continue;
9161 
9162     if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
9163       if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
9164         return true;
9165 
9166       if (!Visited.count(ChainLD->getChain().getNode()))
9167         Queue.push_back(ChainLD->getChain().getNode());
9168     } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
9169       for (const SDUse &O : ChainNext->ops())
9170         if (!Visited.count(O.getNode()))
9171           Queue.push_back(O.getNode());
9172     } else
9173       LoadRoots.insert(ChainNext);
9174   }
9175 
9176   // Second, search down the chain, starting from the top-level nodes recorded
9177   // in the first phase. These top-level nodes are the nodes just above all
9178   // loads and token factors. Starting with their uses, recursively look through
9179   // all loads (just the chain uses) and token factors to find a consecutive
9180   // load.
9181   Visited.clear();
9182   Queue.clear();
9183 
9184   for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
9185        IE = LoadRoots.end(); I != IE; ++I) {
9186     Queue.push_back(*I);
9187 
9188     while (!Queue.empty()) {
9189       SDNode *LoadRoot = Queue.pop_back_val();
9190       if (!Visited.insert(LoadRoot).second)
9191         continue;
9192 
9193       if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
9194         if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
9195           return true;
9196 
9197       for (SDNode::use_iterator UI = LoadRoot->use_begin(),
9198            UE = LoadRoot->use_end(); UI != UE; ++UI)
9199         if (((isa<MemSDNode>(*UI) &&
9200               cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
9201              UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
9202           Queue.push_back(*UI);
9203     }
9204   }
9205 
9206   return false;
9207 }
9208 
9209 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
9210                                                   DAGCombinerInfo &DCI) const {
9211   SelectionDAG &DAG = DCI.DAG;
9212   SDLoc dl(N);
9213 
9214   assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
9215   // If we're tracking CR bits, we need to be careful that we don't have:
9216   //   trunc(binary-ops(zext(x), zext(y)))
9217   // or
9218   //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...))
9219   // such that we're unnecessarily moving things into GPRs when it would be
9220   // better to keep them in CR bits.
9221 
9222   // Note that trunc here can be an actual i1 trunc, or can be the effective
9223   // truncation that comes from a setcc or select_cc.
9224   if (N->getOpcode() == ISD::TRUNCATE &&
9225       N->getValueType(0) != MVT::i1)
9226     return SDValue();
9227 
9228   if (N->getOperand(0).getValueType() != MVT::i32 &&
9229       N->getOperand(0).getValueType() != MVT::i64)
9230     return SDValue();
9231 
9232   if (N->getOpcode() == ISD::SETCC ||
9233       N->getOpcode() == ISD::SELECT_CC) {
9234     // If we're looking at a comparison, then we need to make sure that the
9235     // high bits (all except for the first) don't affect the result.
9236     ISD::CondCode CC =
9237       cast<CondCodeSDNode>(N->getOperand(
9238         N->getOpcode() == ISD::SETCC ?
2 : 4))->get(); 9239 unsigned OpBits = N->getOperand(0).getValueSizeInBits(); 9240 9241 if (ISD::isSignedIntSetCC(CC)) { 9242 if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits || 9243 DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits) 9244 return SDValue(); 9245 } else if (ISD::isUnsignedIntSetCC(CC)) { 9246 if (!DAG.MaskedValueIsZero(N->getOperand(0), 9247 APInt::getHighBitsSet(OpBits, OpBits-1)) || 9248 !DAG.MaskedValueIsZero(N->getOperand(1), 9249 APInt::getHighBitsSet(OpBits, OpBits-1))) 9250 return SDValue(); 9251 } else { 9252 // This is neither a signed nor an unsigned comparison, just make sure 9253 // that the high bits are equal. 9254 APInt Op1Zero, Op1One; 9255 APInt Op2Zero, Op2One; 9256 DAG.computeKnownBits(N->getOperand(0), Op1Zero, Op1One); 9257 DAG.computeKnownBits(N->getOperand(1), Op2Zero, Op2One); 9258 9259 // We don't really care about what is known about the first bit (if 9260 // anything), so clear it in all masks prior to comparing them. 9261 Op1Zero.clearBit(0); Op1One.clearBit(0); 9262 Op2Zero.clearBit(0); Op2One.clearBit(0); 9263 9264 if (Op1Zero != Op2Zero || Op1One != Op2One) 9265 return SDValue(); 9266 } 9267 } 9268 9269 // We now know that the higher-order bits are irrelevant, we just need to 9270 // make sure that all of the intermediate operations are bit operations, and 9271 // all inputs are extensions. 9272 if (N->getOperand(0).getOpcode() != ISD::AND && 9273 N->getOperand(0).getOpcode() != ISD::OR && 9274 N->getOperand(0).getOpcode() != ISD::XOR && 9275 N->getOperand(0).getOpcode() != ISD::SELECT && 9276 N->getOperand(0).getOpcode() != ISD::SELECT_CC && 9277 N->getOperand(0).getOpcode() != ISD::TRUNCATE && 9278 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND && 9279 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND && 9280 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND) 9281 return SDValue(); 9282 9283 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) && 9284 N->getOperand(1).getOpcode() != ISD::AND && 9285 N->getOperand(1).getOpcode() != ISD::OR && 9286 N->getOperand(1).getOpcode() != ISD::XOR && 9287 N->getOperand(1).getOpcode() != ISD::SELECT && 9288 N->getOperand(1).getOpcode() != ISD::SELECT_CC && 9289 N->getOperand(1).getOpcode() != ISD::TRUNCATE && 9290 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND && 9291 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND && 9292 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND) 9293 return SDValue(); 9294 9295 SmallVector<SDValue, 4> Inputs; 9296 SmallVector<SDValue, 8> BinOps, PromOps; 9297 SmallPtrSet<SDNode *, 16> Visited; 9298 9299 for (unsigned i = 0; i < 2; ++i) { 9300 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 9301 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 9302 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 9303 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) || 9304 isa<ConstantSDNode>(N->getOperand(i))) 9305 Inputs.push_back(N->getOperand(i)); 9306 else 9307 BinOps.push_back(N->getOperand(i)); 9308 9309 if (N->getOpcode() == ISD::TRUNCATE) 9310 break; 9311 } 9312 9313 // Visit all inputs, collect all binary operations (and, or, xor and 9314 // select) that are all fed by extensions. 9315 while (!BinOps.empty()) { 9316 SDValue BinOp = BinOps.back(); 9317 BinOps.pop_back(); 9318 9319 if (!Visited.insert(BinOp.getNode()).second) 9320 continue; 9321 9322 PromOps.push_back(BinOp); 9323 9324 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 9325 // The condition of the select is not promoted. 
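      // For example, in select(cond, t, f) only t and f (operands 1 and 2)
      // feed the promoted computation, and in select_cc(lhs, rhs, t, f, cc)
      // only t and f (operands 2 and 3) do; the comparison inputs keep their
      // original types.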
9326 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 9327 continue; 9328 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 9329 continue; 9330 9331 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 9332 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 9333 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 9334 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) || 9335 isa<ConstantSDNode>(BinOp.getOperand(i))) { 9336 Inputs.push_back(BinOp.getOperand(i)); 9337 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 9338 BinOp.getOperand(i).getOpcode() == ISD::OR || 9339 BinOp.getOperand(i).getOpcode() == ISD::XOR || 9340 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 9341 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC || 9342 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 9343 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 9344 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 9345 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) { 9346 BinOps.push_back(BinOp.getOperand(i)); 9347 } else { 9348 // We have an input that is not an extension or another binary 9349 // operation; we'll abort this transformation. 9350 return SDValue(); 9351 } 9352 } 9353 } 9354 9355 // Make sure that this is a self-contained cluster of operations (which 9356 // is not quite the same thing as saying that everything has only one 9357 // use). 9358 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 9359 if (isa<ConstantSDNode>(Inputs[i])) 9360 continue; 9361 9362 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 9363 UE = Inputs[i].getNode()->use_end(); 9364 UI != UE; ++UI) { 9365 SDNode *User = *UI; 9366 if (User != N && !Visited.count(User)) 9367 return SDValue(); 9368 9369 // Make sure that we're not going to promote the non-output-value 9370 // operand(s) or SELECT or SELECT_CC. 9371 // FIXME: Although we could sometimes handle this, and it does occur in 9372 // practice that one of the condition inputs to the select is also one of 9373 // the outputs, we currently can't deal with this. 9374 if (User->getOpcode() == ISD::SELECT) { 9375 if (User->getOperand(0) == Inputs[i]) 9376 return SDValue(); 9377 } else if (User->getOpcode() == ISD::SELECT_CC) { 9378 if (User->getOperand(0) == Inputs[i] || 9379 User->getOperand(1) == Inputs[i]) 9380 return SDValue(); 9381 } 9382 } 9383 } 9384 9385 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 9386 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 9387 UE = PromOps[i].getNode()->use_end(); 9388 UI != UE; ++UI) { 9389 SDNode *User = *UI; 9390 if (User != N && !Visited.count(User)) 9391 return SDValue(); 9392 9393 // Make sure that we're not going to promote the non-output-value 9394 // operand(s) or SELECT or SELECT_CC. 9395 // FIXME: Although we could sometimes handle this, and it does occur in 9396 // practice that one of the condition inputs to the select is also one of 9397 // the outputs, we currently can't deal with this. 9398 if (User->getOpcode() == ISD::SELECT) { 9399 if (User->getOperand(0) == PromOps[i]) 9400 return SDValue(); 9401 } else if (User->getOpcode() == ISD::SELECT_CC) { 9402 if (User->getOperand(0) == PromOps[i] || 9403 User->getOperand(1) == PromOps[i]) 9404 return SDValue(); 9405 } 9406 } 9407 } 9408 9409 // Replace all inputs with the extension operand. 
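  // For example, if an input is (zext i1 %c to i32), all of its users inside
  // the cluster are rewired directly to %c; the bit operations themselves are
  // recreated below with an i1 result type, so the extension becomes dead.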
9410 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 9411 // Constants may have users outside the cluster of to-be-promoted nodes, 9412 // and so we need to replace those as we do the promotions. 9413 if (isa<ConstantSDNode>(Inputs[i])) 9414 continue; 9415 else 9416 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0)); 9417 } 9418 9419 // Replace all operations (these are all the same, but have a different 9420 // (i1) return type). DAG.getNode will validate that the types of 9421 // a binary operator match, so go through the list in reverse so that 9422 // we've likely promoted both operands first. Any intermediate truncations or 9423 // extensions disappear. 9424 while (!PromOps.empty()) { 9425 SDValue PromOp = PromOps.back(); 9426 PromOps.pop_back(); 9427 9428 if (PromOp.getOpcode() == ISD::TRUNCATE || 9429 PromOp.getOpcode() == ISD::SIGN_EXTEND || 9430 PromOp.getOpcode() == ISD::ZERO_EXTEND || 9431 PromOp.getOpcode() == ISD::ANY_EXTEND) { 9432 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) && 9433 PromOp.getOperand(0).getValueType() != MVT::i1) { 9434 // The operand is not yet ready (see comment below). 9435 PromOps.insert(PromOps.begin(), PromOp); 9436 continue; 9437 } 9438 9439 SDValue RepValue = PromOp.getOperand(0); 9440 if (isa<ConstantSDNode>(RepValue)) 9441 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue); 9442 9443 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue); 9444 continue; 9445 } 9446 9447 unsigned C; 9448 switch (PromOp.getOpcode()) { 9449 default: C = 0; break; 9450 case ISD::SELECT: C = 1; break; 9451 case ISD::SELECT_CC: C = 2; break; 9452 } 9453 9454 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 9455 PromOp.getOperand(C).getValueType() != MVT::i1) || 9456 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 9457 PromOp.getOperand(C+1).getValueType() != MVT::i1)) { 9458 // The to-be-promoted operands of this node have not yet been 9459 // promoted (this should be rare because we're going through the 9460 // list backward, but if one of the operands has several users in 9461 // this cluster of to-be-promoted nodes, it is possible). 9462 PromOps.insert(PromOps.begin(), PromOp); 9463 continue; 9464 } 9465 9466 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 9467 PromOp.getNode()->op_end()); 9468 9469 // If there are any constant inputs, make sure they're replaced now. 9470 for (unsigned i = 0; i < 2; ++i) 9471 if (isa<ConstantSDNode>(Ops[C+i])) 9472 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]); 9473 9474 DAG.ReplaceAllUsesOfValueWith(PromOp, 9475 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops)); 9476 } 9477 9478 // Now we're left with the initial truncation itself. 9479 if (N->getOpcode() == ISD::TRUNCATE) 9480 return N->getOperand(0); 9481 9482 // Otherwise, this is a comparison. The operands to be compared have just 9483 // changed type (to i1), but everything else is the same. 9484 return SDValue(N, 0); 9485 } 9486 9487 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N, 9488 DAGCombinerInfo &DCI) const { 9489 SelectionDAG &DAG = DCI.DAG; 9490 SDLoc dl(N); 9491 9492 // If we're tracking CR bits, we need to be careful that we don't have: 9493 // zext(binary-ops(trunc(x), trunc(y))) 9494 // or 9495 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...) 9496 // such that we're unnecessarily moving things into CR bits that can more 9497 // efficiently stay in GPRs. 
Note that if we're not certain that the high 9498 // bits are set as required by the final extension, we still may need to do 9499 // some masking to get the proper behavior. 9500 9501 // This same functionality is important on PPC64 when dealing with 9502 // 32-to-64-bit extensions; these occur often when 32-bit values are used as 9503 // the return values of functions. Because it is so similar, it is handled 9504 // here as well. 9505 9506 if (N->getValueType(0) != MVT::i32 && 9507 N->getValueType(0) != MVT::i64) 9508 return SDValue(); 9509 9510 if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) || 9511 (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64()))) 9512 return SDValue(); 9513 9514 if (N->getOperand(0).getOpcode() != ISD::AND && 9515 N->getOperand(0).getOpcode() != ISD::OR && 9516 N->getOperand(0).getOpcode() != ISD::XOR && 9517 N->getOperand(0).getOpcode() != ISD::SELECT && 9518 N->getOperand(0).getOpcode() != ISD::SELECT_CC) 9519 return SDValue(); 9520 9521 SmallVector<SDValue, 4> Inputs; 9522 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps; 9523 SmallPtrSet<SDNode *, 16> Visited; 9524 9525 // Visit all inputs, collect all binary operations (and, or, xor and 9526 // select) that are all fed by truncations. 9527 while (!BinOps.empty()) { 9528 SDValue BinOp = BinOps.back(); 9529 BinOps.pop_back(); 9530 9531 if (!Visited.insert(BinOp.getNode()).second) 9532 continue; 9533 9534 PromOps.push_back(BinOp); 9535 9536 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 9537 // The condition of the select is not promoted. 9538 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 9539 continue; 9540 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 9541 continue; 9542 9543 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 9544 isa<ConstantSDNode>(BinOp.getOperand(i))) { 9545 Inputs.push_back(BinOp.getOperand(i)); 9546 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 9547 BinOp.getOperand(i).getOpcode() == ISD::OR || 9548 BinOp.getOperand(i).getOpcode() == ISD::XOR || 9549 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 9550 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) { 9551 BinOps.push_back(BinOp.getOperand(i)); 9552 } else { 9553 // We have an input that is not a truncation or another binary 9554 // operation; we'll abort this transformation. 9555 return SDValue(); 9556 } 9557 } 9558 } 9559 9560 // The operands of a select that must be truncated when the select is 9561 // promoted because the operand is actually part of the to-be-promoted set. 9562 DenseMap<SDNode *, EVT> SelectTruncOp[2]; 9563 9564 // Make sure that this is a self-contained cluster of operations (which 9565 // is not quite the same thing as saying that everything has only one 9566 // use). 9567 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 9568 if (isa<ConstantSDNode>(Inputs[i])) 9569 continue; 9570 9571 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 9572 UE = Inputs[i].getNode()->use_end(); 9573 UI != UE; ++UI) { 9574 SDNode *User = *UI; 9575 if (User != N && !Visited.count(User)) 9576 return SDValue(); 9577 9578 // If we're going to promote the non-output-value operand(s) or SELECT or 9579 // SELECT_CC, record them for truncation. 
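      // For example, given select(cond, t, f) where cond is itself one of the
      // truncation inputs being promoted, the rebuilt select would otherwise
      // see an over-wide condition; recording its original type here lets the
      // rebuild below truncate it back again.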
9580 if (User->getOpcode() == ISD::SELECT) { 9581 if (User->getOperand(0) == Inputs[i]) 9582 SelectTruncOp[0].insert(std::make_pair(User, 9583 User->getOperand(0).getValueType())); 9584 } else if (User->getOpcode() == ISD::SELECT_CC) { 9585 if (User->getOperand(0) == Inputs[i]) 9586 SelectTruncOp[0].insert(std::make_pair(User, 9587 User->getOperand(0).getValueType())); 9588 if (User->getOperand(1) == Inputs[i]) 9589 SelectTruncOp[1].insert(std::make_pair(User, 9590 User->getOperand(1).getValueType())); 9591 } 9592 } 9593 } 9594 9595 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 9596 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 9597 UE = PromOps[i].getNode()->use_end(); 9598 UI != UE; ++UI) { 9599 SDNode *User = *UI; 9600 if (User != N && !Visited.count(User)) 9601 return SDValue(); 9602 9603 // If we're going to promote the non-output-value operand(s) or SELECT or 9604 // SELECT_CC, record them for truncation. 9605 if (User->getOpcode() == ISD::SELECT) { 9606 if (User->getOperand(0) == PromOps[i]) 9607 SelectTruncOp[0].insert(std::make_pair(User, 9608 User->getOperand(0).getValueType())); 9609 } else if (User->getOpcode() == ISD::SELECT_CC) { 9610 if (User->getOperand(0) == PromOps[i]) 9611 SelectTruncOp[0].insert(std::make_pair(User, 9612 User->getOperand(0).getValueType())); 9613 if (User->getOperand(1) == PromOps[i]) 9614 SelectTruncOp[1].insert(std::make_pair(User, 9615 User->getOperand(1).getValueType())); 9616 } 9617 } 9618 } 9619 9620 unsigned PromBits = N->getOperand(0).getValueSizeInBits(); 9621 bool ReallyNeedsExt = false; 9622 if (N->getOpcode() != ISD::ANY_EXTEND) { 9623 // If all of the inputs are not already sign/zero extended, then 9624 // we'll still need to do that at the end. 9625 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 9626 if (isa<ConstantSDNode>(Inputs[i])) 9627 continue; 9628 9629 unsigned OpBits = 9630 Inputs[i].getOperand(0).getValueSizeInBits(); 9631 assert(PromBits < OpBits && "Truncation not to a smaller bit count?"); 9632 9633 if ((N->getOpcode() == ISD::ZERO_EXTEND && 9634 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0), 9635 APInt::getHighBitsSet(OpBits, 9636 OpBits-PromBits))) || 9637 (N->getOpcode() == ISD::SIGN_EXTEND && 9638 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) < 9639 (OpBits-(PromBits-1)))) { 9640 ReallyNeedsExt = true; 9641 break; 9642 } 9643 } 9644 } 9645 9646 // Replace all inputs, either with the truncation operand, or a 9647 // truncation or extension to the final output type. 9648 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 9649 // Constant inputs need to be replaced with the to-be-promoted nodes that 9650 // use them because they might have users outside of the cluster of 9651 // promoted nodes. 9652 if (isa<ConstantSDNode>(Inputs[i])) 9653 continue; 9654 9655 SDValue InSrc = Inputs[i].getOperand(0); 9656 if (Inputs[i].getValueType() == N->getValueType(0)) 9657 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc); 9658 else if (N->getOpcode() == ISD::SIGN_EXTEND) 9659 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 9660 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0))); 9661 else if (N->getOpcode() == ISD::ZERO_EXTEND) 9662 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 9663 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0))); 9664 else 9665 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 9666 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0))); 9667 } 9668 9669 // Replace all operations (these are all the same, but have a different 9670 // (promoted) return type). 
DAG.getNode will validate that the types of 9671 // a binary operator match, so go through the list in reverse so that 9672 // we've likely promoted both operands first. 9673 while (!PromOps.empty()) { 9674 SDValue PromOp = PromOps.back(); 9675 PromOps.pop_back(); 9676 9677 unsigned C; 9678 switch (PromOp.getOpcode()) { 9679 default: C = 0; break; 9680 case ISD::SELECT: C = 1; break; 9681 case ISD::SELECT_CC: C = 2; break; 9682 } 9683 9684 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 9685 PromOp.getOperand(C).getValueType() != N->getValueType(0)) || 9686 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 9687 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) { 9688 // The to-be-promoted operands of this node have not yet been 9689 // promoted (this should be rare because we're going through the 9690 // list backward, but if one of the operands has several users in 9691 // this cluster of to-be-promoted nodes, it is possible). 9692 PromOps.insert(PromOps.begin(), PromOp); 9693 continue; 9694 } 9695 9696 // For SELECT and SELECT_CC nodes, we do a similar check for any 9697 // to-be-promoted comparison inputs. 9698 if (PromOp.getOpcode() == ISD::SELECT || 9699 PromOp.getOpcode() == ISD::SELECT_CC) { 9700 if ((SelectTruncOp[0].count(PromOp.getNode()) && 9701 PromOp.getOperand(0).getValueType() != N->getValueType(0)) || 9702 (SelectTruncOp[1].count(PromOp.getNode()) && 9703 PromOp.getOperand(1).getValueType() != N->getValueType(0))) { 9704 PromOps.insert(PromOps.begin(), PromOp); 9705 continue; 9706 } 9707 } 9708 9709 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 9710 PromOp.getNode()->op_end()); 9711 9712 // If this node has constant inputs, then they'll need to be promoted here. 9713 for (unsigned i = 0; i < 2; ++i) { 9714 if (!isa<ConstantSDNode>(Ops[C+i])) 9715 continue; 9716 if (Ops[C+i].getValueType() == N->getValueType(0)) 9717 continue; 9718 9719 if (N->getOpcode() == ISD::SIGN_EXTEND) 9720 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 9721 else if (N->getOpcode() == ISD::ZERO_EXTEND) 9722 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 9723 else 9724 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 9725 } 9726 9727 // If we've promoted the comparison inputs of a SELECT or SELECT_CC, 9728 // truncate them again to the original value type. 9729 if (PromOp.getOpcode() == ISD::SELECT || 9730 PromOp.getOpcode() == ISD::SELECT_CC) { 9731 auto SI0 = SelectTruncOp[0].find(PromOp.getNode()); 9732 if (SI0 != SelectTruncOp[0].end()) 9733 Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]); 9734 auto SI1 = SelectTruncOp[1].find(PromOp.getNode()); 9735 if (SI1 != SelectTruncOp[1].end()) 9736 Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]); 9737 } 9738 9739 DAG.ReplaceAllUsesOfValueWith(PromOp, 9740 DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops)); 9741 } 9742 9743 // Now we're left with the initial extension itself. 9744 if (!ReallyNeedsExt) 9745 return N->getOperand(0); 9746 9747 // To zero extend, just mask off everything except for the first bit (in the 9748 // i1 case). 
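  // For example, for an effective i1 value being zero-extended to i32 this
  // emits (and x, 1), and the sign-extension case below emits the usual
  // shift pair (sra (shl x, 31), 31) so the low bit is replicated across
  // the result.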
9749   if (N->getOpcode() == ISD::ZERO_EXTEND)
9750     return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
9751                        DAG.getConstant(APInt::getLowBitsSet(
9752                                          N->getValueSizeInBits(0), PromBits),
9753                                        dl, N->getValueType(0)));
9754 
9755   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
9756          "Invalid extension type");
9757   EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0));
9758   SDValue ShiftCst =
9759     DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
9760   return DAG.getNode(ISD::SRA, dl, N->getValueType(0),
9761                      DAG.getNode(ISD::SHL, dl, N->getValueType(0),
9762                                  N->getOperand(0), ShiftCst), ShiftCst);
9763 }
9764 
9765 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
9766                                               DAGCombinerInfo &DCI) const {
9767   assert((N->getOpcode() == ISD::SINT_TO_FP ||
9768           N->getOpcode() == ISD::UINT_TO_FP) &&
9769          "Need an int -> FP conversion node here");
9770 
9771   if (!Subtarget.has64BitSupport())
9772     return SDValue();
9773 
9774   SelectionDAG &DAG = DCI.DAG;
9775   SDLoc dl(N);
9776   SDValue Op(N, 0);
9777 
9778   // Don't handle ppc_fp128 here or i1 conversions.
9779   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
9780     return SDValue();
9781   if (Op.getOperand(0).getValueType() == MVT::i1)
9782     return SDValue();
9783 
9784   // For i32 intermediate values, unfortunately, the conversion functions
9785   // leave the upper 32 bits of the value undefined. Within the set of
9786   // scalar instructions, we have no method for zero- or sign-extending the
9787   // value. Thus, we cannot handle i32 intermediate values here.
9788   if (Op.getOperand(0).getValueType() == MVT::i32)
9789     return SDValue();
9790 
9791   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
9792          "UINT_TO_FP is supported only with FPCVT");
9793 
9794   // If we have FCFIDS, then use it when converting to single-precision.
9795   // Otherwise, convert to double-precision and then round.
9796   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
9797                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
9798                                                             : PPCISD::FCFIDS)
9799                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
9800                                                             : PPCISD::FCFID);
9801   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
9802                   ? MVT::f32
9803                   : MVT::f64;
9804 
9805   // If we're converting from a float to an int and back to a float again,
9806   // then we don't need the store/load pair at all.
9807   if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
9808        Subtarget.hasFPCVT()) ||
9809       (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
9810     SDValue Src = Op.getOperand(0).getOperand(0);
9811     if (Src.getValueType() == MVT::f32) {
9812       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
9813       DCI.AddToWorklist(Src.getNode());
9814     }
9815 
9816     unsigned FCTOp =
9817       Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
9818                                                         PPCISD::FCTIDUZ;
9819 
9820     SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
9821     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
9822 
9823     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
9824       FP = DAG.getNode(ISD::FP_ROUND, dl,
9825                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
9826       DCI.AddToWorklist(FP.getNode());
9827     }
9828 
9829     return FP;
9830   }
9831 
9832   return SDValue();
9833 }
9834 
9835 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
9836 // builtins) into loads with swaps.
9837 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N, 9838 DAGCombinerInfo &DCI) const { 9839 SelectionDAG &DAG = DCI.DAG; 9840 SDLoc dl(N); 9841 SDValue Chain; 9842 SDValue Base; 9843 MachineMemOperand *MMO; 9844 9845 switch (N->getOpcode()) { 9846 default: 9847 llvm_unreachable("Unexpected opcode for little endian VSX load"); 9848 case ISD::LOAD: { 9849 LoadSDNode *LD = cast<LoadSDNode>(N); 9850 Chain = LD->getChain(); 9851 Base = LD->getBasePtr(); 9852 MMO = LD->getMemOperand(); 9853 // If the MMO suggests this isn't a load of a full vector, leave 9854 // things alone. For a built-in, we have to make the change for 9855 // correctness, so if there is a size problem that will be a bug. 9856 if (MMO->getSize() < 16) 9857 return SDValue(); 9858 break; 9859 } 9860 case ISD::INTRINSIC_W_CHAIN: { 9861 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 9862 Chain = Intrin->getChain(); 9863 Base = Intrin->getBasePtr(); 9864 MMO = Intrin->getMemOperand(); 9865 break; 9866 } 9867 } 9868 9869 MVT VecTy = N->getValueType(0).getSimpleVT(); 9870 SDValue LoadOps[] = { Chain, Base }; 9871 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl, 9872 DAG.getVTList(VecTy, MVT::Other), 9873 LoadOps, VecTy, MMO); 9874 DCI.AddToWorklist(Load.getNode()); 9875 Chain = Load.getValue(1); 9876 SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl, 9877 DAG.getVTList(VecTy, MVT::Other), Chain, Load); 9878 DCI.AddToWorklist(Swap.getNode()); 9879 return Swap; 9880 } 9881 9882 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for 9883 // builtins) into stores with swaps. 9884 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N, 9885 DAGCombinerInfo &DCI) const { 9886 SelectionDAG &DAG = DCI.DAG; 9887 SDLoc dl(N); 9888 SDValue Chain; 9889 SDValue Base; 9890 unsigned SrcOpnd; 9891 MachineMemOperand *MMO; 9892 9893 switch (N->getOpcode()) { 9894 default: 9895 llvm_unreachable("Unexpected opcode for little endian VSX store"); 9896 case ISD::STORE: { 9897 StoreSDNode *ST = cast<StoreSDNode>(N); 9898 Chain = ST->getChain(); 9899 Base = ST->getBasePtr(); 9900 MMO = ST->getMemOperand(); 9901 SrcOpnd = 1; 9902 // If the MMO suggests this isn't a store of a full vector, leave 9903 // things alone. For a built-in, we have to make the change for 9904 // correctness, so if there is a size problem that will be a bug. 9905 if (MMO->getSize() < 16) 9906 return SDValue(); 9907 break; 9908 } 9909 case ISD::INTRINSIC_VOID: { 9910 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 9911 Chain = Intrin->getChain(); 9912 // Intrin->getBasePtr() oddly does not get what we want. 
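      // For a store intrinsic such as ppc_vsx_stxvd2x the operands are
      // (chain, intrinsic id, value, pointer), so the address is operand 3
      // and the value to be stored (SrcOpnd) is operand 2.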
9913 Base = Intrin->getOperand(3); 9914 MMO = Intrin->getMemOperand(); 9915 SrcOpnd = 2; 9916 break; 9917 } 9918 } 9919 9920 SDValue Src = N->getOperand(SrcOpnd); 9921 MVT VecTy = Src.getValueType().getSimpleVT(); 9922 SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl, 9923 DAG.getVTList(VecTy, MVT::Other), Chain, Src); 9924 DCI.AddToWorklist(Swap.getNode()); 9925 Chain = Swap.getValue(1); 9926 SDValue StoreOps[] = { Chain, Swap, Base }; 9927 SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl, 9928 DAG.getVTList(MVT::Other), 9929 StoreOps, VecTy, MMO); 9930 DCI.AddToWorklist(Store.getNode()); 9931 return Store; 9932 } 9933 9934 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, 9935 DAGCombinerInfo &DCI) const { 9936 SelectionDAG &DAG = DCI.DAG; 9937 SDLoc dl(N); 9938 switch (N->getOpcode()) { 9939 default: break; 9940 case PPCISD::SHL: 9941 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 9942 if (C->isNullValue()) // 0 << V -> 0. 9943 return N->getOperand(0); 9944 } 9945 break; 9946 case PPCISD::SRL: 9947 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 9948 if (C->isNullValue()) // 0 >>u V -> 0. 9949 return N->getOperand(0); 9950 } 9951 break; 9952 case PPCISD::SRA: 9953 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 9954 if (C->isNullValue() || // 0 >>s V -> 0. 9955 C->isAllOnesValue()) // -1 >>s V -> -1. 9956 return N->getOperand(0); 9957 } 9958 break; 9959 case ISD::SIGN_EXTEND: 9960 case ISD::ZERO_EXTEND: 9961 case ISD::ANY_EXTEND: 9962 return DAGCombineExtBoolTrunc(N, DCI); 9963 case ISD::TRUNCATE: 9964 case ISD::SETCC: 9965 case ISD::SELECT_CC: 9966 return DAGCombineTruncBoolExt(N, DCI); 9967 case ISD::SINT_TO_FP: 9968 case ISD::UINT_TO_FP: 9969 return combineFPToIntToFP(N, DCI); 9970 case ISD::STORE: { 9971 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)). 9972 if (Subtarget.hasSTFIWX() && !cast<StoreSDNode>(N)->isTruncatingStore() && 9973 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT && 9974 N->getOperand(1).getValueType() == MVT::i32 && 9975 N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) { 9976 SDValue Val = N->getOperand(1).getOperand(0); 9977 if (Val.getValueType() == MVT::f32) { 9978 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 9979 DCI.AddToWorklist(Val.getNode()); 9980 } 9981 Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val); 9982 DCI.AddToWorklist(Val.getNode()); 9983 9984 SDValue Ops[] = { 9985 N->getOperand(0), Val, N->getOperand(2), 9986 DAG.getValueType(N->getOperand(1).getValueType()) 9987 }; 9988 9989 Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 9990 DAG.getVTList(MVT::Other), Ops, 9991 cast<StoreSDNode>(N)->getMemoryVT(), 9992 cast<StoreSDNode>(N)->getMemOperand()); 9993 DCI.AddToWorklist(Val.getNode()); 9994 return Val; 9995 } 9996 9997 // Turn STORE (BSWAP) -> sthbrx/stwbrx. 9998 if (cast<StoreSDNode>(N)->isUnindexed() && 9999 N->getOperand(1).getOpcode() == ISD::BSWAP && 10000 N->getOperand(1).getNode()->hasOneUse() && 10001 (N->getOperand(1).getValueType() == MVT::i32 || 10002 N->getOperand(1).getValueType() == MVT::i16 || 10003 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && 10004 N->getOperand(1).getValueType() == MVT::i64))) { 10005 SDValue BSwapOp = N->getOperand(1).getOperand(0); 10006 // Do an any-extend to 32-bits if this is a half-word input. 
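      // For example, on a big-endian target, storing bswap(i16 0x1234): the
      // input is any-extended to i32 and sthbrx stores only the low halfword
      // byte-reversed, so memory receives the bytes 0x34, 0x12; the extended
      // upper bits are ignored.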
10007 if (BSwapOp.getValueType() == MVT::i16) 10008 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp); 10009 10010 SDValue Ops[] = { 10011 N->getOperand(0), BSwapOp, N->getOperand(2), 10012 DAG.getValueType(N->getOperand(1).getValueType()) 10013 }; 10014 return 10015 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other), 10016 Ops, cast<StoreSDNode>(N)->getMemoryVT(), 10017 cast<StoreSDNode>(N)->getMemOperand()); 10018 } 10019 10020 // For little endian, VSX stores require generating xxswapd/lxvd2x. 10021 EVT VT = N->getOperand(1).getValueType(); 10022 if (VT.isSimple()) { 10023 MVT StoreVT = VT.getSimpleVT(); 10024 if (Subtarget.hasVSX() && Subtarget.isLittleEndian() && 10025 (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 || 10026 StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32)) 10027 return expandVSXStoreForLE(N, DCI); 10028 } 10029 break; 10030 } 10031 case ISD::LOAD: { 10032 LoadSDNode *LD = cast<LoadSDNode>(N); 10033 EVT VT = LD->getValueType(0); 10034 10035 // For little endian, VSX loads require generating lxvd2x/xxswapd. 10036 if (VT.isSimple()) { 10037 MVT LoadVT = VT.getSimpleVT(); 10038 if (Subtarget.hasVSX() && Subtarget.isLittleEndian() && 10039 (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 || 10040 LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32)) 10041 return expandVSXLoadForLE(N, DCI); 10042 } 10043 10044 EVT MemVT = LD->getMemoryVT(); 10045 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext()); 10046 unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(Ty); 10047 Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext()); 10048 unsigned ScalarABIAlignment = getDataLayout()->getABITypeAlignment(STy); 10049 if (LD->isUnindexed() && VT.isVector() && 10050 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) && 10051 // P8 and later hardware should just use LOAD. 10052 !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 || 10053 VT == MVT::v4i32 || VT == MVT::v4f32)) || 10054 (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) && 10055 LD->getAlignment() >= ScalarABIAlignment)) && 10056 LD->getAlignment() < ABIAlignment) { 10057 // This is a type-legal unaligned Altivec or QPX load. 10058 SDValue Chain = LD->getChain(); 10059 SDValue Ptr = LD->getBasePtr(); 10060 bool isLittleEndian = Subtarget.isLittleEndian(); 10061 10062 // This implements the loading of unaligned vectors as described in 10063 // the venerable Apple Velocity Engine overview. Specifically: 10064 // https://developer.apple.com/hardwaredrivers/ve/alignment.html 10065 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html 10066 // 10067 // The general idea is to expand a sequence of one or more unaligned 10068 // loads into an alignment-based permutation-control instruction (lvsl 10069 // or lvsr), a series of regular vector loads (which always truncate 10070 // their input address to an aligned address), and a series of 10071 // permutations. The results of these permutations are the requested 10072 // loaded values. The trick is that the last "extra" load is not taken 10073 // from the address you might suspect (sizeof(vector) bytes after the 10074 // last requested load), but rather sizeof(vector) - 1 bytes after the 10075 // last requested vector. The point of this is to avoid a page fault if 10076 // the base address happened to be aligned. This works because if the 10077 // base address is aligned, then adding less than a full vector length 10078 // will cause the last vector in the sequence to be (re)loaded. 
10079 // Otherwise, the next vector will be fetched as you might suspect was 10080 // necessary. 10081 10082 // We might be able to reuse the permutation generation from 10083 // a different base address offset from this one by an aligned amount. 10084 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this 10085 // optimization later. 10086 Intrinsic::ID Intr, IntrLD, IntrPerm; 10087 MVT PermCntlTy, PermTy, LDTy; 10088 if (Subtarget.hasAltivec()) { 10089 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr : 10090 Intrinsic::ppc_altivec_lvsl; 10091 IntrLD = Intrinsic::ppc_altivec_lvx; 10092 IntrPerm = Intrinsic::ppc_altivec_vperm; 10093 PermCntlTy = MVT::v16i8; 10094 PermTy = MVT::v4i32; 10095 LDTy = MVT::v4i32; 10096 } else { 10097 Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld : 10098 Intrinsic::ppc_qpx_qvlpcls; 10099 IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd : 10100 Intrinsic::ppc_qpx_qvlfs; 10101 IntrPerm = Intrinsic::ppc_qpx_qvfperm; 10102 PermCntlTy = MVT::v4f64; 10103 PermTy = MVT::v4f64; 10104 LDTy = MemVT.getSimpleVT(); 10105 } 10106 10107 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy); 10108 10109 // Create the new MMO for the new base load. It is like the original MMO, 10110 // but represents an area in memory almost twice the vector size centered 10111 // on the original address. If the address is unaligned, we might start 10112 // reading up to (sizeof(vector)-1) bytes below the address of the 10113 // original unaligned load. 10114 MachineFunction &MF = DAG.getMachineFunction(); 10115 MachineMemOperand *BaseMMO = 10116 MF.getMachineMemOperand(LD->getMemOperand(), -MemVT.getStoreSize()+1, 10117 2*MemVT.getStoreSize()-1); 10118 10119 // Create the new base load. 10120 SDValue LDXIntID = DAG.getTargetConstant(IntrLD, dl, getPointerTy()); 10121 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr }; 10122 SDValue BaseLoad = 10123 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 10124 DAG.getVTList(PermTy, MVT::Other), 10125 BaseLoadOps, LDTy, BaseMMO); 10126 10127 // Note that the value of IncOffset (which is provided to the next 10128 // load's pointer info offset value, and thus used to calculate the 10129 // alignment), and the value of IncValue (which is actually used to 10130 // increment the pointer value) are different! This is because we 10131 // require the next load to appear to be aligned, even though it 10132 // is actually offset from the base pointer by a lesser amount. 10133 int IncOffset = VT.getSizeInBits() / 8; 10134 int IncValue = IncOffset; 10135 10136 // Walk (both up and down) the chain looking for another load at the real 10137 // (aligned) offset (the alignment of the other load does not matter in 10138 // this case). If found, then do not use the offset reduction trick, as 10139 // that will prevent the loads from being later combined (as they would 10140 // otherwise be duplicates). 
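      // For example, with a 16-byte vector at base B, the extra load uses
      // address B+15 rather than B+16: lvx ignores the low four address bits,
      // so if B is unaligned, B+15 still truncates to the next aligned
      // quadword, while if B happens to be aligned, it simply reloads the
      // quadword at B instead of touching (and possibly faulting on) the
      // line at B+16.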
10141 if (!findConsecutiveLoad(LD, DAG)) 10142 --IncValue; 10143 10144 SDValue Increment = DAG.getConstant(IncValue, dl, getPointerTy()); 10145 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 10146 10147 MachineMemOperand *ExtraMMO = 10148 MF.getMachineMemOperand(LD->getMemOperand(), 10149 1, 2*MemVT.getStoreSize()-1); 10150 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr }; 10151 SDValue ExtraLoad = 10152 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 10153 DAG.getVTList(PermTy, MVT::Other), 10154 ExtraLoadOps, LDTy, ExtraMMO); 10155 10156 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 10157 BaseLoad.getValue(1), ExtraLoad.getValue(1)); 10158 10159 // Because vperm has a big-endian bias, we must reverse the order 10160 // of the input vectors and complement the permute control vector 10161 // when generating little endian code. We have already handled the 10162 // latter by using lvsr instead of lvsl, so just reverse BaseLoad 10163 // and ExtraLoad here. 10164 SDValue Perm; 10165 if (isLittleEndian) 10166 Perm = BuildIntrinsicOp(IntrPerm, 10167 ExtraLoad, BaseLoad, PermCntl, DAG, dl); 10168 else 10169 Perm = BuildIntrinsicOp(IntrPerm, 10170 BaseLoad, ExtraLoad, PermCntl, DAG, dl); 10171 10172 if (VT != PermTy) 10173 Perm = Subtarget.hasAltivec() ? 10174 DAG.getNode(ISD::BITCAST, dl, VT, Perm) : 10175 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX 10176 DAG.getTargetConstant(1, dl, MVT::i64)); 10177 // second argument is 1 because this rounding 10178 // is always exact. 10179 10180 // The output of the permutation is our loaded result, the TokenFactor is 10181 // our new chain. 10182 DCI.CombineTo(N, Perm, TF); 10183 return SDValue(N, 0); 10184 } 10185 } 10186 break; 10187 case ISD::INTRINSIC_WO_CHAIN: { 10188 bool isLittleEndian = Subtarget.isLittleEndian(); 10189 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 10190 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr 10191 : Intrinsic::ppc_altivec_lvsl); 10192 if ((IID == Intr || 10193 IID == Intrinsic::ppc_qpx_qvlpcld || 10194 IID == Intrinsic::ppc_qpx_qvlpcls) && 10195 N->getOperand(1)->getOpcode() == ISD::ADD) { 10196 SDValue Add = N->getOperand(1); 10197 10198 int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ? 10199 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */; 10200 10201 if (DAG.MaskedValueIsZero( 10202 Add->getOperand(1), 10203 APInt::getAllOnesValue(Bits /* alignment */) 10204 .zext( 10205 Add.getValueType().getScalarType().getSizeInBits()))) { 10206 SDNode *BasePtr = Add->getOperand(0).getNode(); 10207 for (SDNode::use_iterator UI = BasePtr->use_begin(), 10208 UE = BasePtr->use_end(); 10209 UI != UE; ++UI) { 10210 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 10211 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) { 10212 // We've found another LVSL/LVSR, and this address is an aligned 10213 // multiple of that one. The results will be the same, so use the 10214 // one we've just found instead. 
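                // For example, lvsl(B) and lvsl(B + 16) produce identical
                // permute control vectors: adding a multiple of the vector
                // size (16 bytes here, 32 for QPX) leaves the misalignment,
                // and therefore the computed shift permutation, unchanged.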
10215 10216 return SDValue(*UI, 0); 10217 } 10218 } 10219 } 10220 10221 if (isa<ConstantSDNode>(Add->getOperand(1))) { 10222 SDNode *BasePtr = Add->getOperand(0).getNode(); 10223 for (SDNode::use_iterator UI = BasePtr->use_begin(), 10224 UE = BasePtr->use_end(); UI != UE; ++UI) { 10225 if (UI->getOpcode() == ISD::ADD && 10226 isa<ConstantSDNode>(UI->getOperand(1)) && 10227 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() - 10228 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) % 10229 (1ULL << Bits) == 0) { 10230 SDNode *OtherAdd = *UI; 10231 for (SDNode::use_iterator VI = OtherAdd->use_begin(), 10232 VE = OtherAdd->use_end(); VI != VE; ++VI) { 10233 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 10234 cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) { 10235 return SDValue(*VI, 0); 10236 } 10237 } 10238 } 10239 } 10240 } 10241 } 10242 } 10243 10244 break; 10245 case ISD::INTRINSIC_W_CHAIN: { 10246 // For little endian, VSX loads require generating lxvd2x/xxswapd. 10247 if (Subtarget.hasVSX() && Subtarget.isLittleEndian()) { 10248 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10249 default: 10250 break; 10251 case Intrinsic::ppc_vsx_lxvw4x: 10252 case Intrinsic::ppc_vsx_lxvd2x: 10253 return expandVSXLoadForLE(N, DCI); 10254 } 10255 } 10256 break; 10257 } 10258 case ISD::INTRINSIC_VOID: { 10259 // For little endian, VSX stores require generating xxswapd/stxvd2x. 10260 if (Subtarget.hasVSX() && Subtarget.isLittleEndian()) { 10261 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10262 default: 10263 break; 10264 case Intrinsic::ppc_vsx_stxvw4x: 10265 case Intrinsic::ppc_vsx_stxvd2x: 10266 return expandVSXStoreForLE(N, DCI); 10267 } 10268 } 10269 break; 10270 } 10271 case ISD::BSWAP: 10272 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 10273 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && 10274 N->getOperand(0).hasOneUse() && 10275 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 || 10276 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && 10277 N->getValueType(0) == MVT::i64))) { 10278 SDValue Load = N->getOperand(0); 10279 LoadSDNode *LD = cast<LoadSDNode>(Load); 10280 // Create the byte-swapping load. 10281 SDValue Ops[] = { 10282 LD->getChain(), // Chain 10283 LD->getBasePtr(), // Ptr 10284 DAG.getValueType(N->getValueType(0)) // VT 10285 }; 10286 SDValue BSLoad = 10287 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl, 10288 DAG.getVTList(N->getValueType(0) == MVT::i64 ? 10289 MVT::i64 : MVT::i32, MVT::Other), 10290 Ops, LD->getMemoryVT(), LD->getMemOperand()); 10291 10292 // If this is an i16 load, insert the truncate. 10293 SDValue ResVal = BSLoad; 10294 if (N->getValueType(0) == MVT::i16) 10295 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad); 10296 10297 // First, combine the bswap away. This makes the value produced by the 10298 // load dead. 10299 DCI.CombineTo(N, ResVal); 10300 10301 // Next, combine the load away, we give it a bogus result value but a real 10302 // chain result. The result value is dead because the bswap is dead. 10303 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); 10304 10305 // Return N so it doesn't get rechecked! 10306 return SDValue(N, 0); 10307 } 10308 10309 break; 10310 case PPCISD::VCMP: { 10311 // If a VCMPo node already exists with exactly the same operands as this 10312 // node, use its result instead of this node (VCMPo computes both a CR6 and 10313 // a normal output). 
10314 // 10315 if (!N->getOperand(0).hasOneUse() && 10316 !N->getOperand(1).hasOneUse() && 10317 !N->getOperand(2).hasOneUse()) { 10318 10319 // Scan all of the users of the LHS, looking for VCMPo's that match. 10320 SDNode *VCMPoNode = nullptr; 10321 10322 SDNode *LHSN = N->getOperand(0).getNode(); 10323 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 10324 UI != E; ++UI) 10325 if (UI->getOpcode() == PPCISD::VCMPo && 10326 UI->getOperand(1) == N->getOperand(1) && 10327 UI->getOperand(2) == N->getOperand(2) && 10328 UI->getOperand(0) == N->getOperand(0)) { 10329 VCMPoNode = *UI; 10330 break; 10331 } 10332 10333 // If there is no VCMPo node, or if the flag value has a single use, don't 10334 // transform this. 10335 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 10336 break; 10337 10338 // Look at the (necessarily single) use of the flag value. If it has a 10339 // chain, this transformation is more complex. Note that multiple things 10340 // could use the value result, which we should ignore. 10341 SDNode *FlagUser = nullptr; 10342 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 10343 FlagUser == nullptr; ++UI) { 10344 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 10345 SDNode *User = *UI; 10346 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 10347 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) { 10348 FlagUser = User; 10349 break; 10350 } 10351 } 10352 } 10353 10354 // If the user is a MFOCRF instruction, we know this is safe. 10355 // Otherwise we give up for right now. 10356 if (FlagUser->getOpcode() == PPCISD::MFOCRF) 10357 return SDValue(VCMPoNode, 0); 10358 } 10359 break; 10360 } 10361 case ISD::BRCOND: { 10362 SDValue Cond = N->getOperand(1); 10363 SDValue Target = N->getOperand(2); 10364 10365 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN && 10366 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() == 10367 Intrinsic::ppc_is_decremented_ctr_nonzero) { 10368 10369 // We now need to make the intrinsic dead (it cannot be instruction 10370 // selected). 10371 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0)); 10372 assert(Cond.getNode()->hasOneUse() && 10373 "Counter decrement has more than one use"); 10374 10375 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other, 10376 N->getOperand(0), Target); 10377 } 10378 } 10379 break; 10380 case ISD::BR_CC: { 10381 // If this is a branch on an altivec predicate comparison, lower this so 10382 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This 10383 // lowering is done pre-legalize, because the legalizer lowers the predicate 10384 // compare down to code that is difficult to reassemble. 10385 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 10386 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 10387 10388 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero 10389 // value. If so, pass-through the AND to get to the intrinsic. 
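    // For example, promotion of the intrinsic's i1 result typically yields
    // (and (is_decremented_ctr_nonzero), 1); because the intrinsic only
    // produces 0 or 1, that AND does not change the value, so we can strip
    // it and branch on the intrinsic directly.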
10390 if (LHS.getOpcode() == ISD::AND && 10391 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN && 10392 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() == 10393 Intrinsic::ppc_is_decremented_ctr_nonzero && 10394 isa<ConstantSDNode>(LHS.getOperand(1)) && 10395 !cast<ConstantSDNode>(LHS.getOperand(1))->getConstantIntValue()-> 10396 isZero()) 10397 LHS = LHS.getOperand(0); 10398 10399 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN && 10400 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 10401 Intrinsic::ppc_is_decremented_ctr_nonzero && 10402 isa<ConstantSDNode>(RHS)) { 10403 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && 10404 "Counter decrement comparison is not EQ or NE"); 10405 10406 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 10407 bool isBDNZ = (CC == ISD::SETEQ && Val) || 10408 (CC == ISD::SETNE && !Val); 10409 10410 // We now need to make the intrinsic dead (it cannot be instruction 10411 // selected). 10412 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0)); 10413 assert(LHS.getNode()->hasOneUse() && 10414 "Counter decrement has more than one use"); 10415 10416 return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other, 10417 N->getOperand(0), N->getOperand(4)); 10418 } 10419 10420 int CompareOpc; 10421 bool isDot; 10422 10423 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 10424 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 10425 getAltivecCompareInfo(LHS, CompareOpc, isDot, Subtarget)) { 10426 assert(isDot && "Can't compare against a vector result!"); 10427 10428 // If this is a comparison against something other than 0/1, then we know 10429 // that the condition is never/always true. 10430 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 10431 if (Val != 0 && Val != 1) { 10432 if (CC == ISD::SETEQ) // Cond never true, remove branch. 10433 return N->getOperand(0); 10434 // Always !=, turn it into an unconditional branch. 10435 return DAG.getNode(ISD::BR, dl, MVT::Other, 10436 N->getOperand(0), N->getOperand(4)); 10437 } 10438 10439 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 10440 10441 // Create the PPCISD altivec 'dot' comparison node. 10442 SDValue Ops[] = { 10443 LHS.getOperand(2), // LHS of compare 10444 LHS.getOperand(3), // RHS of compare 10445 DAG.getConstant(CompareOpc, dl, MVT::i32) 10446 }; 10447 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue }; 10448 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 10449 10450 // Unpack the result based on how the target uses it. 10451 PPC::Predicate CompOpc; 10452 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 10453 default: // Can't happen, don't crash on invalid number though. 10454 case 0: // Branch on the value of the EQ bit of CR6. 10455 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 10456 break; 10457 case 1: // Branch on the inverted value of the EQ bit of CR6. 10458 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 10459 break; 10460 case 2: // Branch on the value of the LT bit of CR6. 10461 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 10462 break; 10463 case 3: // Branch on the inverted value of the LT bit of CR6. 10464 CompOpc = BranchOnWhenPredTrue ? 
      return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
                         DAG.getConstant(CompOpc, dl, MVT::i32),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  }

  return SDValue();
}
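
// Lower (sdiv X, +/-2^k) using the PPC shift idiom captured by
// PPCISD::SRA_ADDZE: the arithmetic right shift sets the carry bit when a
// negative value shifts out one bits, and addze adds that carry back in,
// yielding a round-toward-zero quotient. For a negative divisor, the result
// is then negated.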
SDValue
PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 std::vector<SDNode *> *Created) const {
  // fold (sdiv X, pow2)
  EVT VT = N->getValueType(0);
  if (VT == MVT::i64 && !Subtarget.isPPC64())
    return SDValue();
  if ((VT != MVT::i32 && VT != MVT::i64) ||
      !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
    return SDValue();

  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);

  bool IsNegPow2 = (-Divisor).isPowerOf2();
  unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
  SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);

  SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
  if (Created)
    Created->push_back(Op.getNode());

  if (IsNegPow2) {
    Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
    if (Created)
      Created->push_back(Op.getNode());
  }

  return Op;
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      APInt &KnownZero,
                                                      APInt &KnownOne,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
    if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
      KnownZero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpequd_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtsd_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
    case Intrinsic::ppc_altivec_vcmpgtud_p:
      KnownZero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}

unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8: {
    if (!ML)
      break;

    const PPCInstrInfo *TII = Subtarget.getInstrInfo();

    // For small loops (between 5 and 8 instructions), align to a 32-byte
    // boundary so that the entire loop fits in one instruction-cache line.
    uint64_t LoopSize = 0;
    for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
      for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J)
        LoopSize += TII->GetInstSizeInBytes(J);

    if (LoopSize > 16 && LoopSize <= 32)
      return 5;

    break;
  }
  }

  return TargetLowering::getPrefLoopAlignment(ML);
}

/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'v':
    case 'y':
      return C_RegisterClass;
    case 'Z':
      // FIXME: While Z does indicate a memory constraint, it specifically
      // indicates an r+r address (used in conjunction with the 'y' modifier
      // in the replacement string). Currently, we're forcing the base
      // register to be r0 in the asm printer (which is interpreted as zero)
      // and forming the complete address in the second register. This is
      // suboptimal.
      return C_Memory;
    }
  } else if (Constraint == "wc") { // individual CR bits.
    return C_RegisterClass;
  } else if (Constraint == "wa" || Constraint == "wd" ||
             Constraint == "wf" || Constraint == "ws") {
    return C_RegisterClass; // VSX registers.
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
PPCTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();

  // Look at the constraint type.
  if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
    return CW_Register; // an individual CR bit.
  else if ((StringRef(constraint) == "wa" ||
            StringRef(constraint) == "wd" ||
            StringRef(constraint) == "wf") &&
           type->isVectorTy())
    return CW_Register;
  else if (StringRef(constraint) == "ws" && type->isDoubleTy())
    return CW_Register;

  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'b':
    if (type->isIntegerTy())
      weight = CW_Register;
    break;
  case 'f':
    if (type->isFloatTy())
      weight = CW_Register;
    break;
  case 'd':
    if (type->isDoubleTy())
      weight = CW_Register;
    break;
  case 'v':
    if (type->isVectorTy())
      weight = CW_Register;
    break;
  case 'y':
    weight = CW_Register;
    break;
  case 'Z':
    weight = CW_Memory;
    break;
  }
  return weight;
}

std::pair<unsigned, const TargetRegisterClass *>
PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                const std::string &Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
      return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
    case 'r':   // R0-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RCRegClass);
      return std::make_pair(0U, &PPC::GPRCRegClass);
    case 'f':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &PPC::F4RCRegClass);
      if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &PPC::F8RCRegClass);
      if (VT == MVT::v4f64 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QFRCRegClass);
      if (VT == MVT::v4f32 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QSRCRegClass);
      break;
    case 'v':
      if (VT == MVT::v4f64 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QFRCRegClass);
      if (VT == MVT::v4f32 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QSRCRegClass);
      return std::make_pair(0U, &PPC::VRRCRegClass);
    case 'y':   // crrc
      return std::make_pair(0U, &PPC::CRRCRegClass);
    }
  } else if (Constraint == "wc") { // an individual CR bit.
    return std::make_pair(0U, &PPC::CRBITRCRegClass);
  } else if (Constraint == "wa" || Constraint == "wd" ||
             Constraint == "wf") {
    return std::make_pair(0U, &PPC::VSRCRegClass);
  } else if (Constraint == "ws") {
    if (VT == MVT::f32)
      return std::make_pair(0U, &PPC::VSSRCRegClass);
    else
      return std::make_pair(0U, &PPC::VSFRCRegClass);
  }

  std::pair<unsigned, const TargetRegisterClass *> R =
      TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
  // (which we call X[0-9]+). If a 64-bit value has been requested, and a
  // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
  // register.
  // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
  // the AsmName field from *RegisterInfo.td, then this would not be necessary.
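  // For example, an i64 operand constrained to {r4} is rewritten here to use
  // X4 instead.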
  if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
      PPC::GPRCRegClass.contains(R.first))
    return std::make_pair(TRI->getMatchingSuperReg(R.first,
                            PPC::sub_32, &PPC::G8RCRegClass),
                          &PPC::G8RCRegClass);

  // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
  if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
    R.first = PPC::CR0;
    R.second = &PPC::CRRCRegClass;
  }

  return R;
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints.
  if (Constraint.length() > 1) return;

  char Letter = Constraint[0];
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
    if (!CST) return; // Must be an immediate to match.
    SDLoc dl(Op);
    int64_t Value = CST->getSExtValue();
    EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
                         // numbers are printed as such.
    switch (Letter) {
    default: llvm_unreachable("Unknown constraint letter!");
    case 'I':  // "I" is a signed 16-bit constant.
      if (isInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
      if (isShiftedUInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
      if (isShiftedInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
      if (isUInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'M':  // "M" is a constant that is greater than 31.
      if (Value > 31)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'N':  // "N" is a positive constant that is an exact power of two.
      if (Value > 0 && isPowerOf2_64(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'O':  // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      if (isInt<16>(-Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              Type *Ty) const {
  // PPC does not allow r+i addressing modes for vectors!
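  // (AltiVec and VSX vector loads and stores come only in indexed
  // register+register forms such as lvx/stvx, so there is no immediate
  // field to fold an offset into.)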
  if (Ty->isVectorTy() && AM.BaseOffs != 0)
    return false;

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Beyond that, PPC only supports r+r addressing:
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = Subtarget.isPPC64();

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
        DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(),
                        dl, isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, getPointerTy(),
                                   FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

  // Just load the return address off the stack.
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
                     RetAddrFI, MachinePointerInfo(), false, false, false, 0);
}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setFrameAddressIsTaken(true);

  // Naked functions never have a frame pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned FrameReg;
  if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
    FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
  else
    FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;

  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
                                         PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo(), false, false,
                            false, 0);
  return FrameAddr;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
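// getRegisterByName - Resolve the register names accepted for named-register
// global variables. Only r1, r2, and r13 are supported, and each only on the
// subtargets where it has a fixed, reserved meaning (see the StringSwitch
// below).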
unsigned PPCTargetLowering::getRegisterByName(const char* RegName,
                                              EVT VT) const {
  bool isPPC64 = Subtarget.isPPC64();
  bool isDarwinABI = Subtarget.isDarwinABI();

  if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) ||
      (!isPPC64 && VT != MVT::i32))
    report_fatal_error("Invalid register global variable type");

  bool is64Bit = isPPC64 && VT == MVT::i64;
  unsigned Reg = StringSwitch<unsigned>(RegName)
    .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
    .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2)
    .Case("r13", (!isPPC64 && isDarwinABI) ? 0 :
                   (is64Bit ? PPC::X13 : PPC::R13))
    .Default(0);

  if (Reg)
    return Reg;
  report_fatal_error("Invalid register name for global variable");
}

bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
  return false;
}

bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           unsigned Intrinsic) const {

  switch (Intrinsic) {
  case Intrinsic::ppc_qpx_qvlfd:
  case Intrinsic::ppc_qpx_qvlfs:
  case Intrinsic::ppc_qpx_qvlfcd:
  case Intrinsic::ppc_qpx_qvlfcs:
  case Intrinsic::ppc_qpx_qvlfiwa:
  case Intrinsic::ppc_qpx_qvlfiwz:
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

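    // These loads may ignore the low-order bits of the address (lvx, for
    // example, truncates the address to a multiple of 16), so all we know is
    // that the access lies within the naturally-aligned block around the
    // pointer; report a conservative window covering every byte that might
    // be touched.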
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = 1;
    Info.vol = false;
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::ppc_qpx_qvlfda:
  case Intrinsic::ppc_qpx_qvlfsa:
  case Intrinsic::ppc_qpx_qvlfcda:
  case Intrinsic::ppc_qpx_qvlfcsa:
  case Intrinsic::ppc_qpx_qvlfiwaa:
  case Intrinsic::ppc_qpx_qvlfiwza: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvlfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = 1;
    Info.vol = false;
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfd:
  case Intrinsic::ppc_qpx_qvstfs:
  case Intrinsic::ppc_qpx_qvstfcd:
  case Intrinsic::ppc_qpx_qvstfcs:
  case Intrinsic::ppc_qpx_qvstfiw:
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = 1;
    Info.vol = false;
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfda:
  case Intrinsic::ppc_qpx_qvstfsa:
  case Intrinsic::ppc_qpx_qvstfcda:
  case Intrinsic::ppc_qpx_qvstfcsa:
  case Intrinsic::ppc_qpx_qvstfiwa: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = 1;
    Info.vol = false;
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  default:
    break;
  }

  return false;
}

/// getOptimalMemOpType - Returns the target-specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove lowering.
/// If DstAlign is zero, the destination alignment can satisfy any constraint.
/// Similarly, if SrcAlign is zero, there is no need to check it against the
/// alignment requirement, probably because the source does not need to be
/// loaded. If 'IsMemset' is true, this call is expanding a memset. If
/// 'ZeroMemset' is true, it is a memset of zero. 'MemcpyStrSrc' indicates
/// whether the memcpy source is constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
                                           unsigned DstAlign,
                                           unsigned SrcAlign,
                                           bool IsMemset, bool ZeroMemset,
                                           bool MemcpyStrSrc,
                                           MachineFunction &MF) const {
  if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
    const Function *F = MF.getFunction();
    // When expanding a memset, require at least two QPX instructions to cover
    // the cost of loading the value to be stored from the constant pool.
    if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
        (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
        !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
      return MVT::v4f64;
    }

    // We should use Altivec/VSX loads and stores when available. For unaligned
    // addresses, unaligned VSX loads are only fast starting with the P8.
    if (Subtarget.hasAltivec() && Size >= 16 &&
        (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
         ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
      return MVT::v4i32;
  }

  if (Subtarget.isPPC64()) {
    return MVT::i64;
  }

  return MVT::i32;
}

/// \brief Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0 || BitSize > 64)
    return false;
  return true;
}

bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Generally speaking, zexts are not free, but they are free when they can be
  // folded with other operations.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  // FIXME: Add other cases...
  //  - 32-bit shifts with a zext to i64
  //  - zext after ctlz, bswap, etc.
  //  - zext after and by a constant mask

  return TargetLowering::isZExtFree(Val, VT2);
}
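
// On PowerPC, extending f32 to f64 is effectively free: single-precision
// values are kept in double-precision form in the FP registers.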
bool PPCTargetLowering::isFPExtFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return true;
}

bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                       unsigned,
                                                       unsigned,
                                                       bool *Fast) const {
  if (DisablePPCUnaligned)
    return false;

  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual expansion,
  // and generally only traps for software emulation when crossing page
  // boundaries.

  if (!VT.isSimple())
    return false;

  if (VT.getSimpleVT().isVector()) {
    if (Subtarget.hasVSX()) {
      if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
          VT != MVT::v4f32 && VT != MVT::v4i32)
        return false;
    } else {
      return false;
    }
  }

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  default:
    break;
  }

  return false;
}

const MCPhysReg *
PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
  // LR is a callee-save register, but we must treat it as clobbered by any
  // call site. Hence we include LR in the scratch registers, which are in
  // turn added as implicit-defs for stackmaps and patchpoints. The same
  // reasoning applies to CTR, which is used by any indirect call.
  static const MCPhysReg ScratchRegs[] = {
    PPC::X12, PPC::LR8, PPC::CTR8, 0
  };

  return ScratchRegs;
}

bool
PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
    EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return false;

  if (Subtarget.hasQPX()) {
    if (VT == MVT::v4f32 || VT == MVT::v4f64 || VT == MVT::v4i1)
      return true;
  }

  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT,
                                                             DefinedValues);
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref || Subtarget.enableMachineScheduler())
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}

// Create a fast isel object.
FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) const {
  return PPC::createFastISel(FuncInfo, LibInfo);
}