//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCTargetMachine.h"
#include "PPCTargetObjectFile.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

// FIXME: Remove this once soft-float is supported.
static cl::opt<bool> DisablePPCFloatInVariadic(
    "disable-ppc-float-in-variadic",
    cl::desc("disable saving float registers for va_start on PPC"),
    cl::Hidden);

static cl::opt<bool> DisablePPCPreinc(
    "disable-ppc-preinc",
    cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref(
    "disable-ppc-ilp-pref",
    cl::desc("disable setting the node scheduling preference to ILP on PPC"),
    cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned(
    "disable-ppc-unaligned",
    cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM)
    : TargetLowering(TM), Subtarget(*TM.getSubtargetImpl()) {
  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
  addRegisterClass(MVT::f64, &PPC::F8RCRegClass);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
                         isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType (ISD::UINT_TO_FP, MVT::i1,
                         isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
    setTruncStoreAction(MVT::i64, MVT::i1, Expand);
    setTruncStoreAction(MVT::i32, MVT::i1, Expand);
    setTruncStoreAction(MVT::i16, MVT::i1, Expand);
    setTruncStoreAction(MVT::i8, MVT::i1, Expand);

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // This is used in the ppcf128->int sequence. Note it has different semantics
  // from FP_ROUND: that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Legal);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Expand FSQRT unless we have a hardware square-root instruction, or can
  // use reciprocal square-root estimates under unsafe FP math.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath &&
        Subtarget.hasFRSQRTE() && Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath &&
        Subtarget.hasFRSQRTES() && Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, CTPOP, or CTTZ.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);

  if (Subtarget.hasPOPCNTD()) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have SELECT.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND which requires SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::f64, Expand);

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP are NOT intended to support SjLj exception
  // handling; they are a lightweight setjmp/longjmp replacement used for
  // continuations, user-level threading, and the like. As a result, no other
  // SjLj exception interfaces are implemented, so please don't build your own
  // exception handling on top of them. LLVM/Clang supports zero-cost DWARF
  // exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  if (Subtarget.isSVR4ABI() && !isPPC64)
    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  // Comparisons that require checking two conditions.
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType (ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType (ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType (ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType (ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FPOWI, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTLZ, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

      for (MVT InnerVT : MVT::vector_valuetypes())
        setTruncStoreAction(VT, InnerVT, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, Expand);
    }

    // We can custom-expand all VECTOR_SHUFFLEs to VPERM; some can also be
    // handled with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
      setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      // VSX v2i64 only supports non-arithmetic operations.
      setOperationAction(ISD::ADD, MVT::v2i64, Expand);
      setOperationAction(ISD::SUB, MVT::v2i64, Expand);

      setOperationAction(ISD::SHL, MVT::v2i64, Expand);
      setOperationAction(ISD::SRA, MVT::v2i64, Expand);
      setOperationAction(ISD::SRL, MVT::v2i64, Expand);

      setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Vector operation legalization checks the result type of
      // SIGN_EXTEND_INREG, overall legalization checks the inner type.
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);
  // Altivec instructions set fields to all zeros or all ones.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  if (isPPC64) {
    setStackPointerRegisterToSaveRestore(PPC::X1);
    setExceptionPointerRegister(PPC::X3);
    setExceptionSelectorRegister(PPC::X4);
  } else {
    setStackPointerRegisterToSaveRestore(PPC::R1);
    setExceptionPointerRegister(PPC::R3);
    setExceptionSelectorRegister(PPC::R4);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits())
    setHasMultipleConditionRegisters();

  setMinFunctionAlignment(2);
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(4);

  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
    setPrefFunctionAlignment(4);
    setPrefLoopAlignment(4);
    break;
  }

  setInsertFencesForAtomic(true);

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties();

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary. Everything
  // else is passed on an 8-byte boundary on PPC64 and a 4-byte boundary on
  // PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
  return Align;
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return nullptr;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
  case PPCISD::FRE: return "PPCISD::FRE";
  case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::CMPB: return "PPCISD::CMPB";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
  case PPCISD::LOAD: return "PPCISD::LOAD";
  case PPCISD::LOAD_TOC: return "PPCISD::LOAD_TOC";
  case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL: return "PPCISD::SRL";
  case PPCISD::SRA: return "PPCISD::SRA";
  case PPCISD::SHL: return "PPCISD::SHL";
  case PPCISD::CALL: return "PPCISD::CALL";
  case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
  case PPCISD::CALL_TLS: return "PPCISD::CALL_TLS";
  case PPCISD::CALL_NOP_TLS: return "PPCISD::CALL_NOP_TLS";
  case PPCISD::MTCTR: return "PPCISD::MTCTR";
  case PPCISD::BCTRL: return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
  case PPCISD::VCMP: return "PPCISD::VCMP";
  case PPCISD::VCMPo: return "PPCISD::VCMPo";
  case PPCISD::LBRX: return "PPCISD::LBRX";
  case PPCISD::STBRX: return "PPCISD::STBRX";
  case PPCISD::LFIWAX: return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX: return "PPCISD::LFIWZX";
  case PPCISD::LARX: return "PPCISD::LARX";
  case PPCISD::STCX: return "PPCISD::STCX";
  case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ: return "PPCISD::BDNZ";
  case PPCISD::BDZ: return "PPCISD::BDZ";
  case PPCISD::MFFS: return "PPCISD::MFFS";
  case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET: return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET";
  case PPCISD::ADDIS_TOC_HA: return "PPCISD::ADDIS_TOC_HA";
  case PPCISD::LD_TOC_L: return "PPCISD::LD_TOC_L";
  case PPCISD::ADDI_TOC_L: return "PPCISD::ADDI_TOC_L";
  case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT";
  case PPCISD::SC: return "PPCISD::SC";
  }
}

EVT PPCTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP =
          dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getSubtarget().getDataLayout()->isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
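/// For example, with two distinct big-endian inputs (ShuffleKind 0), the
/// expected byte mask is <2,3, 6,7, 10,11, 14,15, 18,19, 22,23, 26,27, 30,31>,
/// i.e. the low (least-significant) halfword of each word of both inputs.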
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getSubtarget().getDataLayout()->isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getSubtarget().getDataLayout()->isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
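/// For example, a big-endian vmrghb merge of two distinct inputs
/// (ShuffleKind 0, UnitSize 1) expects the byte mask
/// <0,16, 1,17, 2,18, 3,19, 4,20, 5,21, 6,22, 7,23>, interleaving the first
/// eight bytes of each input.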
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getSubtarget().getDataLayout()->isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
/// The ShuffleKind distinguishes between big-endian operations with two
/// different inputs (0), either-endian operations with two identical inputs
/// (1), and little-endian operations with two different inputs (2). For the
/// latter, the input operands are swapped (see PPCInstrAltivec.td).
int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                             SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::v16i8)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;

  ShiftAmt -= i;
  bool isLE = DAG.getTarget().getSubtargetImpl()->getDataLayout()->
    isLittleEndian();

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else if (ShuffleKind == 1) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  } else
    return -1;

  if (ShuffleKind == 2 && isLE)
    ShiftAmt = 16 - ShiftAmt;

  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte
  // element splatted with a v16i8 mask.
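  // For example, a splat of element 3 of a v4i32 vector (EltSize == 4) uses
  // the byte mask <12,13,14,15, 12,13,14,15, 12,13,14,15, 12,13,14,15>.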
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}

/// isAllNegativeZeroVector - Returns true if all elements of build_vector
/// are -0.0.
bool PPC::isAllNegativeZeroVector(SDNode *N) {
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);

  APInt APVal, APUndef;
  unsigned BitSize;
  bool HasAnyUndefs;

  if (BV->isConstantSplat(APVal, APUndef, BitSize, HasAnyUndefs, 32, true))
    if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
      return CFP->getValueAPF().isNegZero();

  return false;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
                                SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  if (DAG.getSubtarget().getDataLayout()->isLittleEndian())
    return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  else
    return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted. The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(nullptr, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk. See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1. If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.
      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16)
        return DAG.getTargetConstant(Val, MVT::i32); // 0,0,0,4 -> vspltisw(4)
    }
    if (LeadingOnes) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(~0U, MVT::i32);  // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16)  // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (!OpVal.getNode())
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat. The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal. Continue doing this until we
  // get to ByteSize. This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
        (Value & ((1 << (8*ValSizeInBytes))-1)))
      return SDValue();
  }

  // Properly sign extend the value.
  int MaskVal = SignExtend32(Value, ByteSize * 8);

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5-bit sext field, return it.
  if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value. If so, this returns true and the
/// immediate.
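/// For example, -32768 and 32767 qualify, while 32768 does not.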
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
static bool isIntS16Immediate(SDValue Op, short &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}


/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation. Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index,
                                            SelectionDAG &DAG) const {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i can fold it if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
    APInt LHSKnownZero, LHSKnownOne;
    APInt RHSKnownZero, RHSKnownOne;
    DAG.computeKnownBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);

    if (LHSKnownZero.getBoolValue()) {
      DAG.computeKnownBits(N.getOperand(1), RHSKnownZero, RHSKnownOne);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnownZero | RHSKnownZero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

// If we happen to be doing an i64 load or store into a stack slot that has
// less than a 4-byte alignment, then the frame-index elimination may need to
// use an indexed load or store instruction (because the offset may not be a
// multiple of 4). The extra register needed to hold the offset comes from the
// register scavenger, and it is possible that the scavenger will need to use
// an emergency spill slot. As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
  // FIXME: This does not handle the LWA case.
  if (VT != MVT::i64)
    return;

  // NOTE: We'll exclude negative FIs here, which come from argument
  // lowering, because there are no known test cases triggering this problem
  // using packed structures (or similar). We can remove this exclusion if
  // we find such a test case. The reason why this is so test-case driven is
  // because this entire 'fixup' is only to prevent crashes (from the
  // register scavenger) on not-really-valid inputs. For example, if we have:
  //   %a = alloca i1
  //   %b = bitcast i1* %a to i64*
  //   store i64 0, i64* %b
  // then the store should really be marked as 'align 1', but is not. If it
  // were marked as 'align 1' then the indexed form would have been
  // instruction-selected initially, and the problem this 'fixup' is preventing
  // won't happen regardless.
  if (FrameIdx < 0)
    return;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  unsigned Align = MFI->getObjectAlignment(FrameIdx);
  if (Align >= 4)
    return;

  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setHasNonRISpills();
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg. If Aligned is true, only accept displacements
/// suitable for STD and friends, i.e. multiples of 4.
bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
                                            SDValue &Base,
                                            SelectionDAG &DAG,
                                            bool Aligned) const {
  // FIXME: dl should come from parent load or store, not from address.
  SDLoc dl(N);
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!Aligned || (imm & 3) == 0)) {
      Disp = DAG.getTargetConstant(imm, N.getValueType());
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
        fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true; // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!Aligned || (imm & 3) == 0)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.computeKnownBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);

      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        if (FrameIndexSDNode *FI =
              dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
          Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
          fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
        } else {
          Base = N.getOperand(0);
        }
        Disp = DAG.getTargetConstant(imm, N.getValueType());
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.
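    // For example, the pointer operand may be a plain integer constant, such
    // as an absolute address produced by casting an integer to a pointer.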
1397 1398 // If this address fits entirely in a 16-bit sext immediate field, codegen 1399 // this as "d, 0" 1400 short Imm; 1401 if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) { 1402 Disp = DAG.getTargetConstant(Imm, CN->getValueType(0)); 1403 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, 1404 CN->getValueType(0)); 1405 return true; 1406 } 1407 1408 // Handle 32-bit sext immediates with LIS + addr mode. 1409 if ((CN->getValueType(0) == MVT::i32 || 1410 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) && 1411 (!Aligned || (CN->getZExtValue() & 3) == 0)) { 1412 int Addr = (int)CN->getZExtValue(); 1413 1414 // Otherwise, break this down into an LIS + disp. 1415 Disp = DAG.getTargetConstant((short)Addr, MVT::i32); 1416 1417 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32); 1418 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8; 1419 Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0); 1420 return true; 1421 } 1422 } 1423 1424 Disp = DAG.getTargetConstant(0, getPointerTy()); 1425 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) { 1426 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1427 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 1428 } else 1429 Base = N; 1430 return true; // [r+0] 1431 } 1432 1433 /// SelectAddressRegRegOnly - Given the specified addressed, force it to be 1434 /// represented as an indexed [r+r] operation. 1435 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base, 1436 SDValue &Index, 1437 SelectionDAG &DAG) const { 1438 // Check to see if we can easily represent this as an [r+r] address. This 1439 // will fail if it thinks that the address is more profitably represented as 1440 // reg+imm, e.g. where imm = 0. 1441 if (SelectAddressRegReg(N, Base, Index, DAG)) 1442 return true; 1443 1444 // If the operand is an addition, always emit this as [r+r], since this is 1445 // better (for code size, and execution, as the memop does the add for free) 1446 // than emitting an explicit add. 1447 if (N.getOpcode() == ISD::ADD) { 1448 Base = N.getOperand(0); 1449 Index = N.getOperand(1); 1450 return true; 1451 } 1452 1453 // Otherwise, do it the hard way, using R0 as the base register. 1454 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, 1455 N.getValueType()); 1456 Index = N; 1457 return true; 1458 } 1459 1460 /// getPreIndexedAddressParts - returns true by value, base pointer and 1461 /// offset pointer and addressing mode by reference if the node's address 1462 /// can be legally represented as pre-indexed load / store address. 1463 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 1464 SDValue &Offset, 1465 ISD::MemIndexedMode &AM, 1466 SelectionDAG &DAG) const { 1467 if (DisablePPCPreinc) return false; 1468 1469 bool isLoad = true; 1470 SDValue Ptr; 1471 EVT VT; 1472 unsigned Alignment; 1473 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1474 Ptr = LD->getBasePtr(); 1475 VT = LD->getMemoryVT(); 1476 Alignment = LD->getAlignment(); 1477 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 1478 Ptr = ST->getBasePtr(); 1479 VT = ST->getMemoryVT(); 1480 Alignment = ST->getAlignment(); 1481 isLoad = false; 1482 } else 1483 return false; 1484 1485 // PowerPC doesn't have preinc load/store instructions for vectors. 
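  // (For scalars, the pre-increment forms are the update instructions such as
  // lwzu/stwu and ldu/stdu; e.g. "lwzu r5, 4(r6)" loads from r6+4 and writes
  // the incremented address back into r6, which is what ISD::PRE_INC models.
  // There is no vector equivalent.)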
1486 if (VT.isVector()) 1487 return false; 1488 1489 if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) { 1490 1491 // Common code will reject creating a pre-inc form if the base pointer 1492 // is a frame index, or if N is a store and the base pointer is either 1493 // the same as or a predecessor of the value being stored. Check for 1494 // those situations here, and try with swapped Base/Offset instead. 1495 bool Swap = false; 1496 1497 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base)) 1498 Swap = true; 1499 else if (!isLoad) { 1500 SDValue Val = cast<StoreSDNode>(N)->getValue(); 1501 if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode())) 1502 Swap = true; 1503 } 1504 1505 if (Swap) 1506 std::swap(Base, Offset); 1507 1508 AM = ISD::PRE_INC; 1509 return true; 1510 } 1511 1512 // LDU/STU can only handle immediates that are a multiple of 4. 1513 if (VT != MVT::i64) { 1514 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, false)) 1515 return false; 1516 } else { 1517 // LDU/STU need an address with at least 4-byte alignment. 1518 if (Alignment < 4) 1519 return false; 1520 1521 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, true)) 1522 return false; 1523 } 1524 1525 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1526 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 1527 // sext i32 to i64 when addr mode is r+i. 1528 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 && 1529 LD->getExtensionType() == ISD::SEXTLOAD && 1530 isa<ConstantSDNode>(Offset)) 1531 return false; 1532 } 1533 1534 AM = ISD::PRE_INC; 1535 return true; 1536 } 1537 1538 //===----------------------------------------------------------------------===// 1539 // LowerOperation implementation 1540 //===----------------------------------------------------------------------===// 1541 1542 /// GetLabelAccessInfo - Return true if we should reference labels using a 1543 /// PICBase, set the HiOpFlags and LoOpFlags to the target MO flags. 1544 static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags, 1545 unsigned &LoOpFlags, 1546 const GlobalValue *GV = nullptr) { 1547 HiOpFlags = PPCII::MO_HA; 1548 LoOpFlags = PPCII::MO_LO; 1549 1550 // Don't use the pic base if not in PIC relocation model. 1551 bool isPIC = TM.getRelocationModel() == Reloc::PIC_; 1552 1553 if (isPIC) { 1554 HiOpFlags |= PPCII::MO_PIC_FLAG; 1555 LoOpFlags |= PPCII::MO_PIC_FLAG; 1556 } 1557 1558 // If this is a reference to a global value that requires a non-lazy-ptr, make 1559 // sure that instruction lowering adds it. 1560 if (GV && TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV, TM)) { 1561 HiOpFlags |= PPCII::MO_NLP_FLAG; 1562 LoOpFlags |= PPCII::MO_NLP_FLAG; 1563 1564 if (GV->hasHiddenVisibility()) { 1565 HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 1566 LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 1567 } 1568 } 1569 1570 return isPIC; 1571 } 1572 1573 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, 1574 SelectionDAG &DAG) { 1575 EVT PtrVT = HiPart.getValueType(); 1576 SDValue Zero = DAG.getConstant(0, PtrVT); 1577 SDLoc DL(HiPart); 1578 1579 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); 1580 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); 1581 1582 // With PIC, the first instruction is actually "GR+hi(&G)". 1583 if (isPIC) 1584 Hi = DAG.getNode(ISD::ADD, DL, PtrVT, 1585 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); 1586 1587 // Generate non-pic code that has direct accesses to the constant pool. 
1588 // The address of the global is just (hi(&g)+lo(&g)). 1589 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); 1590 } 1591 1592 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 1593 SelectionDAG &DAG) const { 1594 EVT PtrVT = Op.getValueType(); 1595 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 1596 const Constant *C = CP->getConstVal(); 1597 1598 // 64-bit SVR4 ABI code is always position-independent. 1599 // The actual address of the GlobalValue is stored in the TOC. 1600 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 1601 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0); 1602 return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(CP), MVT::i64, GA, 1603 DAG.getRegister(PPC::X2, MVT::i64)); 1604 } 1605 1606 unsigned MOHiFlag, MOLoFlag; 1607 bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag); 1608 1609 if (isPIC && Subtarget.isSVR4ABI()) { 1610 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 1611 PPCII::MO_PIC_FLAG); 1612 SDLoc DL(CP); 1613 return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i32, GA, 1614 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT)); 1615 } 1616 1617 SDValue CPIHi = 1618 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag); 1619 SDValue CPILo = 1620 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag); 1621 return LowerLabelRef(CPIHi, CPILo, isPIC, DAG); 1622 } 1623 1624 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 1625 EVT PtrVT = Op.getValueType(); 1626 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 1627 1628 // 64-bit SVR4 ABI code is always position-independent. 1629 // The actual address of the GlobalValue is stored in the TOC. 1630 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 1631 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 1632 return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(JT), MVT::i64, GA, 1633 DAG.getRegister(PPC::X2, MVT::i64)); 1634 } 1635 1636 unsigned MOHiFlag, MOLoFlag; 1637 bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag); 1638 1639 if (isPIC && Subtarget.isSVR4ABI()) { 1640 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 1641 PPCII::MO_PIC_FLAG); 1642 SDLoc DL(GA); 1643 return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(JT), PtrVT, GA, 1644 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT)); 1645 } 1646 1647 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 1648 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 1649 return LowerLabelRef(JTIHi, JTILo, isPIC, DAG); 1650 } 1651 1652 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 1653 SelectionDAG &DAG) const { 1654 EVT PtrVT = Op.getValueType(); 1655 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); 1656 const BlockAddress *BA = BASDN->getBlockAddress(); 1657 1658 // 64-bit SVR4 ABI code is always position-independent. 1659 // The actual BlockAddress is stored in the TOC. 
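  // (A TOC_ENTRY node typically ends up as a load relative to the TOC pointer
  // in r2, e.g. "ld r3, .LC0@toc(r2)" where .LC0 is a TOC slot holding the
  // address; the exact instruction sequence depends on the code model.)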
1660 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 1661 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); 1662 return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(BASDN), MVT::i64, GA, 1663 DAG.getRegister(PPC::X2, MVT::i64)); 1664 } 1665 1666 unsigned MOHiFlag, MOLoFlag; 1667 bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag); 1668 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 1669 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 1670 return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG); 1671 } 1672 1673 // Generate a call to __tls_get_addr for the given GOT entry Op. 1674 std::pair<SDValue,SDValue> 1675 PPCTargetLowering::lowerTLSCall(SDValue Op, SDLoc dl, 1676 SelectionDAG &DAG) const { 1677 1678 Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext()); 1679 TargetLowering::ArgListTy Args; 1680 TargetLowering::ArgListEntry Entry; 1681 Entry.Node = Op; 1682 Entry.Ty = IntPtrTy; 1683 Args.push_back(Entry); 1684 1685 TargetLowering::CallLoweringInfo CLI(DAG); 1686 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode()) 1687 .setCallee(CallingConv::C, IntPtrTy, 1688 DAG.getTargetExternalSymbol("__tls_get_addr", getPointerTy()), 1689 std::move(Args), 0); 1690 1691 return LowerCallTo(CLI); 1692 } 1693 1694 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 1695 SelectionDAG &DAG) const { 1696 1697 // FIXME: TLS addresses currently use medium model code sequences, 1698 // which is the most useful form. Eventually support for small and 1699 // large models could be added if users need it, at the cost of 1700 // additional complexity. 1701 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 1702 SDLoc dl(GA); 1703 const GlobalValue *GV = GA->getGlobal(); 1704 EVT PtrVT = getPointerTy(); 1705 bool is64bit = Subtarget.isPPC64(); 1706 const Module *M = DAG.getMachineFunction().getFunction()->getParent(); 1707 PICLevel::Level picLevel = M->getPICLevel(); 1708 1709 TLSModel::Model Model = getTargetMachine().getTLSModel(GV); 1710 1711 if (Model == TLSModel::LocalExec) { 1712 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 1713 PPCII::MO_TPREL_HA); 1714 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 1715 PPCII::MO_TPREL_LO); 1716 SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2, 1717 is64bit ? 
MVT::i64 : MVT::i32); 1718 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 1719 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 1720 } 1721 1722 if (Model == TLSModel::InitialExec) { 1723 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 1724 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 1725 PPCII::MO_TLS); 1726 SDValue GOTPtr; 1727 if (is64bit) { 1728 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 1729 GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, 1730 PtrVT, GOTReg, TGA); 1731 } else 1732 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); 1733 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, 1734 PtrVT, TGA, GOTPtr); 1735 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 1736 } 1737 1738 if (Model == TLSModel::GeneralDynamic) { 1739 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 1740 PPCII::MO_TLSGD); 1741 SDValue GOTPtr; 1742 if (is64bit) { 1743 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 1744 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 1745 GOTReg, TGA); 1746 } else { 1747 if (picLevel == PICLevel::Small) 1748 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 1749 else 1750 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 1751 } 1752 SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSGD_L, dl, PtrVT, 1753 GOTPtr, TGA); 1754 std::pair<SDValue, SDValue> CallResult = lowerTLSCall(GOTEntry, dl, DAG); 1755 return CallResult.first; 1756 } 1757 1758 if (Model == TLSModel::LocalDynamic) { 1759 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 1760 PPCII::MO_TLSLD); 1761 SDValue GOTPtr; 1762 if (is64bit) { 1763 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 1764 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 1765 GOTReg, TGA); 1766 } else { 1767 if (picLevel == PICLevel::Small) 1768 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 1769 else 1770 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 1771 } 1772 SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSLD_L, dl, PtrVT, 1773 GOTPtr, TGA); 1774 std::pair<SDValue, SDValue> CallResult = lowerTLSCall(GOTEntry, dl, DAG); 1775 SDValue TLSAddr = CallResult.first; 1776 SDValue Chain = CallResult.second; 1777 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, PtrVT, 1778 Chain, TLSAddr, TGA); 1779 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA); 1780 } 1781 1782 llvm_unreachable("Unknown TLS model!"); 1783 } 1784 1785 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 1786 SelectionDAG &DAG) const { 1787 EVT PtrVT = Op.getValueType(); 1788 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 1789 SDLoc DL(GSDN); 1790 const GlobalValue *GV = GSDN->getGlobal(); 1791 1792 // 64-bit SVR4 ABI code is always position-independent. 1793 // The actual address of the GlobalValue is stored in the TOC. 
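  // (For 32-bit non-PIC code the path below instead materializes the address
  // as a high/low pair via LowerLabelRef, e.g. "lis r3, sym@ha" followed by
  // "addi r3, r3, sym@l".)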
1794 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 1795 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); 1796 return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i64, GA, 1797 DAG.getRegister(PPC::X2, MVT::i64)); 1798 } 1799 1800 unsigned MOHiFlag, MOLoFlag; 1801 bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag, GV); 1802 1803 if (isPIC && Subtarget.isSVR4ABI()) { 1804 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 1805 GSDN->getOffset(), 1806 PPCII::MO_PIC_FLAG); 1807 return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i32, GA, 1808 DAG.getNode(PPCISD::GlobalBaseReg, DL, MVT::i32)); 1809 } 1810 1811 SDValue GAHi = 1812 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); 1813 SDValue GALo = 1814 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); 1815 1816 SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG); 1817 1818 // If the global reference is actually to a non-lazy-pointer, we have to do an 1819 // extra load to get the address of the global. 1820 if (MOHiFlag & PPCII::MO_NLP_FLAG) 1821 Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(), 1822 false, false, false, 0); 1823 return Ptr; 1824 } 1825 1826 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 1827 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 1828 SDLoc dl(Op); 1829 1830 if (Op.getValueType() == MVT::v2i64) { 1831 // When the operands themselves are v2i64 values, we need to do something 1832 // special because VSX has no underlying comparison operations for these. 1833 if (Op.getOperand(0).getValueType() == MVT::v2i64) { 1834 // Equality can be handled by casting to the legal type for Altivec 1835 // comparisons; everything else needs to be expanded. 1836 if (CC == ISD::SETEQ || CC == ISD::SETNE) { 1837 return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 1838 DAG.getSetCC(dl, MVT::v4i32, 1839 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)), 1840 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)), 1841 CC)); 1842 } 1843 1844 return SDValue(); 1845 } 1846 1847 // We handle most of these in the usual way. 1848 return Op; 1849 } 1850 1851 // If we're comparing for equality to zero, expose the fact that this is 1852 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can 1853 // fold the new nodes. (For an i32 input, cntlzw yields 32 only when the input is zero, so a logical shift right by Log2_32(32) == 5 produces the desired 0/1 value.) 1854 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1855 if (C->isNullValue() && CC == ISD::SETEQ) { 1856 EVT VT = Op.getOperand(0).getValueType(); 1857 SDValue Zext = Op.getOperand(0); 1858 if (VT.bitsLT(MVT::i32)) { 1859 VT = MVT::i32; 1860 Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0)); 1861 } 1862 unsigned Log2b = Log2_32(VT.getSizeInBits()); 1863 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext); 1864 SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz, 1865 DAG.getConstant(Log2b, MVT::i32)); 1866 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc); 1867 } 1868 // Leave comparisons against 0 and -1 alone for now, since they're usually 1869 // optimized. FIXME: revisit this when we can custom lower all setcc 1870 // optimizations. 1871 if (C->isAllOnesValue() || C->isNullValue()) 1872 return SDValue(); 1873 } 1874 1875 // If we have an integer seteq/setne, turn it into a compare against zero 1876 // by xor'ing the rhs with the lhs, which is faster than setting a 1877 // condition register, reading it back out, and masking the correct bit. The 1878 // normal approach here uses sub to do this instead of xor.
Using xor exposes 1879 // the result to other bit-twiddling opportunities. 1880 EVT LHSVT = Op.getOperand(0).getValueType(); 1881 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 1882 EVT VT = Op.getValueType(); 1883 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0), 1884 Op.getOperand(1)); 1885 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, LHSVT), CC); 1886 } 1887 return SDValue(); 1888 } 1889 1890 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG, 1891 const PPCSubtarget &Subtarget) const { 1892 SDNode *Node = Op.getNode(); 1893 EVT VT = Node->getValueType(0); 1894 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1895 SDValue InChain = Node->getOperand(0); 1896 SDValue VAListPtr = Node->getOperand(1); 1897 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 1898 SDLoc dl(Node); 1899 1900 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only"); 1901 1902 // gpr_index 1903 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 1904 VAListPtr, MachinePointerInfo(SV), MVT::i8, 1905 false, false, false, 0); 1906 InChain = GprIndex.getValue(1); 1907 1908 if (VT == MVT::i64) { 1909 // Check if GprIndex is even 1910 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex, 1911 DAG.getConstant(1, MVT::i32)); 1912 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd, 1913 DAG.getConstant(0, MVT::i32), ISD::SETNE); 1914 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex, 1915 DAG.getConstant(1, MVT::i32)); 1916 // Align GprIndex to be even if it isn't 1917 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne, 1918 GprIndex); 1919 } 1920 1921 // fpr index is 1 byte after gpr 1922 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 1923 DAG.getConstant(1, MVT::i32)); 1924 1925 // fpr 1926 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 1927 FprPtr, MachinePointerInfo(SV), MVT::i8, 1928 false, false, false, 0); 1929 InChain = FprIndex.getValue(1); 1930 1931 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 1932 DAG.getConstant(8, MVT::i32)); 1933 1934 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 1935 DAG.getConstant(4, MVT::i32)); 1936 1937 // areas 1938 SDValue OverflowArea = DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, 1939 MachinePointerInfo(), false, false, 1940 false, 0); 1941 InChain = OverflowArea.getValue(1); 1942 1943 SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, 1944 MachinePointerInfo(), false, false, 1945 false, 0); 1946 InChain = RegSaveArea.getValue(1); 1947 1948 // select overflow_area if index > 8 1949 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex, 1950 DAG.getConstant(8, MVT::i32), ISD::SETLT); 1951 1952 // adjustment constant gpr_index * 4/8 1953 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32, 1954 VT.isInteger() ? GprIndex : FprIndex, 1955 DAG.getConstant(VT.isInteger() ? 4 : 8, 1956 MVT::i32)); 1957 1958 // OurReg = RegSaveArea + RegConstant 1959 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea, 1960 RegConstant); 1961 1962 // Floating types are 32 bytes into RegSaveArea 1963 if (VT.isFloatingPoint()) 1964 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg, 1965 DAG.getConstant(32, MVT::i32)); 1966 1967 // increase {f,g}pr_index by 1 (or 2 if VT is i64) 1968 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32, 1969 VT.isInteger() ? 
GprIndex : FprIndex, 1970 DAG.getConstant(VT == MVT::i64 ? 2 : 1, 1971 MVT::i32)); 1972 1973 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1, 1974 VT.isInteger() ? VAListPtr : FprPtr, 1975 MachinePointerInfo(SV), 1976 MVT::i8, false, false, 0); 1977 1978 // determine if we should load from reg_save_area or overflow_area 1979 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea); 1980 1981 // increase overflow_area by 4/8 if gpr/fpr > 8 1982 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea, 1983 DAG.getConstant(VT.isInteger() ? 4 : 8, 1984 MVT::i32)); 1985 1986 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea, 1987 OverflowAreaPlusN); 1988 1989 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, 1990 OverflowAreaPtr, 1991 MachinePointerInfo(), 1992 MVT::i32, false, false, 0); 1993 1994 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(), 1995 false, false, false, 0); 1996 } 1997 1998 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG, 1999 const PPCSubtarget &Subtarget) const { 2000 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only"); 2001 2002 // We have to copy the entire va_list struct: 2003 // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte 2004 return DAG.getMemcpy(Op.getOperand(0), Op, 2005 Op.getOperand(1), Op.getOperand(2), 2006 DAG.getConstant(12, MVT::i32), 8, false, true, 2007 MachinePointerInfo(), MachinePointerInfo()); 2008 } 2009 2010 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 2011 SelectionDAG &DAG) const { 2012 return Op.getOperand(0); 2013 } 2014 2015 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 2016 SelectionDAG &DAG) const { 2017 SDValue Chain = Op.getOperand(0); 2018 SDValue Trmp = Op.getOperand(1); // trampoline 2019 SDValue FPtr = Op.getOperand(2); // nested function 2020 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 2021 SDLoc dl(Op); 2022 2023 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2024 bool isPPC64 = (PtrVT == MVT::i64); 2025 Type *IntPtrTy = 2026 DAG.getTargetLoweringInfo().getDataLayout()->getIntPtrType( 2027 *DAG.getContext()); 2028 2029 TargetLowering::ArgListTy Args; 2030 TargetLowering::ArgListEntry Entry; 2031 2032 Entry.Ty = IntPtrTy; 2033 Entry.Node = Trmp; Args.push_back(Entry); 2034 2035 // TrampSize == (isPPC64 ? 48 : 40); 2036 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, 2037 isPPC64 ? MVT::i64 : MVT::i32); 2038 Args.push_back(Entry); 2039 2040 Entry.Node = FPtr; Args.push_back(Entry); 2041 Entry.Node = Nest; Args.push_back(Entry); 2042 2043 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 2044 TargetLowering::CallLoweringInfo CLI(DAG); 2045 CLI.setDebugLoc(dl).setChain(Chain) 2046 .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()), 2047 DAG.getExternalSymbol("__trampoline_setup", PtrVT), 2048 std::move(Args), 0); 2049 2050 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 2051 return CallResult.second; 2052 } 2053 2054 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG, 2055 const PPCSubtarget &Subtarget) const { 2056 MachineFunction &MF = DAG.getMachineFunction(); 2057 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2058 2059 SDLoc dl(Op); 2060 2061 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) { 2062 // vastart just stores the address of the VarArgsFrameIndex slot into the 2063 // memory location argument. 
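    // (Under the 64-bit and Darwin ABIs va_list is just a pointer that walks
    // the memory argument area, so a single store of the frame address
    // suffices; the structured 32-bit SVR4 va_list is handled below.)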
2064 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2065 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2066 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2067 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2068 MachinePointerInfo(SV), 2069 false, false, 0); 2070 } 2071 2072 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 2073 // We suppose the given va_list is already allocated. 2074 // 2075 // typedef struct { 2076 // char gpr; /* index into the array of 8 GPRs 2077 // * stored in the register save area 2078 // * gpr=0 corresponds to r3, 2079 // * gpr=1 to r4, etc. 2080 // */ 2081 // char fpr; /* index into the array of 8 FPRs 2082 // * stored in the register save area 2083 // * fpr=0 corresponds to f1, 2084 // * fpr=1 to f2, etc. 2085 // */ 2086 // char *overflow_arg_area; 2087 // /* location on stack that holds 2088 // * the next overflow argument 2089 // */ 2090 // char *reg_save_area; 2091 // /* where r3:r10 and f1:f8 (if saved) 2092 // * are stored 2093 // */ 2094 // } va_list[1]; 2095 2096 2097 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), MVT::i32); 2098 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), MVT::i32); 2099 2100 2101 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2102 2103 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 2104 PtrVT); 2105 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 2106 PtrVT); 2107 2108 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 2109 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT); 2110 2111 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 2112 SDValue ConstStackOffset = DAG.getConstant(StackOffset, PtrVT); 2113 2114 uint64_t FPROffset = 1; 2115 SDValue ConstFPROffset = DAG.getConstant(FPROffset, PtrVT); 2116 2117 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2118 2119 // Store first byte : number of int regs 2120 SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, 2121 Op.getOperand(1), 2122 MachinePointerInfo(SV), 2123 MVT::i8, false, false, 0); 2124 uint64_t nextOffset = FPROffset; 2125 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 2126 ConstFPROffset); 2127 2128 // Store second byte : number of float regs 2129 SDValue secondStore = 2130 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 2131 MachinePointerInfo(SV, nextOffset), MVT::i8, 2132 false, false, 0); 2133 nextOffset += StackOffset; 2134 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 2135 2136 // Store second word : arguments given on stack 2137 SDValue thirdStore = 2138 DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 2139 MachinePointerInfo(SV, nextOffset), 2140 false, false, 0); 2141 nextOffset += FrameOffset; 2142 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 2143 2144 // Store third word : arguments given in registers 2145 return DAG.getStore(thirdStore, dl, FR, nextPtr, 2146 MachinePointerInfo(SV, nextOffset), 2147 false, false, 0); 2148 2149 } 2150 2151 #include "PPCGenCallingConv.inc" 2152 2153 // Function whose sole purpose is to kill compiler warnings 2154 // stemming from unused functions included from PPCGenCallingConv.inc. 2155 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const { 2156 return Flag ? 
CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS; 2157 } 2158 2159 bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 2160 CCValAssign::LocInfo &LocInfo, 2161 ISD::ArgFlagsTy &ArgFlags, 2162 CCState &State) { 2163 return true; 2164 } 2165 2166 bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, 2167 MVT &LocVT, 2168 CCValAssign::LocInfo &LocInfo, 2169 ISD::ArgFlagsTy &ArgFlags, 2170 CCState &State) { 2171 static const MCPhysReg ArgRegs[] = { 2172 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2173 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2174 }; 2175 const unsigned NumArgRegs = array_lengthof(ArgRegs); 2176 2177 unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs); 2178 2179 // Skip one register if the first unallocated register has an even register 2180 // number and there are still argument registers available which have not been 2181 // allocated yet. RegNum is actually an index into ArgRegs, which means we 2182 // need to skip a register if RegNum is odd. 2183 if (RegNum != NumArgRegs && RegNum % 2 == 1) { 2184 State.AllocateReg(ArgRegs[RegNum]); 2185 } 2186 2187 // Always return false here, as this function only makes sure that the first 2188 // unallocated register has an odd register number and does not actually 2189 // allocate a register for the current argument. 2190 return false; 2191 } 2192 2193 bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT, 2194 MVT &LocVT, 2195 CCValAssign::LocInfo &LocInfo, 2196 ISD::ArgFlagsTy &ArgFlags, 2197 CCState &State) { 2198 static const MCPhysReg ArgRegs[] = { 2199 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 2200 PPC::F8 2201 }; 2202 2203 const unsigned NumArgRegs = array_lengthof(ArgRegs); 2204 2205 unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs); 2206 2207 // If there is only one Floating-point register left we need to put both f64 2208 // values of a split ppc_fp128 value on the stack. 2209 if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) { 2210 State.AllocateReg(ArgRegs[RegNum]); 2211 } 2212 2213 // Always return false here, as this function only makes sure that the two f64 2214 // values a ppc_fp128 value is split into are both passed in registers or both 2215 // passed on the stack and does not actually allocate a register for the 2216 // current argument. 2217 return false; 2218 } 2219 2220 /// GetFPR - Get the set of FP registers that should be allocated for arguments, 2221 /// on Darwin. 2222 static const MCPhysReg *GetFPR() { 2223 static const MCPhysReg FPR[] = { 2224 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 2225 PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13 2226 }; 2227 2228 return FPR; 2229 } 2230 2231 /// CalculateStackSlotSize - Calculates the size reserved for this argument on 2232 /// the stack. 2233 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 2234 unsigned PtrByteSize) { 2235 unsigned ArgSize = ArgVT.getStoreSize(); 2236 if (Flags.isByVal()) 2237 ArgSize = Flags.getByValSize(); 2238 2239 // Round up to multiples of the pointer size, except for array members, 2240 // which are always packed. 2241 if (!Flags.isInConsecutiveRegs()) 2242 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2243 2244 return ArgSize; 2245 } 2246 2247 /// CalculateStackSlotAlignment - Calculates the alignment of this argument 2248 /// on the stack. 
2249 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, 2250 ISD::ArgFlagsTy Flags, 2251 unsigned PtrByteSize) { 2252 unsigned Align = PtrByteSize; 2253 2254 // Altivec parameters are padded to a 16 byte boundary. 2255 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 2256 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 2257 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) 2258 Align = 16; 2259 2260 // ByVal parameters are aligned as requested. 2261 if (Flags.isByVal()) { 2262 unsigned BVAlign = Flags.getByValAlign(); 2263 if (BVAlign > PtrByteSize) { 2264 if (BVAlign % PtrByteSize != 0) 2265 llvm_unreachable( 2266 "ByVal alignment is not a multiple of the pointer size"); 2267 2268 Align = BVAlign; 2269 } 2270 } 2271 2272 // Array members are always packed to their original alignment. 2273 if (Flags.isInConsecutiveRegs()) { 2274 // If the array member was split into multiple registers, the first 2275 // needs to be aligned to the size of the full type. (Except for 2276 // ppcf128, which is only aligned as its f64 components.) 2277 if (Flags.isSplit() && OrigVT != MVT::ppcf128) 2278 Align = OrigVT.getStoreSize(); 2279 else 2280 Align = ArgVT.getStoreSize(); 2281 } 2282 2283 return Align; 2284 } 2285 2286 /// CalculateStackSlotUsed - Return whether this argument will use its 2287 /// stack slot (instead of being passed in registers). ArgOffset, 2288 /// AvailableFPRs, and AvailableVRs must hold the current argument 2289 /// position, and will be updated to account for this argument. 2290 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, 2291 ISD::ArgFlagsTy Flags, 2292 unsigned PtrByteSize, 2293 unsigned LinkageSize, 2294 unsigned ParamAreaSize, 2295 unsigned &ArgOffset, 2296 unsigned &AvailableFPRs, 2297 unsigned &AvailableVRs) { 2298 bool UseMemory = false; 2299 2300 // Respect alignment of argument on the stack. 2301 unsigned Align = 2302 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 2303 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 2304 // If there's no space left in the argument save area, we must 2305 // use memory (this check also catches zero-sized arguments). 2306 if (ArgOffset >= LinkageSize + ParamAreaSize) 2307 UseMemory = true; 2308 2309 // Allocate argument on the stack. 2310 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 2311 if (Flags.isInConsecutiveRegsLast()) 2312 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2313 // If we overran the argument save area, we must use memory 2314 // (this check catches arguments passed partially in memory) 2315 if (ArgOffset > LinkageSize + ParamAreaSize) 2316 UseMemory = true; 2317 2318 // However, if the argument is actually passed in an FPR or a VR, 2319 // we don't use memory after all. 2320 if (!Flags.isByVal()) { 2321 if (ArgVT == MVT::f32 || ArgVT == MVT::f64) 2322 if (AvailableFPRs > 0) { 2323 --AvailableFPRs; 2324 return false; 2325 } 2326 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 2327 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 2328 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) 2329 if (AvailableVRs > 0) { 2330 --AvailableVRs; 2331 return false; 2332 } 2333 } 2334 2335 return UseMemory; 2336 } 2337 2338 /// EnsureStackAlignment - Round stack frame size up from NumBytes to 2339 /// ensure minimum alignment required for target. 
2340 static unsigned EnsureStackAlignment(const TargetMachine &Target, 2341 unsigned NumBytes) { 2342 unsigned TargetAlign = 2343 Target.getSubtargetImpl()->getFrameLowering()->getStackAlignment(); 2344 unsigned AlignMask = TargetAlign - 1; 2345 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 2346 return NumBytes; 2347 } 2348 2349 SDValue 2350 PPCTargetLowering::LowerFormalArguments(SDValue Chain, 2351 CallingConv::ID CallConv, bool isVarArg, 2352 const SmallVectorImpl<ISD::InputArg> 2353 &Ins, 2354 SDLoc dl, SelectionDAG &DAG, 2355 SmallVectorImpl<SDValue> &InVals) 2356 const { 2357 if (Subtarget.isSVR4ABI()) { 2358 if (Subtarget.isPPC64()) 2359 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, 2360 dl, DAG, InVals); 2361 else 2362 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, 2363 dl, DAG, InVals); 2364 } else { 2365 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, 2366 dl, DAG, InVals); 2367 } 2368 } 2369 2370 SDValue 2371 PPCTargetLowering::LowerFormalArguments_32SVR4( 2372 SDValue Chain, 2373 CallingConv::ID CallConv, bool isVarArg, 2374 const SmallVectorImpl<ISD::InputArg> 2375 &Ins, 2376 SDLoc dl, SelectionDAG &DAG, 2377 SmallVectorImpl<SDValue> &InVals) const { 2378 2379 // 32-bit SVR4 ABI Stack Frame Layout: 2380 // +-----------------------------------+ 2381 // +--> | Back chain | 2382 // | +-----------------------------------+ 2383 // | | Floating-point register save area | 2384 // | +-----------------------------------+ 2385 // | | General register save area | 2386 // | +-----------------------------------+ 2387 // | | CR save word | 2388 // | +-----------------------------------+ 2389 // | | VRSAVE save word | 2390 // | +-----------------------------------+ 2391 // | | Alignment padding | 2392 // | +-----------------------------------+ 2393 // | | Vector register save area | 2394 // | +-----------------------------------+ 2395 // | | Local variable space | 2396 // | +-----------------------------------+ 2397 // | | Parameter list area | 2398 // | +-----------------------------------+ 2399 // | | LR save word | 2400 // | +-----------------------------------+ 2401 // SP--> +--- | Back chain | 2402 // +-----------------------------------+ 2403 // 2404 // Specifications: 2405 // System V Application Binary Interface PowerPC Processor Supplement 2406 // AltiVec Technology Programming Interface Manual 2407 2408 MachineFunction &MF = DAG.getMachineFunction(); 2409 MachineFrameInfo *MFI = MF.getFrameInfo(); 2410 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2411 2412 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2413 // Potential tail calls could cause overwriting of argument stack slots. 2414 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 2415 (CallConv == CallingConv::Fast)); 2416 unsigned PtrByteSize = 4; 2417 2418 // Assign locations to all of the incoming arguments. 2419 SmallVector<CCValAssign, 16> ArgLocs; 2420 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 2421 *DAG.getContext()); 2422 2423 // Reserve space for the linkage area on the stack. 2424 unsigned LinkageSize = PPCFrameLowering::getLinkageSize(false, false, false); 2425 CCInfo.AllocateStack(LinkageSize, PtrByteSize); 2426 2427 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 2428 2429 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2430 CCValAssign &VA = ArgLocs[i]; 2431 2432 // Arguments stored in registers. 
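      // (Under the 32-bit SVR4 convention these are r3-r10 for integers,
      // f1-f8 for floating point and v2-v13 for vectors, as assigned by
      // CC_PPC32_SVR4 above.)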
2433 if (VA.isRegLoc()) { 2434 const TargetRegisterClass *RC; 2435 EVT ValVT = VA.getValVT(); 2436 2437 switch (ValVT.getSimpleVT().SimpleTy) { 2438 default: 2439 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 2440 case MVT::i1: 2441 case MVT::i32: 2442 RC = &PPC::GPRCRegClass; 2443 break; 2444 case MVT::f32: 2445 RC = &PPC::F4RCRegClass; 2446 break; 2447 case MVT::f64: 2448 if (Subtarget.hasVSX()) 2449 RC = &PPC::VSFRCRegClass; 2450 else 2451 RC = &PPC::F8RCRegClass; 2452 break; 2453 case MVT::v16i8: 2454 case MVT::v8i16: 2455 case MVT::v4i32: 2456 case MVT::v4f32: 2457 RC = &PPC::VRRCRegClass; 2458 break; 2459 case MVT::v2f64: 2460 case MVT::v2i64: 2461 RC = &PPC::VSHRCRegClass; 2462 break; 2463 } 2464 2465 // Transform the arguments stored in physical registers into virtual ones. 2466 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2467 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, 2468 ValVT == MVT::i1 ? MVT::i32 : ValVT); 2469 2470 if (ValVT == MVT::i1) 2471 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); 2472 2473 InVals.push_back(ArgValue); 2474 } else { 2475 // Argument stored in memory. 2476 assert(VA.isMemLoc()); 2477 2478 unsigned ArgSize = VA.getLocVT().getStoreSize(); 2479 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), 2480 isImmutable); 2481 2482 // Create load nodes to retrieve arguments from the stack. 2483 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2484 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2485 MachinePointerInfo(), 2486 false, false, false, 0)); 2487 } 2488 } 2489 2490 // Assign locations to all of the incoming aggregate by value arguments. 2491 // Aggregates passed by value are stored in the local variable space of the 2492 // caller's stack frame, right above the parameter list area. 2493 SmallVector<CCValAssign, 16> ByValArgLocs; 2494 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 2495 ByValArgLocs, *DAG.getContext()); 2496 2497 // Reserve stack space for the allocations in CCInfo. 2498 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 2499 2500 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 2501 2502 // Area that is at least reserved in the caller of this function. 2503 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 2504 MinReservedArea = std::max(MinReservedArea, LinkageSize); 2505 2506 // Set the size that is at least reserved in caller of this function. Tail 2507 // call optimized function's reserved stack space needs to be aligned so that 2508 // taking the difference between two stack areas will result in an aligned 2509 // stack. 2510 MinReservedArea = EnsureStackAlignment(MF.getTarget(), MinReservedArea); 2511 FuncInfo->setMinReservedArea(MinReservedArea); 2512 2513 SmallVector<SDValue, 8> MemOps; 2514 2515 // If the function takes variable number of arguments, make a frame index for 2516 // the start of the first vararg value... for expansion of llvm.va_start. 
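  // (Concretely, r3-r10 and, unless float saving is disabled, f1-f8 are
  // dumped into a register save area in this frame so that va_arg can later
  // index into it using the gpr/fpr counts stored by va_start.)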
2517 if (isVarArg) { 2518 static const MCPhysReg GPArgRegs[] = { 2519 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2520 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2521 }; 2522 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); 2523 2524 static const MCPhysReg FPArgRegs[] = { 2525 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 2526 PPC::F8 2527 }; 2528 unsigned NumFPArgRegs = array_lengthof(FPArgRegs); 2529 if (DisablePPCFloatInVariadic) 2530 NumFPArgRegs = 0; 2531 2532 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs, 2533 NumGPArgRegs)); 2534 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs, 2535 NumFPArgRegs)); 2536 2537 // Make room for NumGPArgRegs and NumFPArgRegs. 2538 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + 2539 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8; 2540 2541 FuncInfo->setVarArgsStackOffset( 2542 MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 2543 CCInfo.getNextStackOffset(), true)); 2544 2545 FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false)); 2546 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2547 2548 // The fixed integer arguments of a variadic function are stored to the 2549 // VarArgsFrameIndex on the stack so that they may be loaded by deferencing 2550 // the result of va_next. 2551 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) { 2552 // Get an existing live-in vreg, or add a new one. 2553 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]); 2554 if (!VReg) 2555 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); 2556 2557 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2558 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2559 MachinePointerInfo(), false, false, 0); 2560 MemOps.push_back(Store); 2561 // Increment the address by four for the next argument to store 2562 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); 2563 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2564 } 2565 2566 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 2567 // is set. 2568 // The double arguments are stored to the VarArgsFrameIndex 2569 // on the stack. 2570 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) { 2571 // Get an existing live-in vreg, or add a new one. 2572 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); 2573 if (!VReg) 2574 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 2575 2576 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 2577 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2578 MachinePointerInfo(), false, false, 0); 2579 MemOps.push_back(Store); 2580 // Increment the address by eight for the next argument to store 2581 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, 2582 PtrVT); 2583 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2584 } 2585 } 2586 2587 if (!MemOps.empty()) 2588 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 2589 2590 return Chain; 2591 } 2592 2593 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 2594 // value to MVT::i64 and then truncate to the correct register size. 
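// For example, a sign-extended i32 argument arrives in the low half of an
// i64 GPR; it is wrapped in AssertSext so later nodes know the state of the
// upper bits, and then truncated back down to i32.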
2595 SDValue 2596 PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT, 2597 SelectionDAG &DAG, SDValue ArgVal, 2598 SDLoc dl) const { 2599 if (Flags.isSExt()) 2600 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 2601 DAG.getValueType(ObjectVT)); 2602 else if (Flags.isZExt()) 2603 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 2604 DAG.getValueType(ObjectVT)); 2605 2606 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal); 2607 } 2608 2609 SDValue 2610 PPCTargetLowering::LowerFormalArguments_64SVR4( 2611 SDValue Chain, 2612 CallingConv::ID CallConv, bool isVarArg, 2613 const SmallVectorImpl<ISD::InputArg> 2614 &Ins, 2615 SDLoc dl, SelectionDAG &DAG, 2616 SmallVectorImpl<SDValue> &InVals) const { 2617 // TODO: add description of PPC stack frame format, or at least some docs. 2618 // 2619 bool isELFv2ABI = Subtarget.isELFv2ABI(); 2620 bool isLittleEndian = Subtarget.isLittleEndian(); 2621 MachineFunction &MF = DAG.getMachineFunction(); 2622 MachineFrameInfo *MFI = MF.getFrameInfo(); 2623 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2624 2625 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2626 // Potential tail calls could cause overwriting of argument stack slots. 2627 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 2628 (CallConv == CallingConv::Fast)); 2629 unsigned PtrByteSize = 8; 2630 2631 unsigned LinkageSize = PPCFrameLowering::getLinkageSize(true, false, 2632 isELFv2ABI); 2633 2634 static const MCPhysReg GPR[] = { 2635 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 2636 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 2637 }; 2638 2639 static const MCPhysReg *FPR = GetFPR(); 2640 2641 static const MCPhysReg VR[] = { 2642 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 2643 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 2644 }; 2645 static const MCPhysReg VSRH[] = { 2646 PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8, 2647 PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13 2648 }; 2649 2650 const unsigned Num_GPR_Regs = array_lengthof(GPR); 2651 const unsigned Num_FPR_Regs = 13; 2652 const unsigned Num_VR_Regs = array_lengthof(VR); 2653 2654 // Do a first pass over the arguments to determine whether the ABI 2655 // guarantees that our caller has allocated the parameter save area 2656 // on its stack frame. In the ELFv1 ABI, this is always the case; 2657 // in the ELFv2 ABI, it is true if this is a vararg function or if 2658 // any parameter is located in a stack slot. 2659 2660 bool HasParameterArea = !isELFv2ABI || isVarArg; 2661 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize; 2662 unsigned NumBytes = LinkageSize; 2663 unsigned AvailableFPRs = Num_FPR_Regs; 2664 unsigned AvailableVRs = Num_VR_Regs; 2665 for (unsigned i = 0, e = Ins.size(); i != e; ++i) 2666 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags, 2667 PtrByteSize, LinkageSize, ParamAreaSize, 2668 NumBytes, AvailableFPRs, AvailableVRs)) 2669 HasParameterArea = true; 2670 2671 // Add DAG nodes to load the arguments or copy them out of registers. On 2672 // entry to a function on PPC, the arguments start after the linkage area, 2673 // although the first ones are often in registers. 
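  // (The linkage area is 48 bytes under ELFv1 and 32 bytes under ELFv2, so
  // the first doubleword of the parameter save area sits at that offset from
  // the incoming stack pointer.)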
2674 2675 unsigned ArgOffset = LinkageSize; 2676 unsigned GPR_idx, FPR_idx = 0, VR_idx = 0; 2677 SmallVector<SDValue, 8> MemOps; 2678 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 2679 unsigned CurArgIdx = 0; 2680 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 2681 SDValue ArgVal; 2682 bool needsLoad = false; 2683 EVT ObjectVT = Ins[ArgNo].VT; 2684 EVT OrigVT = Ins[ArgNo].ArgVT; 2685 unsigned ObjSize = ObjectVT.getStoreSize(); 2686 unsigned ArgSize = ObjSize; 2687 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 2688 std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx); 2689 CurArgIdx = Ins[ArgNo].OrigArgIndex; 2690 2691 /* Respect alignment of argument on the stack. */ 2692 unsigned Align = 2693 CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize); 2694 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 2695 unsigned CurArgOffset = ArgOffset; 2696 2697 /* Compute GPR index associated with argument offset. */ 2698 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 2699 GPR_idx = std::min(GPR_idx, Num_GPR_Regs); 2700 2701 // FIXME the codegen can be much improved in some cases. 2702 // We do not have to keep everything in memory. 2703 if (Flags.isByVal()) { 2704 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 2705 ObjSize = Flags.getByValSize(); 2706 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2707 // Empty aggregate parameters do not take up registers. Examples: 2708 // struct { } a; 2709 // union { } b; 2710 // int c[0]; 2711 // etc. However, we have to provide a place-holder in InVals, so 2712 // pretend we have an 8-byte item at the current address for that 2713 // purpose. 2714 if (!ObjSize) { 2715 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 2716 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2717 InVals.push_back(FIN); 2718 continue; 2719 } 2720 2721 // Create a stack object covering all stack doublewords occupied 2722 // by the argument. If the argument is (fully or partially) on 2723 // the stack, or if the argument is fully in registers but the 2724 // caller has allocated the parameter save anyway, we can refer 2725 // directly to the caller's stack frame. Otherwise, create a 2726 // local copy in our own frame. 2727 int FI; 2728 if (HasParameterArea || 2729 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize) 2730 FI = MFI->CreateFixedObject(ArgSize, ArgOffset, false, true); 2731 else 2732 FI = MFI->CreateStackObject(ArgSize, Align, false); 2733 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2734 2735 // Handle aggregates smaller than 8 bytes. 2736 if (ObjSize < PtrByteSize) { 2737 // The value of the object is its address, which differs from the 2738 // address of the enclosing doubleword on big-endian systems. 2739 SDValue Arg = FIN; 2740 if (!isLittleEndian) { 2741 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, PtrVT); 2742 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff); 2743 } 2744 InVals.push_back(Arg); 2745 2746 if (GPR_idx != Num_GPR_Regs) { 2747 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2748 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2749 SDValue Store; 2750 2751 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 2752 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 2753 (ObjSize == 2 ? 
MVT::i16 : MVT::i32)); 2754 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg, 2755 MachinePointerInfo(FuncArg), 2756 ObjType, false, false, 0); 2757 } else { 2758 // For sizes that don't fit a truncating store (3, 5, 6, 7), 2759 // store the whole register as-is to the parameter save area 2760 // slot. 2761 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2762 MachinePointerInfo(FuncArg), 2763 false, false, 0); 2764 } 2765 2766 MemOps.push_back(Store); 2767 } 2768 // Whether we copied from a register or not, advance the offset 2769 // into the parameter save area by a full doubleword. 2770 ArgOffset += PtrByteSize; 2771 continue; 2772 } 2773 2774 // The value of the object is its address, which is the address of 2775 // its first stack doubleword. 2776 InVals.push_back(FIN); 2777 2778 // Store whatever pieces of the object are in registers to memory. 2779 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 2780 if (GPR_idx == Num_GPR_Regs) 2781 break; 2782 2783 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2784 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2785 SDValue Addr = FIN; 2786 if (j) { 2787 SDValue Off = DAG.getConstant(j, PtrVT); 2788 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off); 2789 } 2790 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr, 2791 MachinePointerInfo(FuncArg, j), 2792 false, false, 0); 2793 MemOps.push_back(Store); 2794 ++GPR_idx; 2795 } 2796 ArgOffset += ArgSize; 2797 continue; 2798 } 2799 2800 switch (ObjectVT.getSimpleVT().SimpleTy) { 2801 default: llvm_unreachable("Unhandled argument type!"); 2802 case MVT::i1: 2803 case MVT::i32: 2804 case MVT::i64: 2805 // These can be scalar arguments or elements of an integer array type 2806 // passed directly. Clang may use those instead of "byval" aggregate 2807 // types to avoid forcing arguments to memory unnecessarily. 2808 if (GPR_idx != Num_GPR_Regs) { 2809 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2810 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 2811 2812 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 2813 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 2814 // value to MVT::i64 and then truncate to the correct register size. 2815 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 2816 } else { 2817 needsLoad = true; 2818 ArgSize = PtrByteSize; 2819 } 2820 ArgOffset += 8; 2821 break; 2822 2823 case MVT::f32: 2824 case MVT::f64: 2825 // These can be scalar arguments or elements of a float array type 2826 // passed directly. The latter are used to implement ELFv2 homogenous 2827 // float aggregates. 2828 if (FPR_idx != Num_FPR_Regs) { 2829 unsigned VReg; 2830 2831 if (ObjectVT == MVT::f32) 2832 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 2833 else 2834 VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX() ? 2835 &PPC::VSFRCRegClass : 2836 &PPC::F8RCRegClass); 2837 2838 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2839 ++FPR_idx; 2840 } else if (GPR_idx != Num_GPR_Regs) { 2841 // This can only ever happen in the presence of f32 array types, 2842 // since otherwise we never run out of FPRs before running out 2843 // of GPRs. 2844 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2845 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 2846 2847 if (ObjectVT == MVT::f32) { 2848 if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 
4 : 0)) 2849 ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal, 2850 DAG.getConstant(32, MVT::i32)); 2851 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); 2852 } 2853 2854 ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal); 2855 } else { 2856 needsLoad = true; 2857 } 2858 2859 // When passing an array of floats, the array occupies consecutive 2860 // space in the argument area; only round up to the next doubleword 2861 // at the end of the array. Otherwise, each float takes 8 bytes. 2862 ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize; 2863 ArgOffset += ArgSize; 2864 if (Flags.isInConsecutiveRegsLast()) 2865 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2866 break; 2867 case MVT::v4f32: 2868 case MVT::v4i32: 2869 case MVT::v8i16: 2870 case MVT::v16i8: 2871 case MVT::v2f64: 2872 case MVT::v2i64: 2873 // These can be scalar arguments or elements of a vector array type 2874 // passed directly. The latter are used to implement ELFv2 homogenous 2875 // vector aggregates. 2876 if (VR_idx != Num_VR_Regs) { 2877 unsigned VReg = (ObjectVT == MVT::v2f64 || ObjectVT == MVT::v2i64) ? 2878 MF.addLiveIn(VSRH[VR_idx], &PPC::VSHRCRegClass) : 2879 MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 2880 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2881 ++VR_idx; 2882 } else { 2883 needsLoad = true; 2884 } 2885 ArgOffset += 16; 2886 break; 2887 } 2888 2889 // We need to load the argument to a virtual register if we determined 2890 // above that we ran out of physical registers of the appropriate type. 2891 if (needsLoad) { 2892 if (ObjSize < ArgSize && !isLittleEndian) 2893 CurArgOffset += ArgSize - ObjSize; 2894 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, isImmutable); 2895 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2896 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(), 2897 false, false, false, 0); 2898 } 2899 2900 InVals.push_back(ArgVal); 2901 } 2902 2903 // Area that is at least reserved in the caller of this function. 2904 unsigned MinReservedArea; 2905 if (HasParameterArea) 2906 MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize); 2907 else 2908 MinReservedArea = LinkageSize; 2909 2910 // Set the size that is at least reserved in caller of this function. Tail 2911 // call optimized functions' reserved stack space needs to be aligned so that 2912 // taking the difference between two stack areas will result in an aligned 2913 // stack. 2914 MinReservedArea = EnsureStackAlignment(MF.getTarget(), MinReservedArea); 2915 FuncInfo->setMinReservedArea(MinReservedArea); 2916 2917 // If the function takes variable number of arguments, make a frame index for 2918 // the start of the first vararg value... for expansion of llvm.va_start. 2919 if (isVarArg) { 2920 int Depth = ArgOffset; 2921 2922 FuncInfo->setVarArgsFrameIndex( 2923 MFI->CreateFixedObject(PtrByteSize, Depth, true)); 2924 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2925 2926 // If this function is vararg, store any remaining integer argument regs 2927 // to their spots on the stack so that they may be loaded by deferencing the 2928 // result of va_next. 
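    // (For a vararg function the caller allocates the parameter save area, so
    // each of X3-X10 not consumed by a fixed argument is simply stored to its
    // home doubleword there.)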
2929 for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 2930 GPR_idx < Num_GPR_Regs; ++GPR_idx) { 2931 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2932 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2933 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2934 MachinePointerInfo(), false, false, 0); 2935 MemOps.push_back(Store); 2936 // Increment the address by four for the next argument to store 2937 SDValue PtrOff = DAG.getConstant(PtrByteSize, PtrVT); 2938 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2939 } 2940 } 2941 2942 if (!MemOps.empty()) 2943 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 2944 2945 return Chain; 2946 } 2947 2948 SDValue 2949 PPCTargetLowering::LowerFormalArguments_Darwin( 2950 SDValue Chain, 2951 CallingConv::ID CallConv, bool isVarArg, 2952 const SmallVectorImpl<ISD::InputArg> 2953 &Ins, 2954 SDLoc dl, SelectionDAG &DAG, 2955 SmallVectorImpl<SDValue> &InVals) const { 2956 // TODO: add description of PPC stack frame format, or at least some docs. 2957 // 2958 MachineFunction &MF = DAG.getMachineFunction(); 2959 MachineFrameInfo *MFI = MF.getFrameInfo(); 2960 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2961 2962 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2963 bool isPPC64 = PtrVT == MVT::i64; 2964 // Potential tail calls could cause overwriting of argument stack slots. 2965 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 2966 (CallConv == CallingConv::Fast)); 2967 unsigned PtrByteSize = isPPC64 ? 8 : 4; 2968 2969 unsigned LinkageSize = PPCFrameLowering::getLinkageSize(isPPC64, true, 2970 false); 2971 unsigned ArgOffset = LinkageSize; 2972 // Area that is at least reserved in caller of this function. 2973 unsigned MinReservedArea = ArgOffset; 2974 2975 static const MCPhysReg GPR_32[] = { // 32-bit registers. 2976 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2977 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2978 }; 2979 static const MCPhysReg GPR_64[] = { // 64-bit registers. 2980 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 2981 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 2982 }; 2983 2984 static const MCPhysReg *FPR = GetFPR(); 2985 2986 static const MCPhysReg VR[] = { 2987 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 2988 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 2989 }; 2990 2991 const unsigned Num_GPR_Regs = array_lengthof(GPR_32); 2992 const unsigned Num_FPR_Regs = 13; 2993 const unsigned Num_VR_Regs = array_lengthof( VR); 2994 2995 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 2996 2997 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 2998 2999 // In 32-bit non-varargs functions, the stack space for vectors is after the 3000 // stack space for non-vectors. We do not use this space unless we have 3001 // too many vectors to fit in registers, something that only occurs in 3002 // constructed examples:), but we have to walk the arglist to figure 3003 // that out...for the pathological case, compute VecArgOffset as the 3004 // start of the vector parameter area. Computing VecArgOffset is the 3005 // entire point of the following loop. 3006 unsigned VecArgOffset = ArgOffset; 3007 if (!isVarArg && !isPPC64) { 3008 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; 3009 ++ArgNo) { 3010 EVT ObjectVT = Ins[ArgNo].VT; 3011 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3012 3013 if (Flags.isByVal()) { 3014 // ObjSize is the true size, ArgSize rounded up to multiple of regs. 
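        // For example, with 4-byte pointers (the only case reaching this
        // pre-pass) a 6-byte byval object has ObjSize == 6 and ArgSize == 8.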
3015 unsigned ObjSize = Flags.getByValSize(); 3016 unsigned ArgSize = 3017 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3018 VecArgOffset += ArgSize; 3019 continue; 3020 } 3021 3022 switch(ObjectVT.getSimpleVT().SimpleTy) { 3023 default: llvm_unreachable("Unhandled argument type!"); 3024 case MVT::i1: 3025 case MVT::i32: 3026 case MVT::f32: 3027 VecArgOffset += 4; 3028 break; 3029 case MVT::i64: // PPC64 3030 case MVT::f64: 3031 // FIXME: We are guaranteed to be !isPPC64 at this point. 3032 // Does MVT::i64 apply? 3033 VecArgOffset += 8; 3034 break; 3035 case MVT::v4f32: 3036 case MVT::v4i32: 3037 case MVT::v8i16: 3038 case MVT::v16i8: 3039 // Nothing to do, we're only looking at Nonvector args here. 3040 break; 3041 } 3042 } 3043 } 3044 // We've found where the vector parameter area in memory is. Skip the 3045 // first 12 parameters; these don't use that memory. 3046 VecArgOffset = ((VecArgOffset+15)/16)*16; 3047 VecArgOffset += 12*16; 3048 3049 // Add DAG nodes to load the arguments or copy them out of registers. On 3050 // entry to a function on PPC, the arguments start after the linkage area, 3051 // although the first ones are often in registers. 3052 3053 SmallVector<SDValue, 8> MemOps; 3054 unsigned nAltivecParamsAtEnd = 0; 3055 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 3056 unsigned CurArgIdx = 0; 3057 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3058 SDValue ArgVal; 3059 bool needsLoad = false; 3060 EVT ObjectVT = Ins[ArgNo].VT; 3061 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 3062 unsigned ArgSize = ObjSize; 3063 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3064 std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx); 3065 CurArgIdx = Ins[ArgNo].OrigArgIndex; 3066 3067 unsigned CurArgOffset = ArgOffset; 3068 3069 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 3070 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 3071 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 3072 if (isVarArg || isPPC64) { 3073 MinReservedArea = ((MinReservedArea+15)/16)*16; 3074 MinReservedArea += CalculateStackSlotSize(ObjectVT, 3075 Flags, 3076 PtrByteSize); 3077 } else nAltivecParamsAtEnd++; 3078 } else 3079 // Calculate min reserved area. 3080 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 3081 Flags, 3082 PtrByteSize); 3083 3084 // FIXME the codegen can be much improved in some cases. 3085 // We do not have to keep everything in memory. 3086 if (Flags.isByVal()) { 3087 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3088 ObjSize = Flags.getByValSize(); 3089 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3090 // Objects of size 1 and 2 are right justified, everything else is 3091 // left justified. This means the memory address is adjusted forwards. 3092 if (ObjSize==1 || ObjSize==2) { 3093 CurArgOffset = CurArgOffset + (4 - ObjSize); 3094 } 3095 // The value of the object is its address. 3096 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, false, true); 3097 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3098 InVals.push_back(FIN); 3099 if (ObjSize==1 || ObjSize==2) { 3100 if (GPR_idx != Num_GPR_Regs) { 3101 unsigned VReg; 3102 if (isPPC64) 3103 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3104 else 3105 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3106 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3107 EVT ObjType = ObjSize == 1 ? 
MVT::i8 : MVT::i16; 3108 SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 3109 MachinePointerInfo(FuncArg), 3110 ObjType, false, false, 0); 3111 MemOps.push_back(Store); 3112 ++GPR_idx; 3113 } 3114 3115 ArgOffset += PtrByteSize; 3116 3117 continue; 3118 } 3119 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3120 // Store whatever pieces of the object are in registers 3121 // to memory. ArgOffset will be the address of the beginning 3122 // of the object. 3123 if (GPR_idx != Num_GPR_Regs) { 3124 unsigned VReg; 3125 if (isPPC64) 3126 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3127 else 3128 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3129 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 3130 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3131 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3132 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3133 MachinePointerInfo(FuncArg, j), 3134 false, false, 0); 3135 MemOps.push_back(Store); 3136 ++GPR_idx; 3137 ArgOffset += PtrByteSize; 3138 } else { 3139 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 3140 break; 3141 } 3142 } 3143 continue; 3144 } 3145 3146 switch (ObjectVT.getSimpleVT().SimpleTy) { 3147 default: llvm_unreachable("Unhandled argument type!"); 3148 case MVT::i1: 3149 case MVT::i32: 3150 if (!isPPC64) { 3151 if (GPR_idx != Num_GPR_Regs) { 3152 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3153 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 3154 3155 if (ObjectVT == MVT::i1) 3156 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal); 3157 3158 ++GPR_idx; 3159 } else { 3160 needsLoad = true; 3161 ArgSize = PtrByteSize; 3162 } 3163 // All int arguments reserve stack space in the Darwin ABI. 3164 ArgOffset += PtrByteSize; 3165 break; 3166 } 3167 // FALLTHROUGH 3168 case MVT::i64: // PPC64 3169 if (GPR_idx != Num_GPR_Regs) { 3170 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3171 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3172 3173 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3174 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3175 // value to MVT::i64 and then truncate to the correct register size. 3176 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3177 3178 ++GPR_idx; 3179 } else { 3180 needsLoad = true; 3181 ArgSize = PtrByteSize; 3182 } 3183 // All int arguments reserve stack space in the Darwin ABI. 3184 ArgOffset += 8; 3185 break; 3186 3187 case MVT::f32: 3188 case MVT::f64: 3189 // Every 4 bytes of argument space consumes one of the GPRs available for 3190 // argument passing. 3191 if (GPR_idx != Num_GPR_Regs) { 3192 ++GPR_idx; 3193 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 3194 ++GPR_idx; 3195 } 3196 if (FPR_idx != Num_FPR_Regs) { 3197 unsigned VReg; 3198 3199 if (ObjectVT == MVT::f32) 3200 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 3201 else 3202 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 3203 3204 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3205 ++FPR_idx; 3206 } else { 3207 needsLoad = true; 3208 } 3209 3210 // All FP arguments reserve stack space in the Darwin ABI. 3211 ArgOffset += isPPC64 ? 8 : ObjSize; 3212 break; 3213 case MVT::v4f32: 3214 case MVT::v4i32: 3215 case MVT::v8i16: 3216 case MVT::v16i8: 3217 // Note that vector arguments in registers don't reserve stack space, 3218 // except in varargs functions. 
3219 if (VR_idx != Num_VR_Regs) { 3220 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 3221 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3222 if (isVarArg) { 3223 while ((ArgOffset % 16) != 0) { 3224 ArgOffset += PtrByteSize; 3225 if (GPR_idx != Num_GPR_Regs) 3226 GPR_idx++; 3227 } 3228 ArgOffset += 16; 3229 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 3230 } 3231 ++VR_idx; 3232 } else { 3233 if (!isVarArg && !isPPC64) { 3234 // Vectors go after all the nonvectors. 3235 CurArgOffset = VecArgOffset; 3236 VecArgOffset += 16; 3237 } else { 3238 // Vectors are aligned. 3239 ArgOffset = ((ArgOffset+15)/16)*16; 3240 CurArgOffset = ArgOffset; 3241 ArgOffset += 16; 3242 } 3243 needsLoad = true; 3244 } 3245 break; 3246 } 3247 3248 // We need to load the argument to a virtual register if we determined above 3249 // that we ran out of physical registers of the appropriate type. 3250 if (needsLoad) { 3251 int FI = MFI->CreateFixedObject(ObjSize, 3252 CurArgOffset + (ArgSize - ObjSize), 3253 isImmutable); 3254 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3255 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(), 3256 false, false, false, 0); 3257 } 3258 3259 InVals.push_back(ArgVal); 3260 } 3261 3262 // Allow for Altivec parameters at the end, if needed. 3263 if (nAltivecParamsAtEnd) { 3264 MinReservedArea = ((MinReservedArea+15)/16)*16; 3265 MinReservedArea += 16*nAltivecParamsAtEnd; 3266 } 3267 3268 // Area that is at least reserved in the caller of this function. 3269 MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize); 3270 3271 // Set the size that is at least reserved in caller of this function. Tail 3272 // call optimized functions' reserved stack space needs to be aligned so that 3273 // taking the difference between two stack areas will result in an aligned 3274 // stack. 3275 MinReservedArea = EnsureStackAlignment(MF.getTarget(), MinReservedArea); 3276 FuncInfo->setMinReservedArea(MinReservedArea); 3277 3278 // If the function takes variable number of arguments, make a frame index for 3279 // the start of the first vararg value... for expansion of llvm.va_start. 3280 if (isVarArg) { 3281 int Depth = ArgOffset; 3282 3283 FuncInfo->setVarArgsFrameIndex( 3284 MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 3285 Depth, true)); 3286 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3287 3288 // If this function is vararg, store any remaining integer argument regs 3289 // to their spots on the stack so that they may be loaded by deferencing the 3290 // result of va_next. 
3291 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { 3292 unsigned VReg; 3293 3294 if (isPPC64) 3295 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3296 else 3297 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3298 3299 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3300 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3301 MachinePointerInfo(), false, false, 0); 3302 MemOps.push_back(Store); 3303 // Increment the address by four for the next argument to store 3304 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); 3305 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3306 } 3307 } 3308 3309 if (!MemOps.empty()) 3310 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3311 3312 return Chain; 3313 } 3314 3315 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be 3316 /// adjusted to accommodate the arguments for the tailcall. 3317 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall, 3318 unsigned ParamSize) { 3319 3320 if (!isTailCall) return 0; 3321 3322 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>(); 3323 unsigned CallerMinReservedArea = FI->getMinReservedArea(); 3324 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize; 3325 // Remember only if the new adjustement is bigger. 3326 if (SPDiff < FI->getTailCallSPDelta()) 3327 FI->setTailCallSPDelta(SPDiff); 3328 3329 return SPDiff; 3330 } 3331 3332 /// IsEligibleForTailCallOptimization - Check whether the call is eligible 3333 /// for tail call optimization. Targets which want to do tail call 3334 /// optimization should implement this function. 3335 bool 3336 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 3337 CallingConv::ID CalleeCC, 3338 bool isVarArg, 3339 const SmallVectorImpl<ISD::InputArg> &Ins, 3340 SelectionDAG& DAG) const { 3341 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 3342 return false; 3343 3344 // Variable argument functions are not supported. 3345 if (isVarArg) 3346 return false; 3347 3348 MachineFunction &MF = DAG.getMachineFunction(); 3349 CallingConv::ID CallerCC = MF.getFunction()->getCallingConv(); 3350 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) { 3351 // Functions containing by val parameters are not supported. 3352 for (unsigned i = 0; i != Ins.size(); i++) { 3353 ISD::ArgFlagsTy Flags = Ins[i].Flags; 3354 if (Flags.isByVal()) return false; 3355 } 3356 3357 // Non-PIC/GOT tail calls are supported. 3358 if (getTargetMachine().getRelocationModel() != Reloc::PIC_) 3359 return true; 3360 3361 // At the moment we can only do local tail calls (in same module, hidden 3362 // or protected) if we are generating PIC. 3363 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 3364 return G->getGlobal()->hasHiddenVisibility() 3365 || G->getGlobal()->hasProtectedVisibility(); 3366 } 3367 3368 return false; 3369 } 3370 3371 /// isCallCompatibleAddress - Return the immediate to use if the specified 3372 /// 32-bit value is representable in the immediate field of a BxA instruction. 3373 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) { 3374 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 3375 if (!C) return nullptr; 3376 3377 int Addr = C->getZExtValue(); 3378 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero. 3379 SignExtend32<26>(Addr) != Addr) 3380 return nullptr; // Top 6 bits have to be sext of immediate. 
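  // The BLA operand encodes a word address, so the two implicitly-zero low
  // bits are stripped (>> 2) from the value returned below.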
3381 3382 return DAG.getConstant((int)C->getZExtValue() >> 2, 3383 DAG.getTargetLoweringInfo().getPointerTy()).getNode(); 3384 } 3385 3386 namespace { 3387 3388 struct TailCallArgumentInfo { 3389 SDValue Arg; 3390 SDValue FrameIdxOp; 3391 int FrameIdx; 3392 3393 TailCallArgumentInfo() : FrameIdx(0) {} 3394 }; 3395 3396 } 3397 3398 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. 3399 static void 3400 StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, 3401 SDValue Chain, 3402 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs, 3403 SmallVectorImpl<SDValue> &MemOpChains, 3404 SDLoc dl) { 3405 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 3406 SDValue Arg = TailCallArgs[i].Arg; 3407 SDValue FIN = TailCallArgs[i].FrameIdxOp; 3408 int FI = TailCallArgs[i].FrameIdx; 3409 // Store relative to framepointer. 3410 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, FIN, 3411 MachinePointerInfo::getFixedStack(FI), 3412 false, false, 0)); 3413 } 3414 } 3415 3416 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 3417 /// the appropriate stack slot for the tail call optimized function call. 3418 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, 3419 MachineFunction &MF, 3420 SDValue Chain, 3421 SDValue OldRetAddr, 3422 SDValue OldFP, 3423 int SPDiff, 3424 bool isPPC64, 3425 bool isDarwinABI, 3426 SDLoc dl) { 3427 if (SPDiff) { 3428 // Calculate the new stack slot for the return address. 3429 int SlotSize = isPPC64 ? 8 : 4; 3430 int NewRetAddrLoc = SPDiff + PPCFrameLowering::getReturnSaveOffset(isPPC64, 3431 isDarwinABI); 3432 int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize, 3433 NewRetAddrLoc, true); 3434 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 3435 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 3436 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx, 3437 MachinePointerInfo::getFixedStack(NewRetAddr), 3438 false, false, 0); 3439 3440 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack 3441 // slot as the FP is never overwritten. 3442 if (isDarwinABI) { 3443 int NewFPLoc = 3444 SPDiff + PPCFrameLowering::getFramePointerSaveOffset(isPPC64, isDarwinABI); 3445 int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc, 3446 true); 3447 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 3448 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx, 3449 MachinePointerInfo::getFixedStack(NewFPIdx), 3450 false, false, 0); 3451 } 3452 } 3453 return Chain; 3454 } 3455 3456 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate 3457 /// the position of the argument. 3458 static void 3459 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 3460 SDValue Arg, int SPDiff, unsigned ArgOffset, 3461 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) { 3462 int Offset = ArgOffset + SPDiff; 3463 uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8; 3464 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 3465 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 3466 SDValue FIN = DAG.getFrameIndex(FI, VT); 3467 TailCallArgumentInfo Info; 3468 Info.Arg = Arg; 3469 Info.FrameIdxOp = FIN; 3470 Info.FrameIdx = FI; 3471 TailCallArguments.push_back(Info); 3472 } 3473 3474 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address 3475 /// stack slot. Returns the chain as result and the loaded frame pointers in 3476 /// LROpOut/FPOpout. Used when tail calling. 
3477 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG, 3478 int SPDiff, 3479 SDValue Chain, 3480 SDValue &LROpOut, 3481 SDValue &FPOpOut, 3482 bool isDarwinABI, 3483 SDLoc dl) const { 3484 if (SPDiff) { 3485 // Load the LR and FP stack slot for later adjusting. 3486 EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32; 3487 LROpOut = getReturnAddrFrameIndex(DAG); 3488 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(), 3489 false, false, false, 0); 3490 Chain = SDValue(LROpOut.getNode(), 1); 3491 3492 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack 3493 // slot as the FP is never overwritten. 3494 if (isDarwinABI) { 3495 FPOpOut = getFramePointerFrameIndex(DAG); 3496 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(), 3497 false, false, false, 0); 3498 Chain = SDValue(FPOpOut.getNode(), 1); 3499 } 3500 } 3501 return Chain; 3502 } 3503 3504 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 3505 /// by "Src" to address "Dst" of size "Size". Alignment information is 3506 /// specified by the specific parameter attribute. The copy will be passed as 3507 /// a byval function parameter. 3508 /// Sometimes what we are copying is the end of a larger object, the part that 3509 /// does not fit in registers. 3510 static SDValue 3511 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 3512 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 3513 SDLoc dl) { 3514 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); 3515 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 3516 false, false, MachinePointerInfo(), 3517 MachinePointerInfo()); 3518 } 3519 3520 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 3521 /// tail calls. 3522 static void 3523 LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, 3524 SDValue Arg, SDValue PtrOff, int SPDiff, 3525 unsigned ArgOffset, bool isPPC64, bool isTailCall, 3526 bool isVector, SmallVectorImpl<SDValue> &MemOpChains, 3527 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, 3528 SDLoc dl) { 3529 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3530 if (!isTailCall) { 3531 if (isVector) { 3532 SDValue StackPtr; 3533 if (isPPC64) 3534 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 3535 else 3536 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 3537 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 3538 DAG.getConstant(ArgOffset, PtrVT)); 3539 } 3540 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, 3541 MachinePointerInfo(), false, false, 0)); 3542 // Calculate and remember argument location. 3543 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 3544 TailCallArguments); 3545 } 3546 3547 static 3548 void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 3549 SDLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes, 3550 SDValue LROp, SDValue FPOp, bool isDarwinABI, 3551 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 3552 MachineFunction &MF = DAG.getMachineFunction(); 3553 3554 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 3555 // might overwrite each other in case of tail call optimization. 3556 SmallVector<SDValue, 8> MemOpChains2; 3557 // Do not flag preceding copytoreg stuff together with the following stuff. 
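  // Clearing InFlag below means the CALLSEQ_END created at the end of this
  // function carries no glue operand, so it is not glued to the CopyToReg
  // nodes the caller emitted earlier.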
3558 InFlag = SDValue(); 3559 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 3560 MemOpChains2, dl); 3561 if (!MemOpChains2.empty()) 3562 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); 3563 3564 // Store the return address to the appropriate stack slot. 3565 Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff, 3566 isPPC64, isDarwinABI, dl); 3567 3568 // Emit callseq_end just before tailcall node. 3569 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 3570 DAG.getIntPtrConstant(0, true), InFlag, dl); 3571 InFlag = Chain.getValue(1); 3572 } 3573 3574 static 3575 unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, 3576 SDValue &Chain, SDLoc dl, int SPDiff, bool isTailCall, 3577 SmallVectorImpl<std::pair<unsigned, SDValue> > &RegsToPass, 3578 SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys, 3579 const PPCSubtarget &Subtarget) { 3580 3581 bool isPPC64 = Subtarget.isPPC64(); 3582 bool isSVR4ABI = Subtarget.isSVR4ABI(); 3583 bool isELFv2ABI = Subtarget.isELFv2ABI(); 3584 3585 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3586 NodeTys.push_back(MVT::Other); // Returns a chain 3587 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. 3588 3589 unsigned CallOpc = PPCISD::CALL; 3590 3591 bool needIndirectCall = true; 3592 if (!isSVR4ABI || !isPPC64) 3593 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { 3594 // If this is an absolute destination address, use the munged value. 3595 Callee = SDValue(Dest, 0); 3596 needIndirectCall = false; 3597 } 3598 3599 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 3600 unsigned OpFlags = 0; 3601 if ((DAG.getTarget().getRelocationModel() != Reloc::Static && 3602 (Subtarget.getTargetTriple().isMacOSX() && 3603 Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5)) && 3604 (G->getGlobal()->isDeclaration() || 3605 G->getGlobal()->isWeakForLinker())) || 3606 (Subtarget.isTargetELF() && !isPPC64 && 3607 !G->getGlobal()->hasLocalLinkage() && 3608 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) { 3609 // PC-relative references to external symbols should go through $stub, 3610 // unless we're building with the leopard linker or later, which 3611 // automatically synthesizes these stubs. 3612 OpFlags = PPCII::MO_PLT_OR_STUB; 3613 } 3614 3615 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, 3616 // every direct call is) turn it into a TargetGlobalAddress / 3617 // TargetExternalSymbol node so that legalize doesn't hack it. 3618 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, 3619 Callee.getValueType(), 0, OpFlags); 3620 needIndirectCall = false; 3621 } 3622 3623 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 3624 unsigned char OpFlags = 0; 3625 3626 if ((DAG.getTarget().getRelocationModel() != Reloc::Static && 3627 (Subtarget.getTargetTriple().isMacOSX() && 3628 Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5))) || 3629 (Subtarget.isTargetELF() && !isPPC64 && 3630 DAG.getTarget().getRelocationModel() == Reloc::PIC_) ) { 3631 // PC-relative references to external symbols should go through $stub, 3632 // unless we're building with the leopard linker or later, which 3633 // automatically synthesizes these stubs. 
3634 OpFlags = PPCII::MO_PLT_OR_STUB; 3635 } 3636 3637 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(), 3638 OpFlags); 3639 needIndirectCall = false; 3640 } 3641 3642 if (needIndirectCall) { 3643 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 3644 // to do the call, we can't use PPCISD::CALL. 3645 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 3646 3647 if (isSVR4ABI && isPPC64 && !isELFv2ABI) { 3648 // Function pointers in the 64-bit SVR4 ABI do not point to the function 3649 // entry point, but to the function descriptor (the function entry point 3650 // address is part of the function descriptor though). 3651 // The function descriptor is a three doubleword structure with the 3652 // following fields: function entry point, TOC base address and 3653 // environment pointer. 3654 // Thus for a call through a function pointer, the following actions need 3655 // to be performed: 3656 // 1. Save the TOC of the caller in the TOC save area of its stack 3657 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). 3658 // 2. Load the address of the function entry point from the function 3659 // descriptor. 3660 // 3. Load the TOC of the callee from the function descriptor into r2. 3661 // 4. Load the environment pointer from the function descriptor into 3662 // r11. 3663 // 5. Branch to the function entry point address. 3664 // 6. On return of the callee, the TOC of the caller needs to be 3665 // restored (this is done in FinishCall()). 3666 // 3667 // All those operations are flagged together to ensure that no other 3668 // operations can be scheduled in between. E.g. without flagging the 3669 // operations together, a TOC access in the caller could be scheduled 3670 // between the load of the callee TOC and the branch to the callee, which 3671 // results in the TOC access going through the TOC of the callee instead 3672 // of going through the TOC of the caller, which leads to incorrect code. 3673 3674 // Load the address of the function entry point from the function 3675 // descriptor. 3676 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other, MVT::Glue); 3677 SDValue LoadFuncPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, 3678 makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2)); 3679 Chain = LoadFuncPtr.getValue(1); 3680 InFlag = LoadFuncPtr.getValue(2); 3681 3682 // Load environment pointer into r11. 3683 // Offset of the environment pointer within the function descriptor. 3684 SDValue PtrOff = DAG.getIntPtrConstant(16); 3685 3686 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 3687 SDValue LoadEnvPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, Chain, AddPtr, 3688 InFlag); 3689 Chain = LoadEnvPtr.getValue(1); 3690 InFlag = LoadEnvPtr.getValue(2); 3691 3692 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 3693 InFlag); 3694 Chain = EnvVal.getValue(0); 3695 InFlag = EnvVal.getValue(1); 3696 3697 // Load TOC of the callee into r2. We are using a target-specific load 3698 // with r2 hard coded, because the result of a target-independent load 3699 // would never go directly into r2, since r2 is a reserved register (which 3700 // prevents the register allocator from allocating it), resulting in an 3701 // additional register being allocated and an unnecessary move instruction 3702 // being generated. 
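      // Schematically, the function descriptor addressed by Callee is
      //   +0  : function entry point   (loaded above)
      //   +8  : TOC base               (loaded into r2 here)
      //   +16 : environment pointer    (loaded into r11 above)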
3703 VTs = DAG.getVTList(MVT::Other, MVT::Glue); 3704 SDValue TOCOff = DAG.getIntPtrConstant(8); 3705 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff); 3706 SDValue LoadTOCPtr = DAG.getNode(PPCISD::LOAD_TOC, dl, VTs, Chain, 3707 AddTOC, InFlag); 3708 Chain = LoadTOCPtr.getValue(0); 3709 InFlag = LoadTOCPtr.getValue(1); 3710 3711 MTCTROps[0] = Chain; 3712 MTCTROps[1] = LoadFuncPtr; 3713 MTCTROps[2] = InFlag; 3714 } 3715 3716 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, 3717 makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2)); 3718 InFlag = Chain.getValue(1); 3719 3720 NodeTys.clear(); 3721 NodeTys.push_back(MVT::Other); 3722 NodeTys.push_back(MVT::Glue); 3723 Ops.push_back(Chain); 3724 CallOpc = PPCISD::BCTRL; 3725 Callee.setNode(nullptr); 3726 // Add use of X11 (holding environment pointer) 3727 if (isSVR4ABI && isPPC64 && !isELFv2ABI) 3728 Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); 3729 // Add CTR register as callee so a bctr can be emitted later. 3730 if (isTailCall) 3731 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 3732 } 3733 3734 // If this is a direct call, pass the chain and the callee. 3735 if (Callee.getNode()) { 3736 Ops.push_back(Chain); 3737 Ops.push_back(Callee); 3738 3739 // If this is a call to __tls_get_addr, find the symbol whose address 3740 // is to be taken and add it to the list. This will be used to 3741 // generate __tls_get_addr(<sym>@tlsgd) or __tls_get_addr(<sym>@tlsld). 3742 // We find the symbol by walking the chain to the CopyFromReg, walking 3743 // back from the CopyFromReg to the ADDI_TLSGD_L or ADDI_TLSLD_L, and 3744 // pulling the symbol from that node. 3745 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) 3746 if (!strcmp(S->getSymbol(), "__tls_get_addr")) { 3747 assert(!needIndirectCall && "Indirect call to __tls_get_addr???"); 3748 SDNode *AddI = Chain.getNode()->getOperand(2).getNode(); 3749 SDValue TGTAddr = AddI->getOperand(1); 3750 assert(TGTAddr.getNode()->getOpcode() == ISD::TargetGlobalTLSAddress && 3751 "Didn't find target global TLS address where we expected one"); 3752 Ops.push_back(TGTAddr); 3753 CallOpc = PPCISD::CALL_TLS; 3754 } 3755 } 3756 // If this is a tail call add stack pointer delta. 3757 if (isTailCall) 3758 Ops.push_back(DAG.getConstant(SPDiff, MVT::i32)); 3759 3760 // Add argument registers to the end of the list so that they are known live 3761 // into the call. 3762 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 3763 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 3764 RegsToPass[i].second.getValueType())); 3765 3766 // Direct calls in the ELFv2 ABI need the TOC register live into the call. 
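  // (An ELFv2 callee entered through its local entry point expects r2 to
  // already hold a valid TOC pointer, hence X2 is marked as used below.)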
3767 if (Callee.getNode() && isELFv2ABI) 3768 Ops.push_back(DAG.getRegister(PPC::X2, PtrVT)); 3769 3770 return CallOpc; 3771 } 3772 3773 static 3774 bool isLocalCall(const SDValue &Callee) 3775 { 3776 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 3777 return !G->getGlobal()->isDeclaration() && 3778 !G->getGlobal()->isWeakForLinker(); 3779 return false; 3780 } 3781 3782 SDValue 3783 PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 3784 CallingConv::ID CallConv, bool isVarArg, 3785 const SmallVectorImpl<ISD::InputArg> &Ins, 3786 SDLoc dl, SelectionDAG &DAG, 3787 SmallVectorImpl<SDValue> &InVals) const { 3788 3789 SmallVector<CCValAssign, 16> RVLocs; 3790 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 3791 *DAG.getContext()); 3792 CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC); 3793 3794 // Copy all of the result registers out of their specified physreg. 3795 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 3796 CCValAssign &VA = RVLocs[i]; 3797 assert(VA.isRegLoc() && "Can only return in registers!"); 3798 3799 SDValue Val = DAG.getCopyFromReg(Chain, dl, 3800 VA.getLocReg(), VA.getLocVT(), InFlag); 3801 Chain = Val.getValue(1); 3802 InFlag = Val.getValue(2); 3803 3804 switch (VA.getLocInfo()) { 3805 default: llvm_unreachable("Unknown loc info!"); 3806 case CCValAssign::Full: break; 3807 case CCValAssign::AExt: 3808 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 3809 break; 3810 case CCValAssign::ZExt: 3811 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val, 3812 DAG.getValueType(VA.getValVT())); 3813 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 3814 break; 3815 case CCValAssign::SExt: 3816 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val, 3817 DAG.getValueType(VA.getValVT())); 3818 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 3819 break; 3820 } 3821 3822 InVals.push_back(Val); 3823 } 3824 3825 return Chain; 3826 } 3827 3828 SDValue 3829 PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl, 3830 bool isTailCall, bool isVarArg, 3831 SelectionDAG &DAG, 3832 SmallVector<std::pair<unsigned, SDValue>, 8> 3833 &RegsToPass, 3834 SDValue InFlag, SDValue Chain, 3835 SDValue &Callee, 3836 int SPDiff, unsigned NumBytes, 3837 const SmallVectorImpl<ISD::InputArg> &Ins, 3838 SmallVectorImpl<SDValue> &InVals) const { 3839 3840 bool isELFv2ABI = Subtarget.isELFv2ABI(); 3841 std::vector<EVT> NodeTys; 3842 SmallVector<SDValue, 8> Ops; 3843 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, dl, SPDiff, 3844 isTailCall, RegsToPass, Ops, NodeTys, 3845 Subtarget); 3846 3847 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls 3848 if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64()) 3849 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); 3850 3851 // When performing tail call optimization the callee pops its arguments off 3852 // the stack. Account for this here so these bytes can be pushed back on in 3853 // PPCFrameLowering::eliminateCallFramePseudoInstr. 3854 int BytesCalleePops = 3855 (CallConv == CallingConv::Fast && 3856 getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0; 3857 3858 // Add a register mask operand representing the call-preserved registers. 
3859 const TargetRegisterInfo *TRI = 3860 getTargetMachine().getSubtargetImpl()->getRegisterInfo(); 3861 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv); 3862 assert(Mask && "Missing call preserved mask for calling convention"); 3863 Ops.push_back(DAG.getRegisterMask(Mask)); 3864 3865 if (InFlag.getNode()) 3866 Ops.push_back(InFlag); 3867 3868 // Emit tail call. 3869 if (isTailCall) { 3870 assert(((Callee.getOpcode() == ISD::Register && 3871 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) || 3872 Callee.getOpcode() == ISD::TargetExternalSymbol || 3873 Callee.getOpcode() == ISD::TargetGlobalAddress || 3874 isa<ConstantSDNode>(Callee)) && 3875 "Expecting an global address, external symbol, absolute value or register"); 3876 3877 return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops); 3878 } 3879 3880 // Add a NOP immediately after the branch instruction when using the 64-bit 3881 // SVR4 ABI. At link time, if caller and callee are in a different module and 3882 // thus have a different TOC, the call will be replaced with a call to a stub 3883 // function which saves the current TOC, loads the TOC of the callee and 3884 // branches to the callee. The NOP will be replaced with a load instruction 3885 // which restores the TOC of the caller from the TOC save slot of the current 3886 // stack frame. If caller and callee belong to the same module (and have the 3887 // same TOC), the NOP will remain unchanged. 3888 3889 if (!isTailCall && Subtarget.isSVR4ABI()&& Subtarget.isPPC64()) { 3890 if (CallOpc == PPCISD::BCTRL) { 3891 // This is a call through a function pointer. 3892 // Restore the caller TOC from the save area into R2. 3893 // See PrepareCall() for more information about calls through function 3894 // pointers in the 64-bit SVR4 ABI. 3895 // We are using a target-specific load with r2 hard coded, because the 3896 // result of a target-independent load would never go directly into r2, 3897 // since r2 is a reserved register (which prevents the register allocator 3898 // from allocating it), resulting in an additional register being 3899 // allocated and an unnecessary move instruction being generated. 3900 CallOpc = PPCISD::BCTRL_LOAD_TOC; 3901 3902 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3903 SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT); 3904 unsigned TOCSaveOffset = PPCFrameLowering::getTOCSaveOffset(isELFv2ABI); 3905 SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset); 3906 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff); 3907 3908 // The address needs to go after the chain input but before the flag (or 3909 // any other variadic arguments). 3910 Ops.insert(std::next(Ops.begin()), AddTOC); 3911 } else if ((CallOpc == PPCISD::CALL) && 3912 (!isLocalCall(Callee) || 3913 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) { 3914 // Otherwise insert NOP for non-local calls. 3915 CallOpc = PPCISD::CALL_NOP; 3916 } else if (CallOpc == PPCISD::CALL_TLS) 3917 // For 64-bit SVR4, TLS calls are always non-local. 
3918 CallOpc = PPCISD::CALL_NOP_TLS; 3919 } 3920 3921 Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops); 3922 InFlag = Chain.getValue(1); 3923 3924 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 3925 DAG.getIntPtrConstant(BytesCalleePops, true), 3926 InFlag, dl); 3927 if (!Ins.empty()) 3928 InFlag = Chain.getValue(1); 3929 3930 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 3931 Ins, dl, DAG, InVals); 3932 } 3933 3934 SDValue 3935 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 3936 SmallVectorImpl<SDValue> &InVals) const { 3937 SelectionDAG &DAG = CLI.DAG; 3938 SDLoc &dl = CLI.DL; 3939 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 3940 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 3941 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 3942 SDValue Chain = CLI.Chain; 3943 SDValue Callee = CLI.Callee; 3944 bool &isTailCall = CLI.IsTailCall; 3945 CallingConv::ID CallConv = CLI.CallConv; 3946 bool isVarArg = CLI.IsVarArg; 3947 3948 if (isTailCall) 3949 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 3950 Ins, DAG); 3951 3952 if (!isTailCall && CLI.CS && CLI.CS->isMustTailCall()) 3953 report_fatal_error("failed to perform tail call elimination on a call " 3954 "site marked musttail"); 3955 3956 if (Subtarget.isSVR4ABI()) { 3957 if (Subtarget.isPPC64()) 3958 return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg, 3959 isTailCall, Outs, OutVals, Ins, 3960 dl, DAG, InVals); 3961 else 3962 return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg, 3963 isTailCall, Outs, OutVals, Ins, 3964 dl, DAG, InVals); 3965 } 3966 3967 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg, 3968 isTailCall, Outs, OutVals, Ins, 3969 dl, DAG, InVals); 3970 } 3971 3972 SDValue 3973 PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee, 3974 CallingConv::ID CallConv, bool isVarArg, 3975 bool isTailCall, 3976 const SmallVectorImpl<ISD::OutputArg> &Outs, 3977 const SmallVectorImpl<SDValue> &OutVals, 3978 const SmallVectorImpl<ISD::InputArg> &Ins, 3979 SDLoc dl, SelectionDAG &DAG, 3980 SmallVectorImpl<SDValue> &InVals) const { 3981 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description 3982 // of the 32-bit SVR4 ABI stack frame layout. 3983 3984 assert((CallConv == CallingConv::C || 3985 CallConv == CallingConv::Fast) && "Unknown calling convention!"); 3986 3987 unsigned PtrByteSize = 4; 3988 3989 MachineFunction &MF = DAG.getMachineFunction(); 3990 3991 // Mark this function as potentially containing a function that contains a 3992 // tail call. As a consequence the frame pointer will be used for dynamicalloc 3993 // and restoring the callers stack pointer in this functions epilog. This is 3994 // done because by tail calling the called function might overwrite the value 3995 // in this function's (MF) stack pointer stack slot 0(SP). 3996 if (getTargetMachine().Options.GuaranteedTailCallOpt && 3997 CallConv == CallingConv::Fast) 3998 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 3999 4000 // Count how many bytes are to be pushed on the stack, including the linkage 4001 // area, parameter list area and the part of the local variable space which 4002 // contains copies of aggregates which are passed by value. 4003 4004 // Assign locations to all of the outgoing arguments. 4005 SmallVector<CCValAssign, 16> ArgLocs; 4006 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 4007 *DAG.getContext()); 4008 4009 // Reserve space for the linkage area on the stack. 
4010 CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false, false), 4011 PtrByteSize); 4012 4013 if (isVarArg) { 4014 // Handle fixed and variable vector arguments differently. 4015 // Fixed vector arguments go into registers as long as registers are 4016 // available. Variable vector arguments always go into memory. 4017 unsigned NumArgs = Outs.size(); 4018 4019 for (unsigned i = 0; i != NumArgs; ++i) { 4020 MVT ArgVT = Outs[i].VT; 4021 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 4022 bool Result; 4023 4024 if (Outs[i].IsFixed) { 4025 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, 4026 CCInfo); 4027 } else { 4028 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, 4029 ArgFlags, CCInfo); 4030 } 4031 4032 if (Result) { 4033 #ifndef NDEBUG 4034 errs() << "Call operand #" << i << " has unhandled type " 4035 << EVT(ArgVT).getEVTString() << "\n"; 4036 #endif 4037 llvm_unreachable(nullptr); 4038 } 4039 } 4040 } else { 4041 // All arguments are treated the same. 4042 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4); 4043 } 4044 4045 // Assign locations to all of the outgoing aggregate by value arguments. 4046 SmallVector<CCValAssign, 16> ByValArgLocs; 4047 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 4048 ByValArgLocs, *DAG.getContext()); 4049 4050 // Reserve stack space for the allocations in CCInfo. 4051 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 4052 4053 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal); 4054 4055 // Size of the linkage area, parameter list area and the part of the local 4056 // space variable where copies of aggregates which are passed by value are 4057 // stored. 4058 unsigned NumBytes = CCByValInfo.getNextStackOffset(); 4059 4060 // Calculate by how many bytes the stack has to be adjusted in case of tail 4061 // call optimization. 4062 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 4063 4064 // Adjust the stack pointer for the new arguments... 4065 // These operations are automatically eliminated by the prolog/epilog pass 4066 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true), 4067 dl); 4068 SDValue CallSeqStart = Chain; 4069 4070 // Load the return address and frame pointer so it can be moved somewhere else 4071 // later. 4072 SDValue LROp, FPOp; 4073 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false, 4074 dl); 4075 4076 // Set up a copy of the stack pointer for use loading and storing any 4077 // arguments that may not fit in the registers available for argument 4078 // passing. 4079 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 4080 4081 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 4082 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 4083 SmallVector<SDValue, 8> MemOpChains; 4084 4085 bool seenFloatArg = false; 4086 // Walk the register/memloc assignments, inserting copies/loads. 4087 for (unsigned i = 0, j = 0, e = ArgLocs.size(); 4088 i != e; 4089 ++i) { 4090 CCValAssign &VA = ArgLocs[i]; 4091 SDValue Arg = OutVals[i]; 4092 ISD::ArgFlagsTy Flags = Outs[i].Flags; 4093 4094 if (Flags.isByVal()) { 4095 // Argument is an aggregate which is passed by value, thus we need to 4096 // create a copy of it in the local variable space of the current stack 4097 // frame (which is the stack frame of the caller) and pass the address of 4098 // this copy to the callee. 
4099 assert((j < ByValArgLocs.size()) && "Index out of bounds!"); 4100 CCValAssign &ByValVA = ByValArgLocs[j++]; 4101 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); 4102 4103 // Memory reserved in the local variable space of the callers stack frame. 4104 unsigned LocMemOffset = ByValVA.getLocMemOffset(); 4105 4106 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 4107 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 4108 4109 // Create a copy of the argument in the local area of the current 4110 // stack frame. 4111 SDValue MemcpyCall = 4112 CreateCopyOfByValArgument(Arg, PtrOff, 4113 CallSeqStart.getNode()->getOperand(0), 4114 Flags, DAG, dl); 4115 4116 // This must go outside the CALLSEQ_START..END. 4117 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 4118 CallSeqStart.getNode()->getOperand(1), 4119 SDLoc(MemcpyCall)); 4120 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 4121 NewCallSeqStart.getNode()); 4122 Chain = CallSeqStart = NewCallSeqStart; 4123 4124 // Pass the address of the aggregate copy on the stack either in a 4125 // physical register or in the parameter list area of the current stack 4126 // frame to the callee. 4127 Arg = PtrOff; 4128 } 4129 4130 if (VA.isRegLoc()) { 4131 if (Arg.getValueType() == MVT::i1) 4132 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg); 4133 4134 seenFloatArg |= VA.getLocVT().isFloatingPoint(); 4135 // Put argument in a physical register. 4136 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 4137 } else { 4138 // Put argument in the parameter list area of the current stack frame. 4139 assert(VA.isMemLoc()); 4140 unsigned LocMemOffset = VA.getLocMemOffset(); 4141 4142 if (!isTailCall) { 4143 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 4144 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 4145 4146 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, 4147 MachinePointerInfo(), 4148 false, false, 0)); 4149 } else { 4150 // Calculate and remember argument location. 4151 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset, 4152 TailCallArguments); 4153 } 4154 } 4155 } 4156 4157 if (!MemOpChains.empty()) 4158 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 4159 4160 // Build a sequence of copy-to-reg nodes chained together with token chain 4161 // and flag operands which copy the outgoing args into the appropriate regs. 4162 SDValue InFlag; 4163 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 4164 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 4165 RegsToPass[i].second, InFlag); 4166 InFlag = Chain.getValue(1); 4167 } 4168 4169 // Set CR bit 6 to true if this is a vararg call with floating args passed in 4170 // registers. 4171 if (isVarArg) { 4172 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 4173 SDValue Ops[] = { Chain, InFlag }; 4174 4175 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET, 4176 dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1)); 4177 4178 InFlag = Chain.getValue(1); 4179 } 4180 4181 if (isTailCall) 4182 PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp, 4183 false, TailCallArguments); 4184 4185 return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG, 4186 RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes, 4187 Ins, InVals); 4188 } 4189 4190 // Copy an argument into memory, being careful to do this outside the 4191 // call sequence for the call to which the argument belongs. 
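// A memcpy may itself be lowered to a libcall, and call sequences cannot
// nest, so the copy is chained in ahead of the CALLSEQ_START and a
// replacement CALLSEQ_START is created on top of it.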
4192 SDValue 4193 PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff, 4194 SDValue CallSeqStart, 4195 ISD::ArgFlagsTy Flags, 4196 SelectionDAG &DAG, 4197 SDLoc dl) const { 4198 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, 4199 CallSeqStart.getNode()->getOperand(0), 4200 Flags, DAG, dl); 4201 // The MEMCPY must go outside the CALLSEQ_START..END. 4202 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 4203 CallSeqStart.getNode()->getOperand(1), 4204 SDLoc(MemcpyCall)); 4205 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 4206 NewCallSeqStart.getNode()); 4207 return NewCallSeqStart; 4208 } 4209 4210 SDValue 4211 PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee, 4212 CallingConv::ID CallConv, bool isVarArg, 4213 bool isTailCall, 4214 const SmallVectorImpl<ISD::OutputArg> &Outs, 4215 const SmallVectorImpl<SDValue> &OutVals, 4216 const SmallVectorImpl<ISD::InputArg> &Ins, 4217 SDLoc dl, SelectionDAG &DAG, 4218 SmallVectorImpl<SDValue> &InVals) const { 4219 4220 bool isELFv2ABI = Subtarget.isELFv2ABI(); 4221 bool isLittleEndian = Subtarget.isLittleEndian(); 4222 unsigned NumOps = Outs.size(); 4223 4224 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 4225 unsigned PtrByteSize = 8; 4226 4227 MachineFunction &MF = DAG.getMachineFunction(); 4228 4229 // Mark this function as potentially containing a function that contains a 4230 // tail call. As a consequence the frame pointer will be used for dynamicalloc 4231 // and restoring the callers stack pointer in this functions epilog. This is 4232 // done because by tail calling the called function might overwrite the value 4233 // in this function's (MF) stack pointer stack slot 0(SP). 4234 if (getTargetMachine().Options.GuaranteedTailCallOpt && 4235 CallConv == CallingConv::Fast) 4236 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 4237 4238 // Count how many bytes are to be pushed on the stack, including the linkage 4239 // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes 4240 // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage 4241 // area is 32 bytes reserved space for [SP][CR][LR][TOC]. 4242 unsigned LinkageSize = PPCFrameLowering::getLinkageSize(true, false, 4243 isELFv2ABI); 4244 unsigned NumBytes = LinkageSize; 4245 4246 // Add up all the space actually used. 4247 for (unsigned i = 0; i != NumOps; ++i) { 4248 ISD::ArgFlagsTy Flags = Outs[i].Flags; 4249 EVT ArgVT = Outs[i].VT; 4250 EVT OrigVT = Outs[i].ArgVT; 4251 4252 /* Respect alignment of argument on the stack. */ 4253 unsigned Align = 4254 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 4255 NumBytes = ((NumBytes + Align - 1) / Align) * Align; 4256 4257 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 4258 if (Flags.isInConsecutiveRegsLast()) 4259 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 4260 } 4261 4262 unsigned NumBytesActuallyUsed = NumBytes; 4263 4264 // The prolog code of the callee may store up to 8 GPR argument registers to 4265 // the stack, allowing va_start to index over them in memory if its varargs. 4266 // Because we cannot tell if this is needed on the caller side, we have to 4267 // conservatively assume that it is needed. As such, make sure we have at 4268 // least enough stack space for the caller to store the 8 GPRs. 4269 // FIXME: On ELFv2, it may be unnecessary to allocate the parameter area. 
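  // For example, with the 48-byte ELFv1 linkage area this reserves at least
  // 48 + 8 * 8 = 112 bytes; with the 32-byte ELFv2 linkage area, at least
  // 96 bytes.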
4270 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize); 4271 4272 // Tail call needs the stack to be aligned. 4273 if (getTargetMachine().Options.GuaranteedTailCallOpt && 4274 CallConv == CallingConv::Fast) 4275 NumBytes = EnsureStackAlignment(MF.getTarget(), NumBytes); 4276 4277 // Calculate by how many bytes the stack has to be adjusted in case of tail 4278 // call optimization. 4279 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 4280 4281 // To protect arguments on the stack from being clobbered in a tail call, 4282 // force all the loads to happen before doing any other lowering. 4283 if (isTailCall) 4284 Chain = DAG.getStackArgumentTokenFactor(Chain); 4285 4286 // Adjust the stack pointer for the new arguments... 4287 // These operations are automatically eliminated by the prolog/epilog pass 4288 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true), 4289 dl); 4290 SDValue CallSeqStart = Chain; 4291 4292 // Load the return address and frame pointer so it can be move somewhere else 4293 // later. 4294 SDValue LROp, FPOp; 4295 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true, 4296 dl); 4297 4298 // Set up a copy of the stack pointer for use loading and storing any 4299 // arguments that may not fit in the registers available for argument 4300 // passing. 4301 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 4302 4303 // Figure out which arguments are going to go in registers, and which in 4304 // memory. Also, if this is a vararg function, floating point operations 4305 // must be stored to our stack, and loaded into integer regs as well, if 4306 // any integer regs are available for argument passing. 4307 unsigned ArgOffset = LinkageSize; 4308 unsigned GPR_idx, FPR_idx = 0, VR_idx = 0; 4309 4310 static const MCPhysReg GPR[] = { 4311 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 4312 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 4313 }; 4314 static const MCPhysReg *FPR = GetFPR(); 4315 4316 static const MCPhysReg VR[] = { 4317 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 4318 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 4319 }; 4320 static const MCPhysReg VSRH[] = { 4321 PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8, 4322 PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13 4323 }; 4324 4325 const unsigned NumGPRs = array_lengthof(GPR); 4326 const unsigned NumFPRs = 13; 4327 const unsigned NumVRs = array_lengthof(VR); 4328 4329 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 4330 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 4331 4332 SmallVector<SDValue, 8> MemOpChains; 4333 for (unsigned i = 0; i != NumOps; ++i) { 4334 SDValue Arg = OutVals[i]; 4335 ISD::ArgFlagsTy Flags = Outs[i].Flags; 4336 EVT ArgVT = Outs[i].VT; 4337 EVT OrigVT = Outs[i].ArgVT; 4338 4339 /* Respect alignment of argument on the stack. */ 4340 unsigned Align = 4341 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 4342 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 4343 4344 /* Compute GPR index associated with argument offset. */ 4345 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 4346 GPR_idx = std::min(GPR_idx, NumGPRs); 4347 4348 // PtrOff will be used to store the current argument to the stack if a 4349 // register cannot be found for it. 4350 SDValue PtrOff; 4351 4352 PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); 4353 4354 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 4355 4356 // Promote integers to 64-bit values. 
4357 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) { 4358 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 4359 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 4360 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 4361 } 4362 4363 // FIXME memcpy is used way more than necessary. Correctness first. 4364 // Note: "by value" is code for passing a structure by value, not 4365 // basic types. 4366 if (Flags.isByVal()) { 4367 // Note: Size includes alignment padding, so 4368 // struct x { short a; char b; } 4369 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 4370 // These are the proper values we need for right-justifying the 4371 // aggregate in a parameter register. 4372 unsigned Size = Flags.getByValSize(); 4373 4374 // An empty aggregate parameter takes up no storage and no 4375 // registers. 4376 if (Size == 0) 4377 continue; 4378 4379 // All aggregates smaller than 8 bytes must be passed right-justified. 4380 if (Size==1 || Size==2 || Size==4) { 4381 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 4382 if (GPR_idx != NumGPRs) { 4383 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 4384 MachinePointerInfo(), VT, 4385 false, false, false, 0); 4386 MemOpChains.push_back(Load.getValue(1)); 4387 RegsToPass.push_back(std::make_pair(GPR[GPR_idx], Load)); 4388 4389 ArgOffset += PtrByteSize; 4390 continue; 4391 } 4392 } 4393 4394 if (GPR_idx == NumGPRs && Size < 8) { 4395 SDValue AddPtr = PtrOff; 4396 if (!isLittleEndian) { 4397 SDValue Const = DAG.getConstant(PtrByteSize - Size, 4398 PtrOff.getValueType()); 4399 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 4400 } 4401 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 4402 CallSeqStart, 4403 Flags, DAG, dl); 4404 ArgOffset += PtrByteSize; 4405 continue; 4406 } 4407 // Copy entire object into memory. There are cases where gcc-generated 4408 // code assumes it is there, even if it could be put entirely into 4409 // registers. (This is not what the doc says.) 4410 4411 // FIXME: The above statement is likely due to a misunderstanding of the 4412 // documents. All arguments must be copied into the parameter area BY 4413 // THE CALLEE in the event that the callee takes the address of any 4414 // formal argument. That has not yet been implemented. However, it is 4415 // reasonable to use the stack area as a staging area for the register 4416 // load. 4417 4418 // Skip this for small aggregates, as we will use the same slot for a 4419 // right-justified copy, below. 4420 if (Size >= 8) 4421 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 4422 CallSeqStart, 4423 Flags, DAG, dl); 4424 4425 // When a register is available, pass a small aggregate right-justified. 4426 if (Size < 8 && GPR_idx != NumGPRs) { 4427 // The easiest way to get this right-justified in a register 4428 // is to copy the structure into the rightmost portion of a 4429 // local variable slot, then load the whole slot into the 4430 // register. 4431 // FIXME: The memcpy seems to produce pretty awful code for 4432 // small aggregates, particularly for packed ones. 4433 // FIXME: It would be preferable to use the slot in the 4434 // parameter save area instead of a new local variable. 
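        // For example, on a big-endian target a 3-byte aggregate is copied
        // into bytes 5..7 of the doubleword slot (offset 8 - Size), so the
        // full-doubleword load below leaves it right-justified in the GPR.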
4435 SDValue AddPtr = PtrOff; 4436 if (!isLittleEndian) { 4437 SDValue Const = DAG.getConstant(8 - Size, PtrOff.getValueType()); 4438 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 4439 } 4440 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 4441 CallSeqStart, 4442 Flags, DAG, dl); 4443 4444 // Load the slot into the register. 4445 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, PtrOff, 4446 MachinePointerInfo(), 4447 false, false, false, 0); 4448 MemOpChains.push_back(Load.getValue(1)); 4449 RegsToPass.push_back(std::make_pair(GPR[GPR_idx], Load)); 4450 4451 // Done with this argument. 4452 ArgOffset += PtrByteSize; 4453 continue; 4454 } 4455 4456 // For aggregates larger than PtrByteSize, copy the pieces of the 4457 // object that fit into registers from the parameter save area. 4458 for (unsigned j=0; j<Size; j+=PtrByteSize) { 4459 SDValue Const = DAG.getConstant(j, PtrOff.getValueType()); 4460 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 4461 if (GPR_idx != NumGPRs) { 4462 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 4463 MachinePointerInfo(), 4464 false, false, false, 0); 4465 MemOpChains.push_back(Load.getValue(1)); 4466 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4467 ArgOffset += PtrByteSize; 4468 } else { 4469 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 4470 break; 4471 } 4472 } 4473 continue; 4474 } 4475 4476 switch (Arg.getSimpleValueType().SimpleTy) { 4477 default: llvm_unreachable("Unexpected ValueType for argument!"); 4478 case MVT::i1: 4479 case MVT::i32: 4480 case MVT::i64: 4481 // These can be scalar arguments or elements of an integer array type 4482 // passed directly. Clang may use those instead of "byval" aggregate 4483 // types to avoid forcing arguments to memory unnecessarily. 4484 if (GPR_idx != NumGPRs) { 4485 RegsToPass.push_back(std::make_pair(GPR[GPR_idx], Arg)); 4486 } else { 4487 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4488 true, isTailCall, false, MemOpChains, 4489 TailCallArguments, dl); 4490 } 4491 ArgOffset += PtrByteSize; 4492 break; 4493 case MVT::f32: 4494 case MVT::f64: { 4495 // These can be scalar arguments or elements of a float array type 4496 // passed directly. The latter are used to implement ELFv2 homogenous 4497 // float aggregates. 4498 4499 // Named arguments go into FPRs first, and once they overflow, the 4500 // remaining arguments go into GPRs and then the parameter save area. 4501 // Unnamed arguments for vararg functions always go to GPRs and 4502 // then the parameter save area. For now, put all arguments to vararg 4503 // routines always in both locations (FPR *and* GPR or stack slot). 4504 bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs; 4505 4506 // First load the argument into the next available FPR. 4507 if (FPR_idx != NumFPRs) 4508 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 4509 4510 // Next, load the argument into GPR or stack slot if needed. 4511 if (!NeedGPROrStack) 4512 ; 4513 else if (GPR_idx != NumGPRs) { 4514 // In the non-vararg case, this can only ever happen in the 4515 // presence of f32 array types, since otherwise we never run 4516 // out of FPRs before running out of GPRs. 4517 SDValue ArgVal; 4518 4519 // Double values are always passed in a single GPR. 4520 if (Arg.getValueType() != MVT::f32) { 4521 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg); 4522 4523 // Non-array float values are extended and passed in a GPR. 
4524 } else if (!Flags.isInConsecutiveRegs()) { 4525 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 4526 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal); 4527 4528 // If we have an array of floats, we collect every odd element 4529 // together with its predecessor into one GPR. 4530 } else if (ArgOffset % PtrByteSize != 0) { 4531 SDValue Lo, Hi; 4532 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]); 4533 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 4534 if (!isLittleEndian) 4535 std::swap(Lo, Hi); 4536 ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 4537 4538 // The final element, if even, goes into the first half of a GPR. 4539 } else if (Flags.isInConsecutiveRegsLast()) { 4540 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 4541 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal); 4542 if (!isLittleEndian) 4543 ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal, 4544 DAG.getConstant(32, MVT::i32)); 4545 4546 // Non-final even elements are skipped; they will be handled 4547 // together the with subsequent argument on the next go-around. 4548 } else 4549 ArgVal = SDValue(); 4550 4551 if (ArgVal.getNode()) 4552 RegsToPass.push_back(std::make_pair(GPR[GPR_idx], ArgVal)); 4553 } else { 4554 // Single-precision floating-point values are mapped to the 4555 // second (rightmost) word of the stack doubleword. 4556 if (Arg.getValueType() == MVT::f32 && 4557 !isLittleEndian && !Flags.isInConsecutiveRegs()) { 4558 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 4559 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 4560 } 4561 4562 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4563 true, isTailCall, false, MemOpChains, 4564 TailCallArguments, dl); 4565 } 4566 // When passing an array of floats, the array occupies consecutive 4567 // space in the argument area; only round up to the next doubleword 4568 // at the end of the array. Otherwise, each float takes 8 bytes. 4569 ArgOffset += (Arg.getValueType() == MVT::f32 && 4570 Flags.isInConsecutiveRegs()) ? 4 : 8; 4571 if (Flags.isInConsecutiveRegsLast()) 4572 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 4573 break; 4574 } 4575 case MVT::v4f32: 4576 case MVT::v4i32: 4577 case MVT::v8i16: 4578 case MVT::v16i8: 4579 case MVT::v2f64: 4580 case MVT::v2i64: 4581 // These can be scalar arguments or elements of a vector array type 4582 // passed directly. The latter are used to implement ELFv2 homogenous 4583 // vector aggregates. 4584 4585 // For a varargs call, named arguments go into VRs or on the stack as 4586 // usual; unnamed arguments always go to the stack or the corresponding 4587 // GPRs when within range. For now, we always put the value in both 4588 // locations (or even all three). 4589 if (isVarArg) { 4590 // We could elide this store in the case where the object fits 4591 // entirely in R registers. Maybe later. 4592 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 4593 MachinePointerInfo(), false, false, 0); 4594 MemOpChains.push_back(Store); 4595 if (VR_idx != NumVRs) { 4596 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 4597 MachinePointerInfo(), 4598 false, false, false, 0); 4599 MemOpChains.push_back(Load.getValue(1)); 4600 4601 unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 || 4602 Arg.getSimpleValueType() == MVT::v2i64) ? 
4603 VSRH[VR_idx] : VR[VR_idx]; 4604 ++VR_idx; 4605 4606 RegsToPass.push_back(std::make_pair(VReg, Load)); 4607 } 4608 ArgOffset += 16; 4609 for (unsigned i=0; i<16; i+=PtrByteSize) { 4610 if (GPR_idx == NumGPRs) 4611 break; 4612 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 4613 DAG.getConstant(i, PtrVT)); 4614 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 4615 false, false, false, 0); 4616 MemOpChains.push_back(Load.getValue(1)); 4617 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4618 } 4619 break; 4620 } 4621 4622 // Non-varargs Altivec params go into VRs or on the stack. 4623 if (VR_idx != NumVRs) { 4624 unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 || 4625 Arg.getSimpleValueType() == MVT::v2i64) ? 4626 VSRH[VR_idx] : VR[VR_idx]; 4627 ++VR_idx; 4628 4629 RegsToPass.push_back(std::make_pair(VReg, Arg)); 4630 } else { 4631 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4632 true, isTailCall, true, MemOpChains, 4633 TailCallArguments, dl); 4634 } 4635 ArgOffset += 16; 4636 break; 4637 } 4638 } 4639 4640 assert(NumBytesActuallyUsed == ArgOffset); 4641 (void)NumBytesActuallyUsed; 4642 4643 if (!MemOpChains.empty()) 4644 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 4645 4646 // Check if this is an indirect call (MTCTR/BCTRL). 4647 // See PrepareCall() for more information about calls through function 4648 // pointers in the 64-bit SVR4 ABI. 4649 if (!isTailCall && 4650 !dyn_cast<GlobalAddressSDNode>(Callee) && 4651 !dyn_cast<ExternalSymbolSDNode>(Callee)) { 4652 // Load r2 into a virtual register and store it to the TOC save area. 4653 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64); 4654 // TOC save area offset. 4655 unsigned TOCSaveOffset = PPCFrameLowering::getTOCSaveOffset(isELFv2ABI); 4656 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset); 4657 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 4658 Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr, MachinePointerInfo(), 4659 false, false, 0); 4660 // In the ELFv2 ABI, R12 must contain the address of an indirect callee. 4661 // This does not mean the MTCTR instruction must use R12; it's easier 4662 // to model this as an extra parameter, so do that. 4663 if (isELFv2ABI) 4664 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee)); 4665 } 4666 4667 // Build a sequence of copy-to-reg nodes chained together with token chain 4668 // and flag operands which copy the outgoing args into the appropriate regs. 
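  // As an illustration, the loop below serializes the copies through glue:
  //
  //   Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, Arg0, InFlag);
  //   InFlag = Chain.getValue(1);
  //   Chain = DAG.getCopyToReg(Chain, dl, PPC::X4, Arg1, InFlag);
  //   ...
  //
  // so the scheduler cannot place unrelated nodes between the register
  // copies and the call that consumes the final glue value.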
4669 SDValue InFlag; 4670 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 4671 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 4672 RegsToPass[i].second, InFlag); 4673 InFlag = Chain.getValue(1); 4674 } 4675 4676 if (isTailCall) 4677 PrepareTailCall(DAG, InFlag, Chain, dl, true, SPDiff, NumBytes, LROp, 4678 FPOp, true, TailCallArguments); 4679 4680 return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG, 4681 RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes, 4682 Ins, InVals); 4683 } 4684 4685 SDValue 4686 PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee, 4687 CallingConv::ID CallConv, bool isVarArg, 4688 bool isTailCall, 4689 const SmallVectorImpl<ISD::OutputArg> &Outs, 4690 const SmallVectorImpl<SDValue> &OutVals, 4691 const SmallVectorImpl<ISD::InputArg> &Ins, 4692 SDLoc dl, SelectionDAG &DAG, 4693 SmallVectorImpl<SDValue> &InVals) const { 4694 4695 unsigned NumOps = Outs.size(); 4696 4697 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 4698 bool isPPC64 = PtrVT == MVT::i64; 4699 unsigned PtrByteSize = isPPC64 ? 8 : 4; 4700 4701 MachineFunction &MF = DAG.getMachineFunction(); 4702 4703 // Mark this function as potentially containing a function that contains a 4704 // tail call. As a consequence the frame pointer will be used for dynamicalloc 4705 // and restoring the callers stack pointer in this functions epilog. This is 4706 // done because by tail calling the called function might overwrite the value 4707 // in this function's (MF) stack pointer stack slot 0(SP). 4708 if (getTargetMachine().Options.GuaranteedTailCallOpt && 4709 CallConv == CallingConv::Fast) 4710 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 4711 4712 // Count how many bytes are to be pushed on the stack, including the linkage 4713 // area, and parameter passing area. We start with 24/48 bytes, which is 4714 // prereserved space for [SP][CR][LR][3 x unused]. 4715 unsigned LinkageSize = PPCFrameLowering::getLinkageSize(isPPC64, true, 4716 false); 4717 unsigned NumBytes = LinkageSize; 4718 4719 // Add up all the space actually used. 4720 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually 4721 // they all go in registers, but we must reserve stack space for them for 4722 // possible use by the caller. In varargs or 64-bit calls, parameters are 4723 // assigned stack space in order, with padding so Altivec parameters are 4724 // 16-byte aligned. 4725 unsigned nAltivecParamsAtEnd = 0; 4726 for (unsigned i = 0; i != NumOps; ++i) { 4727 ISD::ArgFlagsTy Flags = Outs[i].Flags; 4728 EVT ArgVT = Outs[i].VT; 4729 // Varargs Altivec parameters are padded to a 16 byte boundary. 4730 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 4731 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 4732 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) { 4733 if (!isVarArg && !isPPC64) { 4734 // Non-varargs Altivec parameters go after all the non-Altivec 4735 // parameters; handle those later so we know how much padding we need. 4736 nAltivecParamsAtEnd++; 4737 continue; 4738 } 4739 // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary. 4740 NumBytes = ((NumBytes+15)/16)*16; 4741 } 4742 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 4743 } 4744 4745 // Allow for Altivec parameters at the end, if needed. 
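  // For example (hypothetical sizes): with NumBytes == 52 and two deferred
  // Altivec parameters, the area below is first rounded up to 64 bytes and
  // then grows by 2 * 16, for 96 bytes in total.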
4746 if (nAltivecParamsAtEnd) { 4747 NumBytes = ((NumBytes+15)/16)*16; 4748 NumBytes += 16*nAltivecParamsAtEnd; 4749 } 4750 4751 // The prolog code of the callee may store up to 8 GPR argument registers to 4752 // the stack, allowing va_start to index over them in memory if its varargs. 4753 // Because we cannot tell if this is needed on the caller side, we have to 4754 // conservatively assume that it is needed. As such, make sure we have at 4755 // least enough stack space for the caller to store the 8 GPRs. 4756 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize); 4757 4758 // Tail call needs the stack to be aligned. 4759 if (getTargetMachine().Options.GuaranteedTailCallOpt && 4760 CallConv == CallingConv::Fast) 4761 NumBytes = EnsureStackAlignment(MF.getTarget(), NumBytes); 4762 4763 // Calculate by how many bytes the stack has to be adjusted in case of tail 4764 // call optimization. 4765 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 4766 4767 // To protect arguments on the stack from being clobbered in a tail call, 4768 // force all the loads to happen before doing any other lowering. 4769 if (isTailCall) 4770 Chain = DAG.getStackArgumentTokenFactor(Chain); 4771 4772 // Adjust the stack pointer for the new arguments... 4773 // These operations are automatically eliminated by the prolog/epilog pass 4774 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true), 4775 dl); 4776 SDValue CallSeqStart = Chain; 4777 4778 // Load the return address and frame pointer so it can be move somewhere else 4779 // later. 4780 SDValue LROp, FPOp; 4781 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true, 4782 dl); 4783 4784 // Set up a copy of the stack pointer for use loading and storing any 4785 // arguments that may not fit in the registers available for argument 4786 // passing. 4787 SDValue StackPtr; 4788 if (isPPC64) 4789 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 4790 else 4791 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 4792 4793 // Figure out which arguments are going to go in registers, and which in 4794 // memory. Also, if this is a vararg function, floating point operations 4795 // must be stored to our stack, and loaded into integer regs as well, if 4796 // any integer regs are available for argument passing. 4797 unsigned ArgOffset = LinkageSize; 4798 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 4799 4800 static const MCPhysReg GPR_32[] = { // 32-bit registers. 4801 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 4802 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 4803 }; 4804 static const MCPhysReg GPR_64[] = { // 64-bit registers. 4805 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 4806 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 4807 }; 4808 static const MCPhysReg *FPR = GetFPR(); 4809 4810 static const MCPhysReg VR[] = { 4811 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 4812 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 4813 }; 4814 const unsigned NumGPRs = array_lengthof(GPR_32); 4815 const unsigned NumFPRs = 13; 4816 const unsigned NumVRs = array_lengthof(VR); 4817 4818 const MCPhysReg *GPR = isPPC64 ? 
GPR_64 : GPR_32; 4819 4820 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 4821 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 4822 4823 SmallVector<SDValue, 8> MemOpChains; 4824 for (unsigned i = 0; i != NumOps; ++i) { 4825 SDValue Arg = OutVals[i]; 4826 ISD::ArgFlagsTy Flags = Outs[i].Flags; 4827 4828 // PtrOff will be used to store the current argument to the stack if a 4829 // register cannot be found for it. 4830 SDValue PtrOff; 4831 4832 PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); 4833 4834 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 4835 4836 // On PPC64, promote integers to 64-bit values. 4837 if (isPPC64 && Arg.getValueType() == MVT::i32) { 4838 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 4839 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 4840 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 4841 } 4842 4843 // FIXME memcpy is used way more than necessary. Correctness first. 4844 // Note: "by value" is code for passing a structure by value, not 4845 // basic types. 4846 if (Flags.isByVal()) { 4847 unsigned Size = Flags.getByValSize(); 4848 // Very small objects are passed right-justified. Everything else is 4849 // passed left-justified. 4850 if (Size==1 || Size==2) { 4851 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 4852 if (GPR_idx != NumGPRs) { 4853 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 4854 MachinePointerInfo(), VT, 4855 false, false, false, 0); 4856 MemOpChains.push_back(Load.getValue(1)); 4857 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4858 4859 ArgOffset += PtrByteSize; 4860 } else { 4861 SDValue Const = DAG.getConstant(PtrByteSize - Size, 4862 PtrOff.getValueType()); 4863 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 4864 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 4865 CallSeqStart, 4866 Flags, DAG, dl); 4867 ArgOffset += PtrByteSize; 4868 } 4869 continue; 4870 } 4871 // Copy entire object into memory. There are cases where gcc-generated 4872 // code assumes it is there, even if it could be put entirely into 4873 // registers. (This is not what the doc says.) 4874 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 4875 CallSeqStart, 4876 Flags, DAG, dl); 4877 4878 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 4879 // copy the pieces of the object that fit into registers from the 4880 // parameter save area. 
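      // For illustration (hypothetical argument): a 12-byte aggregate with
      // PtrByteSize == 4 is visited at offsets 0, 4 and 8; each piece with a
      // GPR still available is loaded from the copy made above and passed in
      // that register, and once the GPRs are exhausted the remainder is left
      // in the parameter save area only.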
4881 for (unsigned j=0; j<Size; j+=PtrByteSize) { 4882 SDValue Const = DAG.getConstant(j, PtrOff.getValueType()); 4883 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 4884 if (GPR_idx != NumGPRs) { 4885 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 4886 MachinePointerInfo(), 4887 false, false, false, 0); 4888 MemOpChains.push_back(Load.getValue(1)); 4889 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4890 ArgOffset += PtrByteSize; 4891 } else { 4892 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 4893 break; 4894 } 4895 } 4896 continue; 4897 } 4898 4899 switch (Arg.getSimpleValueType().SimpleTy) { 4900 default: llvm_unreachable("Unexpected ValueType for argument!"); 4901 case MVT::i1: 4902 case MVT::i32: 4903 case MVT::i64: 4904 if (GPR_idx != NumGPRs) { 4905 if (Arg.getValueType() == MVT::i1) 4906 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg); 4907 4908 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 4909 } else { 4910 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4911 isPPC64, isTailCall, false, MemOpChains, 4912 TailCallArguments, dl); 4913 } 4914 ArgOffset += PtrByteSize; 4915 break; 4916 case MVT::f32: 4917 case MVT::f64: 4918 if (FPR_idx != NumFPRs) { 4919 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 4920 4921 if (isVarArg) { 4922 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 4923 MachinePointerInfo(), false, false, 0); 4924 MemOpChains.push_back(Store); 4925 4926 // Float varargs are always shadowed in available integer registers 4927 if (GPR_idx != NumGPRs) { 4928 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 4929 MachinePointerInfo(), false, false, 4930 false, 0); 4931 MemOpChains.push_back(Load.getValue(1)); 4932 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4933 } 4934 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 4935 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 4936 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 4937 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 4938 MachinePointerInfo(), 4939 false, false, false, 0); 4940 MemOpChains.push_back(Load.getValue(1)); 4941 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4942 } 4943 } else { 4944 // If we have any FPRs remaining, we may also have GPRs remaining. 4945 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 4946 // GPRs. 4947 if (GPR_idx != NumGPRs) 4948 ++GPR_idx; 4949 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 4950 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 4951 ++GPR_idx; 4952 } 4953 } else 4954 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4955 isPPC64, isTailCall, false, MemOpChains, 4956 TailCallArguments, dl); 4957 if (isPPC64) 4958 ArgOffset += 8; 4959 else 4960 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 4961 break; 4962 case MVT::v4f32: 4963 case MVT::v4i32: 4964 case MVT::v8i16: 4965 case MVT::v16i8: 4966 if (isVarArg) { 4967 // These go aligned on the stack, or in the corresponding R registers 4968 // when within range. The Darwin PPC ABI doc claims they also go in 4969 // V registers; in fact gcc does this only for arguments that are 4970 // prototyped, not for those that match the ... We do it for all 4971 // arguments, seems to work. 
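        // For illustration (32-bit Darwin, PtrByteSize == 4, hypothetical
        // offsets): a varargs v4i32 argument reached at ArgOffset == 28 is
        // padded to offset 32 (consuming one GPR for the padding), stored to
        // the parameter area, reloaded into a V register if one is free, and
        // also reloaded piecewise into up to four GPRs.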
4972 while (ArgOffset % 16 !=0) { 4973 ArgOffset += PtrByteSize; 4974 if (GPR_idx != NumGPRs) 4975 GPR_idx++; 4976 } 4977 // We could elide this store in the case where the object fits 4978 // entirely in R registers. Maybe later. 4979 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4980 DAG.getConstant(ArgOffset, PtrVT)); 4981 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 4982 MachinePointerInfo(), false, false, 0); 4983 MemOpChains.push_back(Store); 4984 if (VR_idx != NumVRs) { 4985 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 4986 MachinePointerInfo(), 4987 false, false, false, 0); 4988 MemOpChains.push_back(Load.getValue(1)); 4989 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 4990 } 4991 ArgOffset += 16; 4992 for (unsigned i=0; i<16; i+=PtrByteSize) { 4993 if (GPR_idx == NumGPRs) 4994 break; 4995 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 4996 DAG.getConstant(i, PtrVT)); 4997 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 4998 false, false, false, 0); 4999 MemOpChains.push_back(Load.getValue(1)); 5000 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5001 } 5002 break; 5003 } 5004 5005 // Non-varargs Altivec params generally go in registers, but have 5006 // stack space allocated at the end. 5007 if (VR_idx != NumVRs) { 5008 // Doesn't have GPR space allocated. 5009 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 5010 } else if (nAltivecParamsAtEnd==0) { 5011 // We are emitting Altivec params in order. 5012 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5013 isPPC64, isTailCall, true, MemOpChains, 5014 TailCallArguments, dl); 5015 ArgOffset += 16; 5016 } 5017 break; 5018 } 5019 } 5020 // If all Altivec parameters fit in registers, as they usually do, 5021 // they get stack space following the non-Altivec parameters. We 5022 // don't track this here because nobody below needs it. 5023 // If there are more Altivec parameters than fit in registers emit 5024 // the stores here. 5025 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 5026 unsigned j = 0; 5027 // Offset is aligned; skip 1st 12 params which go in V registers. 5028 ArgOffset = ((ArgOffset+15)/16)*16; 5029 ArgOffset += 12*16; 5030 for (unsigned i = 0; i != NumOps; ++i) { 5031 SDValue Arg = OutVals[i]; 5032 EVT ArgType = Outs[i].VT; 5033 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 5034 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 5035 if (++j > NumVRs) { 5036 SDValue PtrOff; 5037 // We are emitting Altivec params in order. 5038 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5039 isPPC64, isTailCall, true, MemOpChains, 5040 TailCallArguments, dl); 5041 ArgOffset += 16; 5042 } 5043 } 5044 } 5045 } 5046 5047 if (!MemOpChains.empty()) 5048 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5049 5050 // On Darwin, R12 must contain the address of an indirect callee. This does 5051 // not mean the MTCTR instruction must use R12; it's easier to model this as 5052 // an extra parameter, so do that. 5053 if (!isTailCall && 5054 !dyn_cast<GlobalAddressSDNode>(Callee) && 5055 !dyn_cast<ExternalSymbolSDNode>(Callee) && 5056 !isBLACompatibleAddress(Callee, DAG)) 5057 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 : 5058 PPC::R12), Callee)); 5059 5060 // Build a sequence of copy-to-reg nodes chained together with token chain 5061 // and flag operands which copy the outgoing args into the appropriate regs. 
5062 SDValue InFlag; 5063 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 5064 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 5065 RegsToPass[i].second, InFlag); 5066 InFlag = Chain.getValue(1); 5067 } 5068 5069 if (isTailCall) 5070 PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp, 5071 FPOp, true, TailCallArguments); 5072 5073 return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG, 5074 RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes, 5075 Ins, InVals); 5076 } 5077 5078 bool 5079 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 5080 MachineFunction &MF, bool isVarArg, 5081 const SmallVectorImpl<ISD::OutputArg> &Outs, 5082 LLVMContext &Context) const { 5083 SmallVector<CCValAssign, 16> RVLocs; 5084 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 5085 return CCInfo.CheckReturn(Outs, RetCC_PPC); 5086 } 5087 5088 SDValue 5089 PPCTargetLowering::LowerReturn(SDValue Chain, 5090 CallingConv::ID CallConv, bool isVarArg, 5091 const SmallVectorImpl<ISD::OutputArg> &Outs, 5092 const SmallVectorImpl<SDValue> &OutVals, 5093 SDLoc dl, SelectionDAG &DAG) const { 5094 5095 SmallVector<CCValAssign, 16> RVLocs; 5096 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 5097 *DAG.getContext()); 5098 CCInfo.AnalyzeReturn(Outs, RetCC_PPC); 5099 5100 SDValue Flag; 5101 SmallVector<SDValue, 4> RetOps(1, Chain); 5102 5103 // Copy the result values into the output registers. 5104 for (unsigned i = 0; i != RVLocs.size(); ++i) { 5105 CCValAssign &VA = RVLocs[i]; 5106 assert(VA.isRegLoc() && "Can only return in registers!"); 5107 5108 SDValue Arg = OutVals[i]; 5109 5110 switch (VA.getLocInfo()) { 5111 default: llvm_unreachable("Unknown loc info!"); 5112 case CCValAssign::Full: break; 5113 case CCValAssign::AExt: 5114 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 5115 break; 5116 case CCValAssign::ZExt: 5117 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 5118 break; 5119 case CCValAssign::SExt: 5120 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 5121 break; 5122 } 5123 5124 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 5125 Flag = Chain.getValue(1); 5126 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 5127 } 5128 5129 RetOps[0] = Chain; // Update chain. 5130 5131 // Add the flag if we have it. 5132 if (Flag.getNode()) 5133 RetOps.push_back(Flag); 5134 5135 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps); 5136 } 5137 5138 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG, 5139 const PPCSubtarget &Subtarget) const { 5140 // When we pop the dynamic allocation we need to restore the SP link. 5141 SDLoc dl(Op); 5142 5143 // Get the corect type for pointers. 5144 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 5145 5146 // Construct the stack pointer operand. 5147 bool isPPC64 = Subtarget.isPPC64(); 5148 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1; 5149 SDValue StackPtr = DAG.getRegister(SP, PtrVT); 5150 5151 // Get the operands for the STACKRESTORE. 5152 SDValue Chain = Op.getOperand(0); 5153 SDValue SaveSP = Op.getOperand(1); 5154 5155 // Load the old link SP. 5156 SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr, 5157 MachinePointerInfo(), 5158 false, false, false, 0); 5159 5160 // Restore the stack pointer. 5161 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP); 5162 5163 // Store the old link SP. 
  return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo(),
                      false, false, 0);
}



SDValue
PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = Subtarget.isPPC64();
  bool isDarwinABI = Subtarget.isDarwinABI();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  // Get the current return address save index. The users of this index will
  // be primarily the RETURNADDR lowering.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int RASI = FI->getReturnAddrSaveIndex();

  // If the return address save index hasn't been defined yet.
  if (!RASI) {
    // Find out the fixed offset of the return address (LR) save area.
    int LROffset = PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI);
    // Allocate the frame index for the return address save area.
    RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
    // Save the result.
    FI->setReturnAddrSaveIndex(RASI);
  }
  return DAG.getFrameIndex(RASI, PtrVT);
}

SDValue
PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = Subtarget.isPPC64();
  bool isDarwinABI = Subtarget.isDarwinABI();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  // Get the current frame pointer save index. The users of this index will be
  // primarily DYNALLOC instructions.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int FPSI = FI->getFramePointerSaveIndex();

  // If the frame pointer save index hasn't been defined yet.
  if (!FPSI) {
    // Find out the fixed offset of the frame pointer save area.
    int FPOffset = PPCFrameLowering::getFramePointerSaveOffset(isPPC64,
                                                               isDarwinABI);

    // Allocate the frame index for the frame pointer save area.
    FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
    // Save the result.
    FI->setFramePointerSaveIndex(FPSI);
  }
  return DAG.getFrameIndex(FPSI, PtrVT);
}

SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                   SelectionDAG &DAG,
                                                   const PPCSubtarget &Subtarget) const {
  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDLoc dl(Op);

  // Get the correct type for pointers.
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  // Negate the size.
  SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
                                DAG.getConstant(0, PtrVT), Size);
  // Construct a node for the frame pointer save index.
  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
  // Build a DYNALLOC node.
5236 SDValue Ops[3] = { Chain, NegSize, FPSIdx }; 5237 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other); 5238 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops); 5239 } 5240 5241 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, 5242 SelectionDAG &DAG) const { 5243 SDLoc DL(Op); 5244 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL, 5245 DAG.getVTList(MVT::i32, MVT::Other), 5246 Op.getOperand(0), Op.getOperand(1)); 5247 } 5248 5249 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, 5250 SelectionDAG &DAG) const { 5251 SDLoc DL(Op); 5252 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other, 5253 Op.getOperand(0), Op.getOperand(1)); 5254 } 5255 5256 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 5257 assert(Op.getValueType() == MVT::i1 && 5258 "Custom lowering only for i1 loads"); 5259 5260 // First, load 8 bits into 32 bits, then truncate to 1 bit. 5261 5262 SDLoc dl(Op); 5263 LoadSDNode *LD = cast<LoadSDNode>(Op); 5264 5265 SDValue Chain = LD->getChain(); 5266 SDValue BasePtr = LD->getBasePtr(); 5267 MachineMemOperand *MMO = LD->getMemOperand(); 5268 5269 SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(), Chain, 5270 BasePtr, MVT::i8, MMO); 5271 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD); 5272 5273 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) }; 5274 return DAG.getMergeValues(Ops, dl); 5275 } 5276 5277 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 5278 assert(Op.getOperand(1).getValueType() == MVT::i1 && 5279 "Custom lowering only for i1 stores"); 5280 5281 // First, zero extend to 32 bits, then use a truncating store to 8 bits. 5282 5283 SDLoc dl(Op); 5284 StoreSDNode *ST = cast<StoreSDNode>(Op); 5285 5286 SDValue Chain = ST->getChain(); 5287 SDValue BasePtr = ST->getBasePtr(); 5288 SDValue Value = ST->getValue(); 5289 MachineMemOperand *MMO = ST->getMemOperand(); 5290 5291 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(), Value); 5292 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO); 5293 } 5294 5295 // FIXME: Remove this once the ANDI glue bug is fixed: 5296 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 5297 assert(Op.getValueType() == MVT::i1 && 5298 "Custom lowering only for i1 results"); 5299 5300 SDLoc DL(Op); 5301 return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1, 5302 Op.getOperand(0)); 5303 } 5304 5305 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when 5306 /// possible. 5307 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 5308 // Not FP? Not a fsel. 5309 if (!Op.getOperand(0).getValueType().isFloatingPoint() || 5310 !Op.getOperand(2).getValueType().isFloatingPoint()) 5311 return Op; 5312 5313 // We might be able to do better than this under some circumstances, but in 5314 // general, fsel-based lowering of select is a finite-math-only optimization. 5315 // For more information, see section F.3 of the 2.06 ISA specification. 
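  // As an illustration of the transformation below: fsel selects on
  // "operand >= 0.0", so
  //   select_cc setge LHS, RHS, TV, FV
  // becomes fsel(LHS - RHS, TV, FV).  The subtraction only matches the
  // comparison when NaNs and infinities are excluded (Inf - Inf is NaN, and
  // anything involving a NaN compares unordered), hence the checks below.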
5316 if (!DAG.getTarget().Options.NoInfsFPMath || 5317 !DAG.getTarget().Options.NoNaNsFPMath) 5318 return Op; 5319 5320 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 5321 5322 EVT ResVT = Op.getValueType(); 5323 EVT CmpVT = Op.getOperand(0).getValueType(); 5324 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 5325 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3); 5326 SDLoc dl(Op); 5327 5328 // If the RHS of the comparison is a 0.0, we don't need to do the 5329 // subtraction at all. 5330 SDValue Sel1; 5331 if (isFloatingPointZero(RHS)) 5332 switch (CC) { 5333 default: break; // SETUO etc aren't handled by fsel. 5334 case ISD::SETNE: 5335 std::swap(TV, FV); 5336 case ISD::SETEQ: 5337 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 5338 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 5339 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 5340 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 5341 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 5342 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 5343 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV); 5344 case ISD::SETULT: 5345 case ISD::SETLT: 5346 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 5347 case ISD::SETOGE: 5348 case ISD::SETGE: 5349 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 5350 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 5351 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 5352 case ISD::SETUGT: 5353 case ISD::SETGT: 5354 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 5355 case ISD::SETOLE: 5356 case ISD::SETLE: 5357 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 5358 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 5359 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 5360 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV); 5361 } 5362 5363 SDValue Cmp; 5364 switch (CC) { 5365 default: break; // SETUO etc aren't handled by fsel. 
5366 case ISD::SETNE: 5367 std::swap(TV, FV); 5368 case ISD::SETEQ: 5369 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 5370 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 5371 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 5372 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 5373 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 5374 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 5375 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 5376 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 5377 case ISD::SETULT: 5378 case ISD::SETLT: 5379 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 5380 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 5381 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 5382 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 5383 case ISD::SETOGE: 5384 case ISD::SETGE: 5385 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 5386 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 5387 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 5388 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 5389 case ISD::SETUGT: 5390 case ISD::SETGT: 5391 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 5392 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 5393 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 5394 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 5395 case ISD::SETOLE: 5396 case ISD::SETLE: 5397 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 5398 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 5399 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 5400 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 5401 } 5402 return Op; 5403 } 5404 5405 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI, 5406 SelectionDAG &DAG, 5407 SDLoc dl) const { 5408 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 5409 SDValue Src = Op.getOperand(0); 5410 if (Src.getValueType() == MVT::f32) 5411 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 5412 5413 SDValue Tmp; 5414 switch (Op.getSimpleValueType().SimpleTy) { 5415 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 5416 case MVT::i32: 5417 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIWZ : 5418 (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : 5419 PPCISD::FCTIDZ), 5420 dl, MVT::f64, Src); 5421 break; 5422 case MVT::i64: 5423 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 5424 "i64 FP_TO_UINT is supported only with FPCVT"); 5425 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 5426 PPCISD::FCTIDUZ, 5427 dl, MVT::f64, Src); 5428 break; 5429 } 5430 5431 // Convert the FP value to an int value through memory. 5432 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 5433 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()); 5434 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 5435 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 5436 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(FI); 5437 5438 // Emit a store to the stack slot. 
5439 SDValue Chain; 5440 if (i32Stack) { 5441 MachineFunction &MF = DAG.getMachineFunction(); 5442 MachineMemOperand *MMO = 5443 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 5444 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 5445 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 5446 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 5447 } else 5448 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, 5449 MPI, false, false, 0); 5450 5451 // Result is a load from the stack slot. If loading 4 bytes, make sure to 5452 // add in a bias. 5453 if (Op.getValueType() == MVT::i32 && !i32Stack) { 5454 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 5455 DAG.getConstant(4, FIPtr.getValueType())); 5456 MPI = MPI.getWithOffset(4); 5457 } 5458 5459 RLI.Chain = Chain; 5460 RLI.Ptr = FIPtr; 5461 RLI.MPI = MPI; 5462 } 5463 5464 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 5465 SDLoc dl) const { 5466 ReuseLoadInfo RLI; 5467 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 5468 5469 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, false, 5470 false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo, 5471 RLI.Ranges); 5472 } 5473 5474 // We're trying to insert a regular store, S, and then a load, L. If the 5475 // incoming value, O, is a load, we might just be able to have our load use the 5476 // address used by O. However, we don't know if anything else will store to 5477 // that address before we can load from it. To prevent this situation, we need 5478 // to insert our load, L, into the chain as a peer of O. To do this, we give L 5479 // the same chain operand as O, we create a token factor from the chain results 5480 // of O and L, and we replace all uses of O's chain result with that token 5481 // factor (see spliceIntoChain below for this last part). 5482 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT, 5483 ReuseLoadInfo &RLI, 5484 SelectionDAG &DAG) const { 5485 SDLoc dl(Op); 5486 if ((Op.getOpcode() == ISD::FP_TO_UINT || 5487 Op.getOpcode() == ISD::FP_TO_SINT) && 5488 isOperationLegalOrCustom(Op.getOpcode(), 5489 Op.getOperand(0).getValueType())) { 5490 5491 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 5492 return true; 5493 } 5494 5495 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op); 5496 if (!LD || !ISD::isNON_EXTLoad(LD) || LD->isVolatile() || LD->isNonTemporal()) 5497 return false; 5498 if (LD->getMemoryVT() != MemVT) 5499 return false; 5500 5501 RLI.Ptr = LD->getBasePtr(); 5502 if (LD->isIndexed() && LD->getOffset().getOpcode() != ISD::UNDEF) { 5503 assert(LD->getAddressingMode() == ISD::PRE_INC && 5504 "Non-pre-inc AM on PPC?"); 5505 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr, 5506 LD->getOffset()); 5507 } 5508 5509 RLI.Chain = LD->getChain(); 5510 RLI.MPI = LD->getPointerInfo(); 5511 RLI.IsInvariant = LD->isInvariant(); 5512 RLI.Alignment = LD->getAlignment(); 5513 RLI.AAInfo = LD->getAAInfo(); 5514 RLI.Ranges = LD->getRanges(); 5515 5516 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1); 5517 return true; 5518 } 5519 5520 // Given the head of the old chain, ResChain, insert a token factor containing 5521 // it and NewResChain, and make users of ResChain now be users of that token 5522 // factor. 
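// For illustration: if users U1 and U2 consumed ResChain before, then after
// splicing they consume TokenFactor(ResChain, NewResChain) instead, so any
// store ordered after the original load is also ordered after the new one.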
5523 void PPCTargetLowering::spliceIntoChain(SDValue ResChain, 5524 SDValue NewResChain, 5525 SelectionDAG &DAG) const { 5526 if (!ResChain) 5527 return; 5528 5529 SDLoc dl(NewResChain); 5530 5531 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 5532 NewResChain, DAG.getUNDEF(MVT::Other)); 5533 assert(TF.getNode() != NewResChain.getNode() && 5534 "A new TF really is required here"); 5535 5536 DAG.ReplaceAllUsesOfValueWith(ResChain, TF); 5537 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain); 5538 } 5539 5540 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, 5541 SelectionDAG &DAG) const { 5542 SDLoc dl(Op); 5543 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 5544 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 5545 return SDValue(); 5546 5547 if (Op.getOperand(0).getValueType() == MVT::i1) 5548 return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0), 5549 DAG.getConstantFP(1.0, Op.getValueType()), 5550 DAG.getConstantFP(0.0, Op.getValueType())); 5551 5552 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) && 5553 "UINT_TO_FP is supported only with FPCVT"); 5554 5555 // If we have FCFIDS, then use it when converting to single-precision. 5556 // Otherwise, convert to double-precision and then round. 5557 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) ? 5558 (Op.getOpcode() == ISD::UINT_TO_FP ? 5559 PPCISD::FCFIDUS : PPCISD::FCFIDS) : 5560 (Op.getOpcode() == ISD::UINT_TO_FP ? 5561 PPCISD::FCFIDU : PPCISD::FCFID); 5562 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) ? 5563 MVT::f32 : MVT::f64; 5564 5565 if (Op.getOperand(0).getValueType() == MVT::i64) { 5566 SDValue SINT = Op.getOperand(0); 5567 // When converting to single-precision, we actually need to convert 5568 // to double-precision first and then round to single-precision. 5569 // To avoid double-rounding effects during that operation, we have 5570 // to prepare the input operand. Bits that might be truncated when 5571 // converting to double-precision are replaced by a bit that won't 5572 // be lost at this stage, but is below the single-precision rounding 5573 // position. 5574 // 5575 // However, if -enable-unsafe-fp-math is in effect, accept double 5576 // rounding to avoid the extra overhead. 5577 if (Op.getValueType() == MVT::f32 && 5578 !Subtarget.hasFPCVT() && 5579 !DAG.getTarget().Options.UnsafeFPMath) { 5580 5581 // Twiddle input to make sure the low 11 bits are zero. (If this 5582 // is the case, we are guaranteed the value will fit into the 53 bit 5583 // mantissa of an IEEE double-precision value without rounding.) 5584 // If any of those low 11 bits were not zero originally, make sure 5585 // bit 12 (value 2048) is set instead, so that the final rounding 5586 // to single-precision gets the correct result. 5587 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 5588 SINT, DAG.getConstant(2047, MVT::i64)); 5589 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 5590 Round, DAG.getConstant(2047, MVT::i64)); 5591 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 5592 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 5593 Round, DAG.getConstant(-2048, MVT::i64)); 5594 5595 // However, we cannot use that value unconditionally: if the magnitude 5596 // of the input value is small, the bit-twiddling we did above might 5597 // end up visibly changing the output. 
Fortunately, in that case, we 5598 // don't need to twiddle bits since the original input will convert 5599 // exactly to double-precision floating-point already. Therefore, 5600 // construct a conditional to use the original value if the top 11 5601 // bits are all sign-bit copies, and use the rounded value computed 5602 // above otherwise. 5603 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 5604 SINT, DAG.getConstant(53, MVT::i32)); 5605 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 5606 Cond, DAG.getConstant(1, MVT::i64)); 5607 Cond = DAG.getSetCC(dl, MVT::i32, 5608 Cond, DAG.getConstant(1, MVT::i64), ISD::SETUGT); 5609 5610 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 5611 } 5612 5613 ReuseLoadInfo RLI; 5614 SDValue Bits; 5615 5616 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 5617 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, false, 5618 false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo, 5619 RLI.Ranges); 5620 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 5621 } else 5622 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 5623 5624 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 5625 5626 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 5627 FP = DAG.getNode(ISD::FP_ROUND, dl, 5628 MVT::f32, FP, DAG.getIntPtrConstant(0)); 5629 return FP; 5630 } 5631 5632 assert(Op.getOperand(0).getValueType() == MVT::i32 && 5633 "Unhandled INT_TO_FP type in custom expander!"); 5634 // Since we only generate this in 64-bit mode, we can take advantage of 5635 // 64-bit registers. In particular, sign extend the input value into the 5636 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 5637 // then lfd it and fcfid it. 5638 MachineFunction &MF = DAG.getMachineFunction(); 5639 MachineFrameInfo *FrameInfo = MF.getFrameInfo(); 5640 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 5641 5642 SDValue Ld; 5643 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 5644 ReuseLoadInfo RLI; 5645 bool ReusingLoad; 5646 if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI, 5647 DAG))) { 5648 int FrameIdx = FrameInfo->CreateStackObject(4, 4, false); 5649 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 5650 5651 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 5652 MachinePointerInfo::getFixedStack(FrameIdx), 5653 false, false, 0); 5654 5655 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 5656 "Expected an i32 store"); 5657 5658 RLI.Ptr = FIdx; 5659 RLI.Chain = Store; 5660 RLI.MPI = MachinePointerInfo::getFixedStack(FrameIdx); 5661 RLI.Alignment = 4; 5662 } 5663 5664 MachineMemOperand *MMO = 5665 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 5666 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 5667 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 5668 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 5669 PPCISD::LFIWZX : PPCISD::LFIWAX, 5670 dl, DAG.getVTList(MVT::f64, MVT::Other), 5671 Ops, MVT::i32, MMO); 5672 if (ReusingLoad) 5673 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG); 5674 } else { 5675 assert(Subtarget.isPPC64() && 5676 "i32->FP without LFIWAX supported only on PPC64"); 5677 5678 int FrameIdx = FrameInfo->CreateStackObject(8, 8, false); 5679 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 5680 5681 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, 5682 Op.getOperand(0)); 5683 5684 // STD the extended value into the stack slot. 
5685 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Ext64, FIdx, 5686 MachinePointerInfo::getFixedStack(FrameIdx), 5687 false, false, 0); 5688 5689 // Load the value as a double. 5690 Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx, 5691 MachinePointerInfo::getFixedStack(FrameIdx), 5692 false, false, false, 0); 5693 } 5694 5695 // FCFID it and return it. 5696 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld); 5697 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 5698 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, DAG.getIntPtrConstant(0)); 5699 return FP; 5700 } 5701 5702 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 5703 SelectionDAG &DAG) const { 5704 SDLoc dl(Op); 5705 /* 5706 The rounding mode is in bits 30:31 of FPSR, and has the following 5707 settings: 5708 00 Round to nearest 5709 01 Round to 0 5710 10 Round to +inf 5711 11 Round to -inf 5712 5713 FLT_ROUNDS, on the other hand, expects the following: 5714 -1 Undefined 5715 0 Round to 0 5716 1 Round to nearest 5717 2 Round to +inf 5718 3 Round to -inf 5719 5720 To perform the conversion, we do: 5721 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1)) 5722 */ 5723 5724 MachineFunction &MF = DAG.getMachineFunction(); 5725 EVT VT = Op.getValueType(); 5726 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 5727 5728 // Save FP Control Word to register 5729 EVT NodeTys[] = { 5730 MVT::f64, // return register 5731 MVT::Glue // unused in this context 5732 }; 5733 SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None); 5734 5735 // Save FP register to stack slot 5736 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false); 5737 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT); 5738 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, 5739 StackSlot, MachinePointerInfo(), false, false,0); 5740 5741 // Load FP Control Word from low 32 bits of stack slot. 5742 SDValue Four = DAG.getConstant(4, PtrVT); 5743 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four); 5744 SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(), 5745 false, false, false, 0); 5746 5747 // Transform as necessary 5748 SDValue CWD1 = 5749 DAG.getNode(ISD::AND, dl, MVT::i32, 5750 CWD, DAG.getConstant(3, MVT::i32)); 5751 SDValue CWD2 = 5752 DAG.getNode(ISD::SRL, dl, MVT::i32, 5753 DAG.getNode(ISD::AND, dl, MVT::i32, 5754 DAG.getNode(ISD::XOR, dl, MVT::i32, 5755 CWD, DAG.getConstant(3, MVT::i32)), 5756 DAG.getConstant(3, MVT::i32)), 5757 DAG.getConstant(1, MVT::i32)); 5758 5759 SDValue RetVal = 5760 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2); 5761 5762 return DAG.getNode((VT.getSizeInBits() < 16 ? 5763 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal); 5764 } 5765 5766 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const { 5767 EVT VT = Op.getValueType(); 5768 unsigned BitWidth = VT.getSizeInBits(); 5769 SDLoc dl(Op); 5770 assert(Op.getNumOperands() == 3 && 5771 VT == Op.getOperand(1).getValueType() && 5772 "Unexpected SHL!"); 5773 5774 // Expand into a bunch of logical ops. Note that these ops 5775 // depend on the PPC behavior for oversized shift amounts. 
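  // As a sketch of the expansion below (BitWidth == 32):
  //   OutLo = Lo << Amt
  //   OutHi = (Hi << Amt) | (Lo >> (32 - Amt)) | (Lo << (Amt - 32))
  // It relies on the PPC shift nodes producing 0 for 6-bit shift amounts in
  // [32, 63]: whichever of the two Lo terms ends up with an out-of-range
  // amount drops out, so no explicit Amt < 32 / Amt >= 32 branch is needed.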
5776 SDValue Lo = Op.getOperand(0); 5777 SDValue Hi = Op.getOperand(1); 5778 SDValue Amt = Op.getOperand(2); 5779 EVT AmtVT = Amt.getValueType(); 5780 5781 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 5782 DAG.getConstant(BitWidth, AmtVT), Amt); 5783 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 5784 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 5785 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 5786 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 5787 DAG.getConstant(-BitWidth, AmtVT)); 5788 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 5789 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 5790 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 5791 SDValue OutOps[] = { OutLo, OutHi }; 5792 return DAG.getMergeValues(OutOps, dl); 5793 } 5794 5795 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 5796 EVT VT = Op.getValueType(); 5797 SDLoc dl(Op); 5798 unsigned BitWidth = VT.getSizeInBits(); 5799 assert(Op.getNumOperands() == 3 && 5800 VT == Op.getOperand(1).getValueType() && 5801 "Unexpected SRL!"); 5802 5803 // Expand into a bunch of logical ops. Note that these ops 5804 // depend on the PPC behavior for oversized shift amounts. 5805 SDValue Lo = Op.getOperand(0); 5806 SDValue Hi = Op.getOperand(1); 5807 SDValue Amt = Op.getOperand(2); 5808 EVT AmtVT = Amt.getValueType(); 5809 5810 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 5811 DAG.getConstant(BitWidth, AmtVT), Amt); 5812 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 5813 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 5814 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 5815 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 5816 DAG.getConstant(-BitWidth, AmtVT)); 5817 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 5818 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 5819 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 5820 SDValue OutOps[] = { OutLo, OutHi }; 5821 return DAG.getMergeValues(OutOps, dl); 5822 } 5823 5824 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 5825 SDLoc dl(Op); 5826 EVT VT = Op.getValueType(); 5827 unsigned BitWidth = VT.getSizeInBits(); 5828 assert(Op.getNumOperands() == 3 && 5829 VT == Op.getOperand(1).getValueType() && 5830 "Unexpected SRA!"); 5831 5832 // Expand into a bunch of logical ops, followed by a select_cc. 5833 SDValue Lo = Op.getOperand(0); 5834 SDValue Hi = Op.getOperand(1); 5835 SDValue Amt = Op.getOperand(2); 5836 EVT AmtVT = Amt.getValueType(); 5837 5838 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 5839 DAG.getConstant(BitWidth, AmtVT), Amt); 5840 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 5841 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 5842 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 5843 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 5844 DAG.getConstant(-BitWidth, AmtVT)); 5845 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 5846 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 5847 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, AmtVT), 5848 Tmp4, Tmp6, ISD::SETLE); 5849 SDValue OutOps[] = { OutLo, OutHi }; 5850 return DAG.getMergeValues(OutOps, dl); 5851 } 5852 5853 //===----------------------------------------------------------------------===// 5854 // Vector related lowering. 5855 // 5856 5857 /// BuildSplatI - Build a canonical splati of Val with an element size of 5858 /// SplatSize. 
Cast the result to VT. 5859 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 5860 SelectionDAG &DAG, SDLoc dl) { 5861 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 5862 5863 static const EVT VTys[] = { // canonical VT to use for each size. 5864 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 5865 }; 5866 5867 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 5868 5869 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 5870 if (Val == -1) 5871 SplatSize = 1; 5872 5873 EVT CanonicalVT = VTys[SplatSize-1]; 5874 5875 // Build a canonical splat for this value. 5876 SDValue Elt = DAG.getConstant(Val, MVT::i32); 5877 SmallVector<SDValue, 8> Ops; 5878 Ops.assign(CanonicalVT.getVectorNumElements(), Elt); 5879 SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, Ops); 5880 return DAG.getNode(ISD::BITCAST, dl, ReqVT, Res); 5881 } 5882 5883 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 5884 /// specified intrinsic ID. 5885 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, 5886 SelectionDAG &DAG, SDLoc dl, 5887 EVT DestVT = MVT::Other) { 5888 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 5889 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 5890 DAG.getConstant(IID, MVT::i32), Op); 5891 } 5892 5893 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 5894 /// specified intrinsic ID. 5895 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 5896 SelectionDAG &DAG, SDLoc dl, 5897 EVT DestVT = MVT::Other) { 5898 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 5899 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 5900 DAG.getConstant(IID, MVT::i32), LHS, RHS); 5901 } 5902 5903 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 5904 /// specified intrinsic ID. 5905 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 5906 SDValue Op2, SelectionDAG &DAG, 5907 SDLoc dl, EVT DestVT = MVT::Other) { 5908 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 5909 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 5910 DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2); 5911 } 5912 5913 5914 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 5915 /// amount. The result has the specified value type. 5916 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, 5917 EVT VT, SelectionDAG &DAG, SDLoc dl) { 5918 // Force LHS/RHS to be the right type. 5919 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 5920 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 5921 5922 int Ops[16]; 5923 for (unsigned i = 0; i != 16; ++i) 5924 Ops[i] = i + Amt; 5925 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 5926 return DAG.getNode(ISD::BITCAST, dl, VT, T); 5927 } 5928 5929 // If this is a case we can't handle, return null and let the default 5930 // expansion code take care of it. If we CAN select this case, and if it 5931 // selects to a single instruction, return Op. Otherwise, if we can codegen 5932 // this case more efficiently than a constant pool load, lower it to the 5933 // sequence of ops that should be used. 5934 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 5935 SelectionDAG &DAG) const { 5936 SDLoc dl(Op); 5937 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 5938 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 5939 5940 // Check if this is a splat of a constant value. 
5941 APInt APSplatBits, APSplatUndef; 5942 unsigned SplatBitSize; 5943 bool HasAnyUndefs; 5944 if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize, 5945 HasAnyUndefs, 0, true) || SplatBitSize > 32) 5946 return SDValue(); 5947 5948 unsigned SplatBits = APSplatBits.getZExtValue(); 5949 unsigned SplatUndef = APSplatUndef.getZExtValue(); 5950 unsigned SplatSize = SplatBitSize / 8; 5951 5952 // First, handle single instruction cases. 5953 5954 // All zeros? 5955 if (SplatBits == 0) { 5956 // Canonicalize all zero vectors to be v4i32. 5957 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 5958 SDValue Z = DAG.getConstant(0, MVT::i32); 5959 Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z); 5960 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z); 5961 } 5962 return Op; 5963 } 5964 5965 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 5966 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >> 5967 (32-SplatBitSize)); 5968 if (SextVal >= -16 && SextVal <= 15) 5969 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl); 5970 5971 5972 // Two instruction sequences. 5973 5974 // If this value is in the range [-32,30] and is even, use: 5975 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2) 5976 // If this value is in the range [17,31] and is odd, use: 5977 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16) 5978 // If this value is in the range [-31,-17] and is odd, use: 5979 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16) 5980 // Note the last two are three-instruction sequences. 5981 if (SextVal >= -32 && SextVal <= 31) { 5982 // To avoid having these optimizations undone by constant folding, 5983 // we convert to a pseudo that will be expanded later into one of 5984 // the above forms. 5985 SDValue Elt = DAG.getConstant(SextVal, MVT::i32); 5986 EVT VT = (SplatSize == 1 ? MVT::v16i8 : 5987 (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32)); 5988 SDValue EltSize = DAG.getConstant(SplatSize, MVT::i32); 5989 SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize); 5990 if (VT == Op.getValueType()) 5991 return RetVal; 5992 else 5993 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal); 5994 } 5995 5996 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is 5997 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important 5998 // for fneg/fabs. 5999 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) { 6000 // Make -1 and vspltisw -1: 6001 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl); 6002 6003 // Make the VSLW intrinsic, computing 0x8000_0000. 6004 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV, 6005 OnesV, DAG, dl); 6006 6007 // xor by OnesV to invert it. 6008 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV); 6009 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 6010 } 6011 6012 // The remaining cases assume either big endian element order or 6013 // a splat-size that equates to the element size of the vector 6014 // to be built. An example that doesn't work for little endian is 6015 // {0, -1, 0, -1, 0, -1, 0, -1} which has a splat size of 32 bits 6016 // and a vector element size of 16 bits. The code below will 6017 // produce the vector in big endian element order, which for little 6018 // endian is {-1, 0, -1, 0, -1, 0, -1, 0}. 6019 6020 // For now, just avoid these optimizations in that case. 6021 // FIXME: Develop correct optimizations for LE with mismatched 6022 // splat and element sizes. 
6023 6024 if (Subtarget.isLittleEndian() && 6025 SplatSize != Op.getValueType().getVectorElementType().getSizeInBits()) 6026 return SDValue(); 6027 6028 // Check to see if this is a wide variety of vsplti*, binop self cases. 6029 static const signed char SplatCsts[] = { 6030 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7, 6031 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16 6032 }; 6033 6034 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) { 6035 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for 6036 // cases which are ambiguous (e.g. formation of 0x8000_0000); 'vsplti -1' is listed first in the table, so it wins such ties. 6037 int i = SplatCsts[idx]; 6038 6039 // Figure out what shift amount will be used by altivec if shifted by i in 6040 // this splat size. 6041 unsigned TypeShiftAmt = i & (SplatBitSize-1); 6042 6043 // vsplti + shl self. 6044 if (SextVal == (int)((unsigned)i << TypeShiftAmt)) { 6045 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 6046 static const unsigned IIDs[] = { // Intrinsic to use for each size. 6047 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0, 6048 Intrinsic::ppc_altivec_vslw 6049 }; 6050 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 6051 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 6052 } 6053 6054 // vsplti + srl self. 6055 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 6056 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 6057 static const unsigned IIDs[] = { // Intrinsic to use for each size. 6058 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0, 6059 Intrinsic::ppc_altivec_vsrw 6060 }; 6061 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 6062 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 6063 } 6064 6065 // vsplti + sra self. 6066 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 6067 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 6068 static const unsigned IIDs[] = { // Intrinsic to use for each size. 6069 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0, 6070 Intrinsic::ppc_altivec_vsraw 6071 }; 6072 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 6073 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 6074 } 6075 6076 // vsplti + rol self. 6077 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) | 6078 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) { 6079 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 6080 static const unsigned IIDs[] = { // Intrinsic to use for each size. 6081 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 6082 Intrinsic::ppc_altivec_vrlw 6083 }; 6084 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 6085 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 6086 } 6087 6088 // t = vsplti c, result = vsldoi t, t, 1 6089 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) { 6090 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 6091 return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG, dl); 6092 } 6093 // t = vsplti c, result = vsldoi t, t, 2 6094 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) { 6095 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 6096 return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG, dl); 6097 } 6098 // t = vsplti c, result = vsldoi t, t, 3 6099 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ?
0xFFFFFF : 0))) { 6100 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 6101 return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl); 6102 } 6103 } 6104 6105 return SDValue(); 6106 } 6107 6108 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 6109 /// the specified operations to build the shuffle. 6110 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 6111 SDValue RHS, SelectionDAG &DAG, 6112 SDLoc dl) { 6113 unsigned OpNum = (PFEntry >> 26) & 0x0F; 6114 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 6115 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 6116 6117 enum { 6118 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 6119 OP_VMRGHW, 6120 OP_VMRGLW, 6121 OP_VSPLTISW0, 6122 OP_VSPLTISW1, 6123 OP_VSPLTISW2, 6124 OP_VSPLTISW3, 6125 OP_VSLDOI4, 6126 OP_VSLDOI8, 6127 OP_VSLDOI12 6128 }; 6129 6130 if (OpNum == OP_COPY) { 6131 if (LHSID == (1*9+2)*9+3) return LHS; 6132 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 6133 return RHS; 6134 } 6135 6136 SDValue OpLHS, OpRHS; 6137 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 6138 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 6139 6140 int ShufIdxs[16]; 6141 switch (OpNum) { 6142 default: llvm_unreachable("Unknown i32 permute!"); 6143 case OP_VMRGHW: 6144 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 6145 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 6146 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 6147 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 6148 break; 6149 case OP_VMRGLW: 6150 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 6151 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 6152 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 6153 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 6154 break; 6155 case OP_VSPLTISW0: 6156 for (unsigned i = 0; i != 16; ++i) 6157 ShufIdxs[i] = (i&3)+0; 6158 break; 6159 case OP_VSPLTISW1: 6160 for (unsigned i = 0; i != 16; ++i) 6161 ShufIdxs[i] = (i&3)+4; 6162 break; 6163 case OP_VSPLTISW2: 6164 for (unsigned i = 0; i != 16; ++i) 6165 ShufIdxs[i] = (i&3)+8; 6166 break; 6167 case OP_VSPLTISW3: 6168 for (unsigned i = 0; i != 16; ++i) 6169 ShufIdxs[i] = (i&3)+12; 6170 break; 6171 case OP_VSLDOI4: 6172 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl); 6173 case OP_VSLDOI8: 6174 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl); 6175 case OP_VSLDOI12: 6176 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl); 6177 } 6178 EVT VT = OpLHS.getValueType(); 6179 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS); 6180 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS); 6181 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs); 6182 return DAG.getNode(ISD::BITCAST, dl, VT, T); 6183 } 6184 6185 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 6186 /// is a shuffle we can handle in a single instruction, return it. Otherwise, 6187 /// return the code it can be lowered into. Worst case, it can always be 6188 /// lowered into a vperm. 
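/// (A vperm picks each of the 16 result bytes out of the 32-byte
/// concatenation of its two vector inputs, steered by a byte-index control
/// vector, so any shuffle can be expressed this way at the cost of
/// materializing that control vector.)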
6189 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 6190 SelectionDAG &DAG) const { 6191 SDLoc dl(Op); 6192 SDValue V1 = Op.getOperand(0); 6193 SDValue V2 = Op.getOperand(1); 6194 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 6195 EVT VT = Op.getValueType(); 6196 bool isLittleEndian = Subtarget.isLittleEndian(); 6197 6198 // Cases that are handled by instructions that take permute immediates 6199 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 6200 // selected by the instruction selector. 6201 if (V2.getOpcode() == ISD::UNDEF) { 6202 if (PPC::isSplatShuffleMask(SVOp, 1) || 6203 PPC::isSplatShuffleMask(SVOp, 2) || 6204 PPC::isSplatShuffleMask(SVOp, 4) || 6205 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) || 6206 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) || 6207 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 || 6208 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) || 6209 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) || 6210 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) || 6211 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) || 6212 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) || 6213 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG)) { 6214 return Op; 6215 } 6216 } 6217 6218 // Altivec has a variety of "shuffle immediates" that take two vector inputs 6219 // and produce a fixed permutation. If any of these match, do not lower to 6220 // VPERM. 6221 unsigned int ShuffleKind = isLittleEndian ? 2 : 0; 6222 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) || 6223 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) || 6224 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 || 6225 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) || 6226 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) || 6227 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) || 6228 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) || 6229 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) || 6230 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG)) 6231 return Op; 6232 6233 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 6234 // perfect shuffle table to emit an optimal matching sequence. 6235 ArrayRef<int> PermMask = SVOp->getMask(); 6236 6237 unsigned PFIndexes[4]; 6238 bool isFourElementShuffle = true; 6239 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 6240 unsigned EltNo = 8; // Start out undef. 6241 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 6242 if (PermMask[i*4+j] < 0) 6243 continue; // Undef, ignore it. 6244 6245 unsigned ByteSource = PermMask[i*4+j]; 6246 if ((ByteSource & 3) != j) { 6247 isFourElementShuffle = false; 6248 break; 6249 } 6250 6251 if (EltNo == 8) { 6252 EltNo = ByteSource/4; 6253 } else if (EltNo != ByteSource/4) { 6254 isFourElementShuffle = false; 6255 break; 6256 } 6257 } 6258 PFIndexes[i] = EltNo; 6259 } 6260 6261 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 6262 // perfect shuffle vector to determine if it is cost effective to do this as 6263 // discrete instructions, or whether we should use a vperm. 6264 // For now, we skip this for little endian until such time as we have a 6265 // little-endian perfect shuffle table. 6266 if (isFourElementShuffle && !isLittleEndian) { 6267 // Compute the index in the perfect shuffle table. 
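    // Each PFIndexes[i] is a base-9 digit: 0-3 name a word of V1, 4-7 a word
    // of V2, and 8 means undef.  The identity mask <0,1,2,3>, for example,
    // encodes to 0*729 + 1*81 + 2*9 + 3 == 102, which is exactly the LHSID
    // that GeneratePerfectShuffle() treats as "just return LHS".  Each table
    // entry then packs a cost in bits 31-30, an opcode in bits 29-26, and
    // the two operand IDs in the low 26 bits.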
6268 unsigned PFTableIndex = 6269 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 6270 6271 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 6272 unsigned Cost = (PFEntry >> 30); 6273 6274 // Determining when to avoid vperm is tricky. Many things affect the cost 6275 // of vperm, particularly how many times the perm mask needs to be computed. 6276 // For example, if the perm mask can be hoisted out of a loop or is already 6277 // used (perhaps because there are multiple permutes with the same shuffle 6278 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 6279 // the loop requires an extra register. 6280 // 6281 // As a compromise, we only emit discrete instructions if the shuffle can be 6282 // generated in 3 or fewer operations. When we have loop information 6283 // available, if this block is within a loop, we should avoid using vperm 6284 // for 3-operation perms and use a constant pool load instead. 6285 if (Cost < 3) 6286 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 6287 } 6288 6289 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 6290 // vector that will get spilled to the constant pool. 6291 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 6292 6293 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 6294 // that it is in input element units, not in bytes. Convert now. 6295 6296 // For little endian, the order of the input vectors is reversed, and 6297 // the permutation mask is complemented with respect to 31. This is 6298 // necessary to produce proper semantics with the big-endian-biased vperm 6299 // instruction. 6300 EVT EltVT = V1.getValueType().getVectorElementType(); 6301 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 6302 6303 SmallVector<SDValue, 16> ResultMask; 6304 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 6305 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i]; 6306 6307 for (unsigned j = 0; j != BytesPerElement; ++j) 6308 if (isLittleEndian) 6309 ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement+j), 6310 MVT::i32)); 6311 else 6312 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j, 6313 MVT::i32)); 6314 } 6315 6316 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, 6317 ResultMask); 6318 if (isLittleEndian) 6319 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 6320 V2, V1, VPermMask); 6321 else 6322 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 6323 V1, V2, VPermMask); 6324 } 6325 6326 /// getAltivecCompareInfo - Given an intrinsic, return false if it is not an 6327 /// altivec comparison. If it is, return true and fill in Opc/isDot with 6328 /// information about the intrinsic. 6329 static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, 6330 bool &isDot) { 6331 unsigned IntrinsicID = 6332 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 6333 CompareOpc = -1; 6334 isDot = false; 6335 switch (IntrinsicID) { 6336 default: return false; 6337 // Comparison predicates. 
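  // Each CompareOpc below is the extended-opcode field of the corresponding
  // AltiVec vcmp* instruction; the *_p intrinsics select the record ("dot")
  // forms, which also set CR6 so the predicate can be read back (see the
  // MFOCRF-based unpacking in LowerINTRINSIC_WO_CHAIN below).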
6338 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; 6339 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; 6340 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; 6341 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; 6342 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; 6343 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; 6344 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; 6345 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; 6346 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; 6347 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; 6348 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; 6349 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; 6350 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; 6351 6352 // Normal Comparisons. 6353 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 6354 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 6355 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 6356 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 6357 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 6358 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 6359 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 6360 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 6361 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 6362 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 6363 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 6364 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 6365 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 6366 } 6367 return true; 6368 } 6369 6370 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 6371 /// lower, do it, otherwise return null. 6372 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 6373 SelectionDAG &DAG) const { 6374 // If this is a lowered altivec predicate compare, CompareOpc is set to the 6375 // opcode number of the comparison. 6376 SDLoc dl(Op); 6377 int CompareOpc; 6378 bool isDot; 6379 if (!getAltivecCompareInfo(Op, CompareOpc, isDot)) 6380 return SDValue(); // Don't custom lower most intrinsics. 6381 6382 // If this is a non-dot comparison, make the VCMP node and we are done. 6383 if (!isDot) { 6384 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 6385 Op.getOperand(1), Op.getOperand(2), 6386 DAG.getConstant(CompareOpc, MVT::i32)); 6387 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 6388 } 6389 6390 // Create the PPCISD altivec 'dot' comparison node. 6391 SDValue Ops[] = { 6392 Op.getOperand(2), // LHS 6393 Op.getOperand(3), // RHS 6394 DAG.getConstant(CompareOpc, MVT::i32) 6395 }; 6396 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue }; 6397 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 6398 6399 // Now that we have the comparison, emit a copy from the CR to a GPR. 6400 // This is flagged to the above dot comparison. 
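  // After the MFOCRF below, CR6 sits in bits 7..4 of the returned GPR
  // (counting from the least-significant bit), so the SRL amount
  // 8-(3-BitNo) is 5 for the EQ bit (BitNo == 0) and 7 for the LT bit
  // (BitNo == 2), moving the requested bit down to bit 0 before it is
  // masked with 1.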
6401 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32, 6402 DAG.getRegister(PPC::CR6, MVT::i32), 6403 CompNode.getValue(1)); 6404 6405 // Unpack the result based on how the target uses it. 6406 unsigned BitNo; // Bit # of CR6. 6407 bool InvertBit; // Invert result? 6408 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 6409 default: // Can't happen, don't crash on invalid number though. 6410 case 0: // Return the value of the EQ bit of CR6. 6411 BitNo = 0; InvertBit = false; 6412 break; 6413 case 1: // Return the inverted value of the EQ bit of CR6. 6414 BitNo = 0; InvertBit = true; 6415 break; 6416 case 2: // Return the value of the LT bit of CR6. 6417 BitNo = 2; InvertBit = false; 6418 break; 6419 case 3: // Return the inverted value of the LT bit of CR6. 6420 BitNo = 2; InvertBit = true; 6421 break; 6422 } 6423 6424 // Shift the bit into the low position. 6425 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags, 6426 DAG.getConstant(8-(3-BitNo), MVT::i32)); 6427 // Isolate the bit. 6428 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags, 6429 DAG.getConstant(1, MVT::i32)); 6430 6431 // If we are supposed to, toggle the bit. 6432 if (InvertBit) 6433 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags, 6434 DAG.getConstant(1, MVT::i32)); 6435 return Flags; 6436 } 6437 6438 SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, 6439 SelectionDAG &DAG) const { 6440 SDLoc dl(Op); 6441 // For v2i64 (VSX), we can pattern match the v2i32 case (using fp <-> int 6442 // instructions), but for smaller types, we need to first extend up to v2i32 6443 // before going further. 6444 if (Op.getValueType() == MVT::v2i64) { 6445 EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 6446 if (ExtVT != MVT::v2i32) { 6447 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)); 6448 Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op, 6449 DAG.getValueType(EVT::getVectorVT(*DAG.getContext(), 6450 ExtVT.getVectorElementType(), 4))); 6451 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op); 6452 Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op, 6453 DAG.getValueType(MVT::v2i32)); 6454 } 6455 6456 return Op; 6457 } 6458 6459 return SDValue(); 6460 } 6461 6462 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, 6463 SelectionDAG &DAG) const { 6464 SDLoc dl(Op); 6465 // Create a stack slot that is 16-byte aligned. 6466 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 6467 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 6468 EVT PtrVT = getPointerTy(); 6469 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6470 6471 // Store the input value into Value#0 of the stack slot. 6472 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, 6473 Op.getOperand(0), FIdx, MachinePointerInfo(), 6474 false, false, 0); 6475 // Load it out. 6476 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo(), 6477 false, false, false, 0); 6478 } 6479 6480 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 6481 SDLoc dl(Op); 6482 if (Op.getValueType() == MVT::v4i32) { 6483 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 6484 6485 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl); 6486 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl); // +16 as shift amt. 6487 6488 SDValue RHSSwap = // = vrlw RHS, 16 6489 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 6490 6491 // Shrinkify inputs to v8i16.
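    // The rest of this block builds the low 32 bits of each product from
    // 16-bit pieces: with a = aH*2^16 + aL and b = bH*2^16 + bL,
    //   (a * b) mod 2^32 == aL*bL + ((aL*bH + aH*bL) << 16).
    // vmulouh supplies the aL*bL terms, vmsumuhm of LHS with the half-swapped
    // RHS accumulates aL*bH + aH*bL, and the vslw/add below combine the two.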
6492 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS); 6493 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS); 6494 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap); 6495 6496 // Low parts multiplied together, generating 32-bit results (we ignore the 6497 // top parts). 6498 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 6499 LHS, RHS, DAG, dl, MVT::v4i32); 6500 6501 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 6502 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 6503 // Shift the high parts up 16 bits. 6504 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 6505 Neg16, DAG, dl); 6506 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 6507 } else if (Op.getValueType() == MVT::v8i16) { 6508 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 6509 6510 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl); 6511 6512 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 6513 LHS, RHS, Zero, DAG, dl); 6514 } else if (Op.getValueType() == MVT::v16i8) { 6515 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 6516 bool isLittleEndian = Subtarget.isLittleEndian(); 6517 6518 // Multiply the even 8-bit parts, producing 16-bit sums. 6519 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 6520 LHS, RHS, DAG, dl, MVT::v8i16); 6521 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts); 6522 6523 // Multiply the odd 8-bit parts, producing 16-bit sums. 6524 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 6525 LHS, RHS, DAG, dl, MVT::v8i16); 6526 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts); 6527 6528 // Merge the results together. Because vmuleub and vmuloub are 6529 // instructions with a big-endian bias, we must reverse the 6530 // element numbering and reverse the meaning of "odd" and "even" 6531 // when generating little endian code. 6532 int Ops[16]; 6533 for (unsigned i = 0; i != 8; ++i) { 6534 if (isLittleEndian) { 6535 Ops[i*2 ] = 2*i; 6536 Ops[i*2+1] = 2*i+16; 6537 } else { 6538 Ops[i*2 ] = 2*i+1; 6539 Ops[i*2+1] = 2*i+1+16; 6540 } 6541 } 6542 if (isLittleEndian) 6543 return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops); 6544 else 6545 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 6546 } else { 6547 llvm_unreachable("Unknown mul to lower!"); 6548 } 6549 } 6550 6551 /// LowerOperation - Provide custom lowering hooks for some operations. 
6552 /// 6553 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 6554 switch (Op.getOpcode()) { 6555 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 6556 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 6557 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 6558 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 6559 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 6560 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 6561 case ISD::SETCC: return LowerSETCC(Op, DAG); 6562 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 6563 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 6564 case ISD::VASTART: 6565 return LowerVASTART(Op, DAG, Subtarget); 6566 6567 case ISD::VAARG: 6568 return LowerVAARG(Op, DAG, Subtarget); 6569 6570 case ISD::VACOPY: 6571 return LowerVACOPY(Op, DAG, Subtarget); 6572 6573 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, Subtarget); 6574 case ISD::DYNAMIC_STACKALLOC: 6575 return LowerDYNAMIC_STACKALLOC(Op, DAG, Subtarget); 6576 6577 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 6578 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 6579 6580 case ISD::LOAD: return LowerLOAD(Op, DAG); 6581 case ISD::STORE: return LowerSTORE(Op, DAG); 6582 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); 6583 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 6584 case ISD::FP_TO_UINT: 6585 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, 6586 SDLoc(Op)); 6587 case ISD::UINT_TO_FP: 6588 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 6589 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 6590 6591 // Lower 64-bit shifts. 6592 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 6593 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 6594 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 6595 6596 // Vector-related lowering. 6597 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 6598 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 6599 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 6600 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 6601 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG); 6602 case ISD::MUL: return LowerMUL(Op, DAG); 6603 6604 // For counter-based loop handling. 6605 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 6606 6607 // Frame & Return address. 
6608 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 6609 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 6610 } 6611 } 6612 6613 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 6614 SmallVectorImpl<SDValue>&Results, 6615 SelectionDAG &DAG) const { 6616 const TargetMachine &TM = getTargetMachine(); 6617 SDLoc dl(N); 6618 switch (N->getOpcode()) { 6619 default: 6620 llvm_unreachable("Do not know how to custom type legalize this operation!"); 6621 case ISD::READCYCLECOUNTER: { 6622 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 6623 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0)); 6624 6625 Results.push_back(RTB); 6626 Results.push_back(RTB.getValue(1)); 6627 Results.push_back(RTB.getValue(2)); 6628 break; 6629 } 6630 case ISD::INTRINSIC_W_CHAIN: { 6631 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 6632 Intrinsic::ppc_is_decremented_ctr_nonzero) 6633 break; 6634 6635 assert(N->getValueType(0) == MVT::i1 && 6636 "Unexpected result type for CTR decrement intrinsic"); 6637 EVT SVT = getSetCCResultType(*DAG.getContext(), N->getValueType(0)); 6638 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 6639 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 6640 N->getOperand(1)); 6641 6642 Results.push_back(NewInt); 6643 Results.push_back(NewInt.getValue(1)); 6644 break; 6645 } 6646 case ISD::VAARG: { 6647 if (!TM.getSubtarget<PPCSubtarget>().isSVR4ABI() 6648 || TM.getSubtarget<PPCSubtarget>().isPPC64()) 6649 return; 6650 6651 EVT VT = N->getValueType(0); 6652 6653 if (VT == MVT::i64) { 6654 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG, Subtarget); 6655 6656 Results.push_back(NewNode); 6657 Results.push_back(NewNode.getValue(1)); 6658 } 6659 return; 6660 } 6661 case ISD::FP_ROUND_INREG: { 6662 assert(N->getValueType(0) == MVT::ppcf128); 6663 assert(N->getOperand(0).getValueType() == MVT::ppcf128); 6664 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 6665 MVT::f64, N->getOperand(0), 6666 DAG.getIntPtrConstant(0)); 6667 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 6668 MVT::f64, N->getOperand(0), 6669 DAG.getIntPtrConstant(1)); 6670 6671 // Add the two halves of the long double in round-to-zero mode. 6672 SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 6673 6674 // We know the low half is about to be thrown away, so just use something 6675 // convenient. 6676 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128, 6677 FPreg, FPreg)); 6678 return; 6679 } 6680 case ISD::FP_TO_SINT: 6681 // LowerFP_TO_INT() can only handle f32 and f64. 
6682 if (N->getOperand(0).getValueType() == MVT::ppcf128) 6683 return; 6684 Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl)); 6685 return; 6686 } 6687 } 6688 6689 6690 //===----------------------------------------------------------------------===// 6691 // Other Lowering Code 6692 //===----------------------------------------------------------------------===// 6693 6694 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) { 6695 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 6696 Function *Func = Intrinsic::getDeclaration(M, Id); 6697 return Builder.CreateCall(Func); 6698 } 6699 6700 // The mappings for emitLeading/TrailingFence is taken from 6701 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html 6702 Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder, 6703 AtomicOrdering Ord, bool IsStore, 6704 bool IsLoad) const { 6705 if (Ord == SequentiallyConsistent) 6706 return callIntrinsic(Builder, Intrinsic::ppc_sync); 6707 else if (isAtLeastRelease(Ord)) 6708 return callIntrinsic(Builder, Intrinsic::ppc_lwsync); 6709 else 6710 return nullptr; 6711 } 6712 6713 Instruction* PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder, 6714 AtomicOrdering Ord, bool IsStore, 6715 bool IsLoad) const { 6716 if (IsLoad && isAtLeastAcquire(Ord)) 6717 return callIntrinsic(Builder, Intrinsic::ppc_lwsync); 6718 // FIXME: this is too conservative, a dependent branch + isync is enough. 6719 // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and 6720 // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html 6721 // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification. 6722 else 6723 return nullptr; 6724 } 6725 6726 MachineBasicBlock * 6727 PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 6728 bool is64bit, unsigned BinOpcode) const { 6729 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 6730 const TargetInstrInfo *TII = 6731 getTargetMachine().getSubtargetImpl()->getInstrInfo(); 6732 6733 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6734 MachineFunction *F = BB->getParent(); 6735 MachineFunction::iterator It = BB; 6736 ++It; 6737 6738 unsigned dest = MI->getOperand(0).getReg(); 6739 unsigned ptrA = MI->getOperand(1).getReg(); 6740 unsigned ptrB = MI->getOperand(2).getReg(); 6741 unsigned incr = MI->getOperand(3).getReg(); 6742 DebugLoc dl = MI->getDebugLoc(); 6743 6744 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 6745 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 6746 F->insert(It, loopMBB); 6747 F->insert(It, exitMBB); 6748 exitMBB->splice(exitMBB->begin(), BB, 6749 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 6750 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 6751 6752 MachineRegisterInfo &RegInfo = F->getRegInfo(); 6753 unsigned TmpReg = (!BinOpcode) ? incr : 6754 RegInfo.createVirtualRegister( is64bit ? &PPC::G8RCRegClass 6755 : &PPC::GPRCRegClass); 6756 6757 // thisMBB: 6758 // ... 6759 // fallthrough --> loopMBB 6760 BB->addSuccessor(loopMBB); 6761 6762 // loopMBB: 6763 // l[wd]arx dest, ptr 6764 // add r0, dest, incr 6765 // st[wd]cx. r0, ptr 6766 // bne- loopMBB 6767 // fallthrough --> exitMBB 6768 BB = loopMBB; 6769 BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest) 6770 .addReg(ptrA).addReg(ptrB); 6771 if (BinOpcode) 6772 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 6773 BuildMI(BB, dl, TII->get(is64bit ? 
PPC::STDCX : PPC::STWCX)) 6774 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 6775 BuildMI(BB, dl, TII->get(PPC::BCC)) 6776 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 6777 BB->addSuccessor(loopMBB); 6778 BB->addSuccessor(exitMBB); 6779 6780 // exitMBB: 6781 // ... 6782 BB = exitMBB; 6783 return BB; 6784 } 6785 6786 MachineBasicBlock * 6787 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI, 6788 MachineBasicBlock *BB, 6789 bool is8bit, // operation 6790 unsigned BinOpcode) const { 6791 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 6792 const TargetInstrInfo *TII = 6793 getTargetMachine().getSubtargetImpl()->getInstrInfo(); 6794 // In 64 bit mode we have to use 64 bits for addresses, even though the 6795 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 6796 // registers without caring whether they're 32 or 64, but here we're 6797 // doing actual arithmetic on the addresses. 6798 bool is64bit = Subtarget.isPPC64(); 6799 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 6800 6801 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6802 MachineFunction *F = BB->getParent(); 6803 MachineFunction::iterator It = BB; 6804 ++It; 6805 6806 unsigned dest = MI->getOperand(0).getReg(); 6807 unsigned ptrA = MI->getOperand(1).getReg(); 6808 unsigned ptrB = MI->getOperand(2).getReg(); 6809 unsigned incr = MI->getOperand(3).getReg(); 6810 DebugLoc dl = MI->getDebugLoc(); 6811 6812 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 6813 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 6814 F->insert(It, loopMBB); 6815 F->insert(It, exitMBB); 6816 exitMBB->splice(exitMBB->begin(), BB, 6817 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 6818 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 6819 6820 MachineRegisterInfo &RegInfo = F->getRegInfo(); 6821 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 6822 : &PPC::GPRCRegClass; 6823 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 6824 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 6825 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 6826 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC); 6827 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 6828 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 6829 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 6830 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 6831 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC); 6832 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 6833 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 6834 unsigned Ptr1Reg; 6835 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC); 6836 6837 // thisMBB: 6838 // ... 6839 // fallthrough --> loopMBB 6840 BB->addSuccessor(loopMBB); 6841 6842 // The 4-byte load must be aligned, while a char or short may be 6843 // anywhere in the word. Hence all this nasty bookkeeping code. 6844 // add ptr1, ptrA, ptrB [copy if ptrA==0] 6845 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 6846 // xori shift, shift1, 24 [16] 6847 // rlwinm ptr, ptr1, 0, 0, 29 6848 // slw incr2, incr, shift 6849 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 6850 // slw mask, mask2, shift 6851 // loopMBB: 6852 // lwarx tmpDest, ptr 6853 // add tmp, tmpDest, incr2 6854 // andc tmp2, tmpDest, mask 6855 // and tmp3, tmp, mask 6856 // or tmp4, tmp3, tmp2 6857 // stwcx. 
tmp4, ptr 6858 // bne- loopMBB 6859 // fallthrough --> exitMBB 6860 // srw dest, tmpDest, shift 6861 if (ptrA != ZeroReg) { 6862 Ptr1Reg = RegInfo.createVirtualRegister(RC); 6863 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 6864 .addReg(ptrA).addReg(ptrB); 6865 } else { 6866 Ptr1Reg = ptrB; 6867 } 6868 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 6869 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 6870 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 6871 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 6872 if (is64bit) 6873 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 6874 .addReg(Ptr1Reg).addImm(0).addImm(61); 6875 else 6876 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 6877 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 6878 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg) 6879 .addReg(incr).addReg(ShiftReg); 6880 if (is8bit) 6881 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 6882 else { 6883 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 6884 BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535); 6885 } 6886 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 6887 .addReg(Mask2Reg).addReg(ShiftReg); 6888 6889 BB = loopMBB; 6890 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 6891 .addReg(ZeroReg).addReg(PtrReg); 6892 if (BinOpcode) 6893 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 6894 .addReg(Incr2Reg).addReg(TmpDestReg); 6895 BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg) 6896 .addReg(TmpDestReg).addReg(MaskReg); 6897 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg) 6898 .addReg(TmpReg).addReg(MaskReg); 6899 BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg) 6900 .addReg(Tmp3Reg).addReg(Tmp2Reg); 6901 BuildMI(BB, dl, TII->get(PPC::STWCX)) 6902 .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg); 6903 BuildMI(BB, dl, TII->get(PPC::BCC)) 6904 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 6905 BB->addSuccessor(loopMBB); 6906 BB->addSuccessor(exitMBB); 6907 6908 // exitMBB: 6909 // ... 
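  // TmpDestReg still holds the whole aligned word as it was before the
  // update, so the SRW emitted at the top of exitMBB shifts the affected
  // byte/halfword back down into the low bits of dest, i.e. the old value
  // that the atomic operation is defined to return.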
6910 BB = exitMBB; 6911 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg) 6912 .addReg(ShiftReg); 6913 return BB; 6914 } 6915 6916 llvm::MachineBasicBlock* 6917 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI, 6918 MachineBasicBlock *MBB) const { 6919 DebugLoc DL = MI->getDebugLoc(); 6920 const TargetInstrInfo *TII = 6921 getTargetMachine().getSubtargetImpl()->getInstrInfo(); 6922 6923 MachineFunction *MF = MBB->getParent(); 6924 MachineRegisterInfo &MRI = MF->getRegInfo(); 6925 6926 const BasicBlock *BB = MBB->getBasicBlock(); 6927 MachineFunction::iterator I = MBB; 6928 ++I; 6929 6930 // Memory Reference 6931 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 6932 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 6933 6934 unsigned DstReg = MI->getOperand(0).getReg(); 6935 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 6936 assert(RC->hasType(MVT::i32) && "Invalid destination!"); 6937 unsigned mainDstReg = MRI.createVirtualRegister(RC); 6938 unsigned restoreDstReg = MRI.createVirtualRegister(RC); 6939 6940 MVT PVT = getPointerTy(); 6941 assert((PVT == MVT::i64 || PVT == MVT::i32) && 6942 "Invalid Pointer Size!"); 6943 // For v = setjmp(buf), we generate 6944 // 6945 // thisMBB: 6946 // SjLjSetup mainMBB 6947 // bl mainMBB 6948 // v_restore = 1 6949 // b sinkMBB 6950 // 6951 // mainMBB: 6952 // buf[LabelOffset] = LR 6953 // v_main = 0 6954 // 6955 // sinkMBB: 6956 // v = phi(main, restore) 6957 // 6958 6959 MachineBasicBlock *thisMBB = MBB; 6960 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 6961 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 6962 MF->insert(I, mainMBB); 6963 MF->insert(I, sinkMBB); 6964 6965 MachineInstrBuilder MIB; 6966 6967 // Transfer the remainder of BB and its successor edges to sinkMBB. 6968 sinkMBB->splice(sinkMBB->begin(), MBB, 6969 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 6970 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 6971 6972 // Note that the structure of the jmp_buf used here is not compatible 6973 // with that used by libc, and is not designed to be. Specifically, it 6974 // stores only those 'reserved' registers that LLVM does not otherwise 6975 // understand how to spill. Also, by convention, by the time this 6976 // intrinsic is called, Clang has already stored the frame address in the 6977 // first slot of the buffer and stack address in the third. Following the 6978 // X86 target code, we'll store the jump address in the second slot. We also 6979 // need to save the TOC pointer (R2) to handle jumps between shared 6980 // libraries, and that will be stored in the fourth slot. The thread 6981 // identifier (R13) is not affected. 6982 6983 // thisMBB: 6984 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 6985 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 6986 const int64_t BPOffset = 4 * PVT.getStoreSize(); 6987 6988 // Prepare IP either in reg. 6989 const TargetRegisterClass *PtrRC = getRegClassFor(PVT); 6990 unsigned LabelReg = MRI.createVirtualRegister(PtrRC); 6991 unsigned BufReg = MI->getOperand(1).getReg(); 6992 6993 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) { 6994 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD)) 6995 .addReg(PPC::X2) 6996 .addImm(TOCOffset) 6997 .addReg(BufReg); 6998 MIB.setMemRefs(MMOBegin, MMOEnd); 6999 } 7000 7001 // Naked functions never have a base pointer, and so we use r1. For all 7002 // other functions, this decision must be delayed until during PEI. 
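  // For reference, with a 64-bit pointer the slots used here are: buf+0 the
  // frame address and buf+16 the stack pointer (both already written by the
  // front end), buf+8 the resume address (LabelOffset, stored in mainMBB
  // below), buf+24 the TOC pointer (TOCOffset, stored above), and buf+32 the
  // base pointer (BPOffset, stored just below).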
7003 unsigned BaseReg; 7004 if (MF->getFunction()->getAttributes().hasAttribute( 7005 AttributeSet::FunctionIndex, Attribute::Naked)) 7006 BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1; 7007 else 7008 BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP; 7009 7010 MIB = BuildMI(*thisMBB, MI, DL, 7011 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW)) 7012 .addReg(BaseReg) 7013 .addImm(BPOffset) 7014 .addReg(BufReg); 7015 MIB.setMemRefs(MMOBegin, MMOEnd); 7016 7017 // Setup 7018 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB); 7019 const PPCRegisterInfo *TRI = 7020 getTargetMachine().getSubtarget<PPCSubtarget>().getRegisterInfo(); 7021 MIB.addRegMask(TRI->getNoPreservedMask()); 7022 7023 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1); 7024 7025 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup)) 7026 .addMBB(mainMBB); 7027 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB); 7028 7029 thisMBB->addSuccessor(mainMBB, /* weight */ 0); 7030 thisMBB->addSuccessor(sinkMBB, /* weight */ 1); 7031 7032 // mainMBB: 7033 // mainDstReg = 0 7034 MIB = BuildMI(mainMBB, DL, 7035 TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg); 7036 7037 // Store IP 7038 if (Subtarget.isPPC64()) { 7039 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD)) 7040 .addReg(LabelReg) 7041 .addImm(LabelOffset) 7042 .addReg(BufReg); 7043 } else { 7044 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW)) 7045 .addReg(LabelReg) 7046 .addImm(LabelOffset) 7047 .addReg(BufReg); 7048 } 7049 7050 MIB.setMemRefs(MMOBegin, MMOEnd); 7051 7052 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0); 7053 mainMBB->addSuccessor(sinkMBB); 7054 7055 // sinkMBB: 7056 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 7057 TII->get(PPC::PHI), DstReg) 7058 .addReg(mainDstReg).addMBB(mainMBB) 7059 .addReg(restoreDstReg).addMBB(thisMBB); 7060 7061 MI->eraseFromParent(); 7062 return sinkMBB; 7063 } 7064 7065 MachineBasicBlock * 7066 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI, 7067 MachineBasicBlock *MBB) const { 7068 DebugLoc DL = MI->getDebugLoc(); 7069 const TargetInstrInfo *TII = 7070 getTargetMachine().getSubtargetImpl()->getInstrInfo(); 7071 7072 MachineFunction *MF = MBB->getParent(); 7073 MachineRegisterInfo &MRI = MF->getRegInfo(); 7074 7075 // Memory Reference 7076 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 7077 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 7078 7079 MVT PVT = getPointerTy(); 7080 assert((PVT == MVT::i64 || PVT == MVT::i32) && 7081 "Invalid Pointer Size!"); 7082 7083 const TargetRegisterClass *RC = 7084 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 7085 unsigned Tmp = MRI.createVirtualRegister(RC); 7086 // Since FP is only updated here but NOT referenced, it's treated as GPR. 7087 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; 7088 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; 7089 unsigned BP = (PVT == MVT::i64) ? PPC::X30 : 7090 (Subtarget.isSVR4ABI() && 7091 MF->getTarget().getRelocationModel() == Reloc::PIC_ ? 
7092 PPC::R29 : PPC::R30); 7093 7094 MachineInstrBuilder MIB; 7095 7096 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 7097 const int64_t SPOffset = 2 * PVT.getStoreSize(); 7098 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 7099 const int64_t BPOffset = 4 * PVT.getStoreSize(); 7100 7101 unsigned BufReg = MI->getOperand(0).getReg(); 7102 7103 // Reload FP (the jumped-to function may not have had a 7104 // frame pointer, and if so, then its r31 will be restored 7105 // as necessary). 7106 if (PVT == MVT::i64) { 7107 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP) 7108 .addImm(0) 7109 .addReg(BufReg); 7110 } else { 7111 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP) 7112 .addImm(0) 7113 .addReg(BufReg); 7114 } 7115 MIB.setMemRefs(MMOBegin, MMOEnd); 7116 7117 // Reload IP 7118 if (PVT == MVT::i64) { 7119 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp) 7120 .addImm(LabelOffset) 7121 .addReg(BufReg); 7122 } else { 7123 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp) 7124 .addImm(LabelOffset) 7125 .addReg(BufReg); 7126 } 7127 MIB.setMemRefs(MMOBegin, MMOEnd); 7128 7129 // Reload SP 7130 if (PVT == MVT::i64) { 7131 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP) 7132 .addImm(SPOffset) 7133 .addReg(BufReg); 7134 } else { 7135 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP) 7136 .addImm(SPOffset) 7137 .addReg(BufReg); 7138 } 7139 MIB.setMemRefs(MMOBegin, MMOEnd); 7140 7141 // Reload BP 7142 if (PVT == MVT::i64) { 7143 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP) 7144 .addImm(BPOffset) 7145 .addReg(BufReg); 7146 } else { 7147 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP) 7148 .addImm(BPOffset) 7149 .addReg(BufReg); 7150 } 7151 MIB.setMemRefs(MMOBegin, MMOEnd); 7152 7153 // Reload TOC 7154 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) { 7155 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) 7156 .addImm(TOCOffset) 7157 .addReg(BufReg); 7158 7159 MIB.setMemRefs(MMOBegin, MMOEnd); 7160 } 7161 7162 // Jump 7163 BuildMI(*MBB, MI, DL, 7164 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp); 7165 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR)); 7166 7167 MI->eraseFromParent(); 7168 return MBB; 7169 } 7170 7171 MachineBasicBlock * 7172 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 7173 MachineBasicBlock *BB) const { 7174 if (MI->getOpcode() == PPC::EH_SjLj_SetJmp32 || 7175 MI->getOpcode() == PPC::EH_SjLj_SetJmp64) { 7176 return emitEHSjLjSetJmp(MI, BB); 7177 } else if (MI->getOpcode() == PPC::EH_SjLj_LongJmp32 || 7178 MI->getOpcode() == PPC::EH_SjLj_LongJmp64) { 7179 return emitEHSjLjLongJmp(MI, BB); 7180 } 7181 7182 const TargetInstrInfo *TII = 7183 getTargetMachine().getSubtargetImpl()->getInstrInfo(); 7184 7185 // To "insert" these instructions we actually have to insert their 7186 // control-flow patterns. 
7187 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 7188 MachineFunction::iterator It = BB; 7189 ++It; 7190 7191 MachineFunction *F = BB->getParent(); 7192 7193 if (Subtarget.hasISEL() && (MI->getOpcode() == PPC::SELECT_CC_I4 || 7194 MI->getOpcode() == PPC::SELECT_CC_I8 || 7195 MI->getOpcode() == PPC::SELECT_I4 || 7196 MI->getOpcode() == PPC::SELECT_I8)) { 7197 SmallVector<MachineOperand, 2> Cond; 7198 if (MI->getOpcode() == PPC::SELECT_CC_I4 || 7199 MI->getOpcode() == PPC::SELECT_CC_I8) 7200 Cond.push_back(MI->getOperand(4)); 7201 else 7202 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET)); 7203 Cond.push_back(MI->getOperand(1)); 7204 7205 DebugLoc dl = MI->getDebugLoc(); 7206 const TargetInstrInfo *TII = 7207 getTargetMachine().getSubtargetImpl()->getInstrInfo(); 7208 TII->insertSelect(*BB, MI, dl, MI->getOperand(0).getReg(), 7209 Cond, MI->getOperand(2).getReg(), 7210 MI->getOperand(3).getReg()); 7211 } else if (MI->getOpcode() == PPC::SELECT_CC_I4 || 7212 MI->getOpcode() == PPC::SELECT_CC_I8 || 7213 MI->getOpcode() == PPC::SELECT_CC_F4 || 7214 MI->getOpcode() == PPC::SELECT_CC_F8 || 7215 MI->getOpcode() == PPC::SELECT_CC_VRRC || 7216 MI->getOpcode() == PPC::SELECT_CC_VSFRC || 7217 MI->getOpcode() == PPC::SELECT_CC_VSRC || 7218 MI->getOpcode() == PPC::SELECT_I4 || 7219 MI->getOpcode() == PPC::SELECT_I8 || 7220 MI->getOpcode() == PPC::SELECT_F4 || 7221 MI->getOpcode() == PPC::SELECT_F8 || 7222 MI->getOpcode() == PPC::SELECT_VRRC || 7223 MI->getOpcode() == PPC::SELECT_VSFRC || 7224 MI->getOpcode() == PPC::SELECT_VSRC) { 7225 // The incoming instruction knows the destination vreg to set, the 7226 // condition code register to branch on, the true/false values to 7227 // select between, and a branch opcode to use. 7228 7229 // thisMBB: 7230 // ... 7231 // TrueVal = ... 7232 // cmpTY ccX, r1, r2 7233 // bCC copy1MBB 7234 // fallthrough --> copy0MBB 7235 MachineBasicBlock *thisMBB = BB; 7236 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 7237 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 7238 DebugLoc dl = MI->getDebugLoc(); 7239 F->insert(It, copy0MBB); 7240 F->insert(It, sinkMBB); 7241 7242 // Transfer the remainder of BB and its successor edges to sinkMBB. 7243 sinkMBB->splice(sinkMBB->begin(), BB, 7244 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 7245 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 7246 7247 // Next, add the true and fallthrough blocks as its successors. 7248 BB->addSuccessor(copy0MBB); 7249 BB->addSuccessor(sinkMBB); 7250 7251 if (MI->getOpcode() == PPC::SELECT_I4 || 7252 MI->getOpcode() == PPC::SELECT_I8 || 7253 MI->getOpcode() == PPC::SELECT_F4 || 7254 MI->getOpcode() == PPC::SELECT_F8 || 7255 MI->getOpcode() == PPC::SELECT_VRRC || 7256 MI->getOpcode() == PPC::SELECT_VSFRC || 7257 MI->getOpcode() == PPC::SELECT_VSRC) { 7258 BuildMI(BB, dl, TII->get(PPC::BC)) 7259 .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 7260 } else { 7261 unsigned SelectPred = MI->getOperand(4).getImm(); 7262 BuildMI(BB, dl, TII->get(PPC::BCC)) 7263 .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 7264 } 7265 7266 // copy0MBB: 7267 // %FalseValue = ... 7268 // # fallthrough to sinkMBB 7269 BB = copy0MBB; 7270 7271 // Update machine-CFG edges 7272 BB->addSuccessor(sinkMBB); 7273 7274 // sinkMBB: 7275 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 7276 // ... 
7277 BB = sinkMBB; 7278 BuildMI(*BB, BB->begin(), dl, 7279 TII->get(PPC::PHI), MI->getOperand(0).getReg()) 7280 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB) 7281 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 7282 } else if (MI->getOpcode() == PPC::ReadTB) { 7283 // To read the 64-bit time-base register on a 32-bit target, we read the 7284 // two halves. Should the counter have wrapped while it was being read, we 7285 // need to try again. 7286 // ... 7287 // readLoop: 7288 // mfspr Rx,TBU # load from TBU 7289 // mfspr Ry,TB # load from TB 7290 // mfspr Rz,TBU # load from TBU 7291 // cmpw crX,Rx,Rz # check if 'old'='new' 7292 // bne readLoop # branch if they're not equal 7293 // ... 7294 7295 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB); 7296 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 7297 DebugLoc dl = MI->getDebugLoc(); 7298 F->insert(It, readMBB); 7299 F->insert(It, sinkMBB); 7300 7301 // Transfer the remainder of BB and its successor edges to sinkMBB. 7302 sinkMBB->splice(sinkMBB->begin(), BB, 7303 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 7304 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 7305 7306 BB->addSuccessor(readMBB); 7307 BB = readMBB; 7308 7309 MachineRegisterInfo &RegInfo = F->getRegInfo(); 7310 unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 7311 unsigned LoReg = MI->getOperand(0).getReg(); 7312 unsigned HiReg = MI->getOperand(1).getReg(); 7313 7314 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269); 7315 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268); 7316 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269); 7317 7318 unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 7319 7320 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg) 7321 .addReg(HiReg).addReg(ReadAgainReg); 7322 BuildMI(BB, dl, TII->get(PPC::BCC)) 7323 .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB); 7324 7325 BB->addSuccessor(readMBB); 7326 BB->addSuccessor(sinkMBB); 7327 } 7328 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 7329 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 7330 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 7331 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 7332 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 7333 BB = EmitAtomicBinary(MI, BB, false, PPC::ADD4); 7334 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 7335 BB = EmitAtomicBinary(MI, BB, true, PPC::ADD8); 7336 7337 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 7338 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 7339 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 7340 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 7341 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 7342 BB = EmitAtomicBinary(MI, BB, false, PPC::AND); 7343 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 7344 BB = EmitAtomicBinary(MI, BB, true, PPC::AND8); 7345 7346 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 7347 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 7348 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 7349 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 7350 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 7351 BB = EmitAtomicBinary(MI, BB, false, PPC::OR); 7352 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 7353 BB = EmitAtomicBinary(MI, BB, true, PPC::OR8); 7354 7355 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 7356 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 7357 else
if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 7358 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 7359 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 7360 BB = EmitAtomicBinary(MI, BB, false, PPC::XOR); 7361 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 7362 BB = EmitAtomicBinary(MI, BB, true, PPC::XOR8); 7363 7364 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 7365 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND); 7366 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 7367 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND); 7368 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 7369 BB = EmitAtomicBinary(MI, BB, false, PPC::NAND); 7370 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 7371 BB = EmitAtomicBinary(MI, BB, true, PPC::NAND8); 7372 7373 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 7374 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 7375 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 7376 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 7377 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32) 7378 BB = EmitAtomicBinary(MI, BB, false, PPC::SUBF); 7379 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 7380 BB = EmitAtomicBinary(MI, BB, true, PPC::SUBF8); 7381 7382 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8) 7383 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 7384 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16) 7385 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 7386 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32) 7387 BB = EmitAtomicBinary(MI, BB, false, 0); 7388 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64) 7389 BB = EmitAtomicBinary(MI, BB, true, 0); 7390 7391 else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 7392 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64) { 7393 bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64; 7394 7395 unsigned dest = MI->getOperand(0).getReg(); 7396 unsigned ptrA = MI->getOperand(1).getReg(); 7397 unsigned ptrB = MI->getOperand(2).getReg(); 7398 unsigned oldval = MI->getOperand(3).getReg(); 7399 unsigned newval = MI->getOperand(4).getReg(); 7400 DebugLoc dl = MI->getDebugLoc(); 7401 7402 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 7403 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 7404 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 7405 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 7406 F->insert(It, loop1MBB); 7407 F->insert(It, loop2MBB); 7408 F->insert(It, midMBB); 7409 F->insert(It, exitMBB); 7410 exitMBB->splice(exitMBB->begin(), BB, 7411 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 7412 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 7413 7414 // thisMBB: 7415 // ... 7416 // fallthrough --> loopMBB 7417 BB->addSuccessor(loop1MBB); 7418 7419 // loop1MBB: 7420 // l[wd]arx dest, ptr 7421 // cmp[wd] dest, oldval 7422 // bne- midMBB 7423 // loop2MBB: 7424 // st[wd]cx. newval, ptr 7425 // bne- loopMBB 7426 // b exitBB 7427 // midMBB: 7428 // st[wd]cx. dest, ptr 7429 // exitBB: 7430 BB = loop1MBB; 7431 BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest) 7432 .addReg(ptrA).addReg(ptrB); 7433 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0) 7434 .addReg(oldval).addReg(dest); 7435 BuildMI(BB, dl, TII->get(PPC::BCC)) 7436 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 7437 BB->addSuccessor(loop2MBB); 7438 BB->addSuccessor(midMBB); 7439 7440 BB = loop2MBB; 7441 BuildMI(BB, dl, TII->get(is64bit ? 
PPC::STDCX : PPC::STWCX)) 7442 .addReg(newval).addReg(ptrA).addReg(ptrB); 7443 BuildMI(BB, dl, TII->get(PPC::BCC)) 7444 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 7445 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 7446 BB->addSuccessor(loop1MBB); 7447 BB->addSuccessor(exitMBB); 7448 7449 BB = midMBB; 7450 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 7451 .addReg(dest).addReg(ptrA).addReg(ptrB); 7452 BB->addSuccessor(exitMBB); 7453 7454 // exitMBB: 7455 // ... 7456 BB = exitMBB; 7457 } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 7458 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 7459 // We must use 64-bit registers for addresses when targeting 64-bit, 7460 // since we're actually doing arithmetic on them. Other registers 7461 // can be 32-bit. 7462 bool is64bit = Subtarget.isPPC64(); 7463 bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 7464 7465 unsigned dest = MI->getOperand(0).getReg(); 7466 unsigned ptrA = MI->getOperand(1).getReg(); 7467 unsigned ptrB = MI->getOperand(2).getReg(); 7468 unsigned oldval = MI->getOperand(3).getReg(); 7469 unsigned newval = MI->getOperand(4).getReg(); 7470 DebugLoc dl = MI->getDebugLoc(); 7471 7472 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 7473 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 7474 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 7475 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 7476 F->insert(It, loop1MBB); 7477 F->insert(It, loop2MBB); 7478 F->insert(It, midMBB); 7479 F->insert(It, exitMBB); 7480 exitMBB->splice(exitMBB->begin(), BB, 7481 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 7482 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 7483 7484 MachineRegisterInfo &RegInfo = F->getRegInfo(); 7485 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 7486 : &PPC::GPRCRegClass; 7487 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 7488 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 7489 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 7490 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC); 7491 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC); 7492 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC); 7493 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC); 7494 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 7495 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 7496 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 7497 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 7498 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 7499 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 7500 unsigned Ptr1Reg; 7501 unsigned TmpReg = RegInfo.createVirtualRegister(RC); 7502 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 7503 // thisMBB: 7504 // ... 7505 // fallthrough --> loopMBB 7506 BB->addSuccessor(loop1MBB); 7507 7508 // The 4-byte load must be aligned, while a char or short may be 7509 // anywhere in the word. Hence all this nasty bookkeeping code. 
7510 // add ptr1, ptrA, ptrB [copy if ptrA==0] 7511 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 7512 // xori shift, shift1, 24 [16] 7513 // rlwinm ptr, ptr1, 0, 0, 29 7514 // slw newval2, newval, shift 7515 // slw oldval2, oldval,shift 7516 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 7517 // slw mask, mask2, shift 7518 // and newval3, newval2, mask 7519 // and oldval3, oldval2, mask 7520 // loop1MBB: 7521 // lwarx tmpDest, ptr 7522 // and tmp, tmpDest, mask 7523 // cmpw tmp, oldval3 7524 // bne- midMBB 7525 // loop2MBB: 7526 // andc tmp2, tmpDest, mask 7527 // or tmp4, tmp2, newval3 7528 // stwcx. tmp4, ptr 7529 // bne- loop1MBB 7530 // b exitBB 7531 // midMBB: 7532 // stwcx. tmpDest, ptr 7533 // exitBB: 7534 // srw dest, tmpDest, shift 7535 if (ptrA != ZeroReg) { 7536 Ptr1Reg = RegInfo.createVirtualRegister(RC); 7537 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 7538 .addReg(ptrA).addReg(ptrB); 7539 } else { 7540 Ptr1Reg = ptrB; 7541 } 7542 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 7543 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 7544 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 7545 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 7546 if (is64bit) 7547 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 7548 .addReg(Ptr1Reg).addImm(0).addImm(61); 7549 else 7550 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 7551 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 7552 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 7553 .addReg(newval).addReg(ShiftReg); 7554 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 7555 .addReg(oldval).addReg(ShiftReg); 7556 if (is8bit) 7557 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 7558 else { 7559 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 7560 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 7561 .addReg(Mask3Reg).addImm(65535); 7562 } 7563 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 7564 .addReg(Mask2Reg).addReg(ShiftReg); 7565 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 7566 .addReg(NewVal2Reg).addReg(MaskReg); 7567 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 7568 .addReg(OldVal2Reg).addReg(MaskReg); 7569 7570 BB = loop1MBB; 7571 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 7572 .addReg(ZeroReg).addReg(PtrReg); 7573 BuildMI(BB, dl, TII->get(PPC::AND),TmpReg) 7574 .addReg(TmpDestReg).addReg(MaskReg); 7575 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 7576 .addReg(TmpReg).addReg(OldVal3Reg); 7577 BuildMI(BB, dl, TII->get(PPC::BCC)) 7578 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 7579 BB->addSuccessor(loop2MBB); 7580 BB->addSuccessor(midMBB); 7581 7582 BB = loop2MBB; 7583 BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg) 7584 .addReg(TmpDestReg).addReg(MaskReg); 7585 BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg) 7586 .addReg(Tmp2Reg).addReg(NewVal3Reg); 7587 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg) 7588 .addReg(ZeroReg).addReg(PtrReg); 7589 BuildMI(BB, dl, TII->get(PPC::BCC)) 7590 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 7591 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 7592 BB->addSuccessor(loop1MBB); 7593 BB->addSuccessor(exitMBB); 7594 7595 BB = midMBB; 7596 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg) 7597 .addReg(ZeroReg).addReg(PtrReg); 7598 BB->addSuccessor(exitMBB); 7599 7600 // exitMBB: 7601 // ... 
7602 BB = exitMBB; 7603 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg) 7604 .addReg(ShiftReg); 7605 } else if (MI->getOpcode() == PPC::FADDrtz) { 7606 // This pseudo performs an FADD with rounding mode temporarily forced 7607 // to round-to-zero. We emit this via custom inserter since the FPSCR 7608 // is not modeled at the SelectionDAG level. 7609 unsigned Dest = MI->getOperand(0).getReg(); 7610 unsigned Src1 = MI->getOperand(1).getReg(); 7611 unsigned Src2 = MI->getOperand(2).getReg(); 7612 DebugLoc dl = MI->getDebugLoc(); 7613 7614 MachineRegisterInfo &RegInfo = F->getRegInfo(); 7615 unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 7616 7617 // Save FPSCR value. 7618 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg); 7619 7620 // Set rounding mode to round-to-zero. 7621 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31); 7622 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30); 7623 7624 // Perform addition. 7625 BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2); 7626 7627 // Restore FPSCR value. 7628 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF)).addImm(1).addReg(MFFSReg); 7629 } else if (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT || 7630 MI->getOpcode() == PPC::ANDIo_1_GT_BIT || 7631 MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 7632 MI->getOpcode() == PPC::ANDIo_1_GT_BIT8) { 7633 unsigned Opcode = (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 7634 MI->getOpcode() == PPC::ANDIo_1_GT_BIT8) ? 7635 PPC::ANDIo8 : PPC::ANDIo; 7636 bool isEQ = (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT || 7637 MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8); 7638 7639 MachineRegisterInfo &RegInfo = F->getRegInfo(); 7640 unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ? 7641 &PPC::GPRCRegClass : 7642 &PPC::G8RCRegClass); 7643 7644 DebugLoc dl = MI->getDebugLoc(); 7645 BuildMI(*BB, MI, dl, TII->get(Opcode), Dest) 7646 .addReg(MI->getOperand(1).getReg()).addImm(1); 7647 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), 7648 MI->getOperand(0).getReg()) 7649 .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT); 7650 } else { 7651 llvm_unreachable("Unexpected instr type to insert"); 7652 } 7653 7654 MI->eraseFromParent(); // The pseudo instruction is gone now. 7655 return BB; 7656 } 7657 7658 //===----------------------------------------------------------------------===// 7659 // Target Optimization Hooks 7660 //===----------------------------------------------------------------------===// 7661 7662 SDValue PPCTargetLowering::getRsqrtEstimate(SDValue Operand, 7663 DAGCombinerInfo &DCI, 7664 unsigned &RefinementSteps, 7665 bool &UseOneConstNR) const { 7666 EVT VT = Operand.getValueType(); 7667 if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) || 7668 (VT == MVT::f64 && Subtarget.hasFRSQRTE()) || 7669 (VT == MVT::v4f32 && Subtarget.hasAltivec()) || 7670 (VT == MVT::v2f64 && Subtarget.hasVSX())) { 7671 // Convergence is quadratic, so we essentially double the number of digits 7672 // correct after every iteration. For both FRE and FRSQRTE, the minimum 7673 // architected relative accuracy is 2^-5. When hasRecipPrec(), this is 7674 // 2^-14. IEEE float has 23 digits and double has 52 digits. 7675 RefinementSteps = Subtarget.hasRecipPrec() ? 
1 : 3;
7676     if (VT.getScalarType() == MVT::f64)
7677       ++RefinementSteps;
7678     UseOneConstNR = true;
7679     return DCI.DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
7680   }
7681   return SDValue();
7682 }
7683 
7684 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand,
7685                                             DAGCombinerInfo &DCI,
7686                                             unsigned &RefinementSteps) const {
7687   EVT VT = Operand.getValueType();
7688   if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
7689       (VT == MVT::f64 && Subtarget.hasFRE()) ||
7690       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
7691       (VT == MVT::v2f64 && Subtarget.hasVSX())) {
7692     // Convergence is quadratic, so we essentially double the number of digits
7693     // correct after every iteration. For both FRE and FRSQRTE, the minimum
7694     // architected relative accuracy is 2^-5. When hasRecipPrec(), this is
7695     // 2^-14. IEEE float has 23 digits and double has 52 digits.
7696     RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
7697     if (VT.getScalarType() == MVT::f64)
7698       ++RefinementSteps;
7699     return DCI.DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
7700   }
7701   return SDValue();
7702 }
7703 
7704 bool PPCTargetLowering::combineRepeatedFPDivisors(unsigned NumUsers) const {
7705   // Note: This functionality is used only when unsafe-fp-math is enabled, and
7706   // on cores with reciprocal estimates (which are used when unsafe-fp-math is
7707   // enabled for division), this functionality is redundant with the default
7708   // combiner logic (once the division -> reciprocal/multiply transformation
7709   // has taken place). As a result, this matters more for older cores than for
7710   // newer ones.
7711 
7712   // Combine multiple FDIVs with the same divisor into multiple FMULs by the
7713   // reciprocal if there are two or more FDIVs (for embedded cores with only
7714   // one FP pipeline) or three or more FDIVs (for generic OOO cores).
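  // For example (informally): under unsafe-fp-math,
  //   a/d; b/d; c/d   -->   r = 1.0/d; a*r; b*r; c*r
  // trades repeated divides for one reciprocal (or a reciprocal estimate plus
  // refinement) and one multiply per use. The switch below picks how many
  // users of the same divisor make that trade worthwhile on a given core.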
7715 switch (Subtarget.getDarwinDirective()) { 7716 default: 7717 return NumUsers > 2; 7718 case PPC::DIR_440: 7719 case PPC::DIR_A2: 7720 case PPC::DIR_E500mc: 7721 case PPC::DIR_E5500: 7722 return NumUsers > 1; 7723 } 7724 } 7725 7726 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, 7727 unsigned Bytes, int Dist, 7728 SelectionDAG &DAG) { 7729 if (VT.getSizeInBits() / 8 != Bytes) 7730 return false; 7731 7732 SDValue BaseLoc = Base->getBasePtr(); 7733 if (Loc.getOpcode() == ISD::FrameIndex) { 7734 if (BaseLoc.getOpcode() != ISD::FrameIndex) 7735 return false; 7736 const MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 7737 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 7738 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 7739 int FS = MFI->getObjectSize(FI); 7740 int BFS = MFI->getObjectSize(BFI); 7741 if (FS != BFS || FS != (int)Bytes) return false; 7742 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes); 7743 } 7744 7745 // Handle X+C 7746 if (DAG.isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc && 7747 cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes) 7748 return true; 7749 7750 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7751 const GlobalValue *GV1 = nullptr; 7752 const GlobalValue *GV2 = nullptr; 7753 int64_t Offset1 = 0; 7754 int64_t Offset2 = 0; 7755 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1); 7756 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2); 7757 if (isGA1 && isGA2 && GV1 == GV2) 7758 return Offset1 == (Offset2 + Dist*Bytes); 7759 return false; 7760 } 7761 7762 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does 7763 // not enforce equality of the chain operands. 
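// Informal example of the "consecutive" test (frame indices invented for
// illustration): for a 16-byte base access at frame object #1 with offset 32,
// a location at a same-sized frame object whose offset is 48 is consecutive
// with Dist = 1 and Bytes = 16, since offset(Loc) == offset(Base) + Dist*Bytes.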
7764 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
7765                             unsigned Bytes, int Dist,
7766                             SelectionDAG &DAG) {
7767   if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
7768     EVT VT = LS->getMemoryVT();
7769     SDValue Loc = LS->getBasePtr();
7770     return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
7771   }
7772 
7773   if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
7774     EVT VT;
7775     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
7776     default: return false;
7777     case Intrinsic::ppc_altivec_lvx:
7778     case Intrinsic::ppc_altivec_lvxl:
7779     case Intrinsic::ppc_vsx_lxvw4x:
7780       VT = MVT::v4i32;
7781       break;
7782     case Intrinsic::ppc_vsx_lxvd2x:
7783       VT = MVT::v2f64;
7784       break;
7785     case Intrinsic::ppc_altivec_lvebx:
7786       VT = MVT::i8;
7787       break;
7788     case Intrinsic::ppc_altivec_lvehx:
7789       VT = MVT::i16;
7790       break;
7791     case Intrinsic::ppc_altivec_lvewx:
7792       VT = MVT::i32;
7793       break;
7794     }
7795 
7796     return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
7797   }
7798 
7799   if (N->getOpcode() == ISD::INTRINSIC_VOID) {
7800     EVT VT;
7801     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
7802     default: return false;
7803     case Intrinsic::ppc_altivec_stvx:
7804     case Intrinsic::ppc_altivec_stvxl:
7805     case Intrinsic::ppc_vsx_stxvw4x:
7806       VT = MVT::v4i32;
7807       break;
7808     case Intrinsic::ppc_vsx_stxvd2x:
7809       VT = MVT::v2f64;
7810       break;
7811     case Intrinsic::ppc_altivec_stvebx:
7812       VT = MVT::i8;
7813       break;
7814     case Intrinsic::ppc_altivec_stvehx:
7815       VT = MVT::i16;
7816       break;
7817     case Intrinsic::ppc_altivec_stvewx:
7818       VT = MVT::i32;
7819       break;
7820     }
7821 
7822     return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
7823   }
7824 
7825   return false;
7826 }
7827 
7828 // Return true if there is a nearby consecutive load to the one provided
7829 // (regardless of alignment). We search up and down the chain, looking through
7830 // token factors and other loads (but nothing else). As a result, a true return
7831 // value indicates that it is safe to create a new consecutive load adjacent to
7832 // the load provided.
7833 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
7834   SDValue Chain = LD->getChain();
7835   EVT VT = LD->getMemoryVT();
7836 
7837   SmallSet<SDNode *, 16> LoadRoots;
7838   SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
7839   SmallSet<SDNode *, 16> Visited;
7840 
7841   // First, search up the chain, branching to follow all token-factor operands.
7842   // If we find a consecutive load, then we're done; otherwise, record all
7843   // nodes just above the top-level loads and token factors.
7844   while (!Queue.empty()) {
7845     SDNode *ChainNext = Queue.pop_back_val();
7846     if (!Visited.insert(ChainNext).second)
7847       continue;
7848 
7849     if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
7850       if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
7851         return true;
7852 
7853       if (!Visited.count(ChainLD->getChain().getNode()))
7854         Queue.push_back(ChainLD->getChain().getNode());
7855     } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
7856       for (const SDUse &O : ChainNext->ops())
7857         if (!Visited.count(O.getNode()))
7858           Queue.push_back(O.getNode());
7859     } else
7860       LoadRoots.insert(ChainNext);
7861   }
7862 
7863   // Second, search down the chain, starting from the top-level nodes recorded
7864   // in the first phase. These top-level nodes are the nodes just above all
7865   // loads and token factors.
Starting with their uses, recursively look though 7866 // all loads (just the chain uses) and token factors to find a consecutive 7867 // load. 7868 Visited.clear(); 7869 Queue.clear(); 7870 7871 for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(), 7872 IE = LoadRoots.end(); I != IE; ++I) { 7873 Queue.push_back(*I); 7874 7875 while (!Queue.empty()) { 7876 SDNode *LoadRoot = Queue.pop_back_val(); 7877 if (!Visited.insert(LoadRoot).second) 7878 continue; 7879 7880 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot)) 7881 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG)) 7882 return true; 7883 7884 for (SDNode::use_iterator UI = LoadRoot->use_begin(), 7885 UE = LoadRoot->use_end(); UI != UE; ++UI) 7886 if (((isa<MemSDNode>(*UI) && 7887 cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) || 7888 UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI)) 7889 Queue.push_back(*UI); 7890 } 7891 } 7892 7893 return false; 7894 } 7895 7896 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N, 7897 DAGCombinerInfo &DCI) const { 7898 SelectionDAG &DAG = DCI.DAG; 7899 SDLoc dl(N); 7900 7901 assert(Subtarget.useCRBits() && 7902 "Expecting to be tracking CR bits"); 7903 // If we're tracking CR bits, we need to be careful that we don't have: 7904 // trunc(binary-ops(zext(x), zext(y))) 7905 // or 7906 // trunc(binary-ops(binary-ops(zext(x), zext(y)), ...) 7907 // such that we're unnecessarily moving things into GPRs when it would be 7908 // better to keep them in CR bits. 7909 7910 // Note that trunc here can be an actual i1 trunc, or can be the effective 7911 // truncation that comes from a setcc or select_cc. 7912 if (N->getOpcode() == ISD::TRUNCATE && 7913 N->getValueType(0) != MVT::i1) 7914 return SDValue(); 7915 7916 if (N->getOperand(0).getValueType() != MVT::i32 && 7917 N->getOperand(0).getValueType() != MVT::i64) 7918 return SDValue(); 7919 7920 if (N->getOpcode() == ISD::SETCC || 7921 N->getOpcode() == ISD::SELECT_CC) { 7922 // If we're looking at a comparison, then we need to make sure that the 7923 // high bits (all except for the first) don't matter the result. 7924 ISD::CondCode CC = 7925 cast<CondCodeSDNode>(N->getOperand( 7926 N->getOpcode() == ISD::SETCC ? 2 : 4))->get(); 7927 unsigned OpBits = N->getOperand(0).getValueSizeInBits(); 7928 7929 if (ISD::isSignedIntSetCC(CC)) { 7930 if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits || 7931 DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits) 7932 return SDValue(); 7933 } else if (ISD::isUnsignedIntSetCC(CC)) { 7934 if (!DAG.MaskedValueIsZero(N->getOperand(0), 7935 APInt::getHighBitsSet(OpBits, OpBits-1)) || 7936 !DAG.MaskedValueIsZero(N->getOperand(1), 7937 APInt::getHighBitsSet(OpBits, OpBits-1))) 7938 return SDValue(); 7939 } else { 7940 // This is neither a signed nor an unsigned comparison, just make sure 7941 // that the high bits are equal. 7942 APInt Op1Zero, Op1One; 7943 APInt Op2Zero, Op2One; 7944 DAG.computeKnownBits(N->getOperand(0), Op1Zero, Op1One); 7945 DAG.computeKnownBits(N->getOperand(1), Op2Zero, Op2One); 7946 7947 // We don't really care about what is known about the first bit (if 7948 // anything), so clear it in all masks prior to comparing them. 
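      // Bit 0 carries the boolean payload being tracked here; only the
      // higher bits need to agree between the two operands for the compare
      // to be unaffected by truncating to i1.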
7949 Op1Zero.clearBit(0); Op1One.clearBit(0); 7950 Op2Zero.clearBit(0); Op2One.clearBit(0); 7951 7952 if (Op1Zero != Op2Zero || Op1One != Op2One) 7953 return SDValue(); 7954 } 7955 } 7956 7957 // We now know that the higher-order bits are irrelevant, we just need to 7958 // make sure that all of the intermediate operations are bit operations, and 7959 // all inputs are extensions. 7960 if (N->getOperand(0).getOpcode() != ISD::AND && 7961 N->getOperand(0).getOpcode() != ISD::OR && 7962 N->getOperand(0).getOpcode() != ISD::XOR && 7963 N->getOperand(0).getOpcode() != ISD::SELECT && 7964 N->getOperand(0).getOpcode() != ISD::SELECT_CC && 7965 N->getOperand(0).getOpcode() != ISD::TRUNCATE && 7966 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND && 7967 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND && 7968 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND) 7969 return SDValue(); 7970 7971 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) && 7972 N->getOperand(1).getOpcode() != ISD::AND && 7973 N->getOperand(1).getOpcode() != ISD::OR && 7974 N->getOperand(1).getOpcode() != ISD::XOR && 7975 N->getOperand(1).getOpcode() != ISD::SELECT && 7976 N->getOperand(1).getOpcode() != ISD::SELECT_CC && 7977 N->getOperand(1).getOpcode() != ISD::TRUNCATE && 7978 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND && 7979 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND && 7980 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND) 7981 return SDValue(); 7982 7983 SmallVector<SDValue, 4> Inputs; 7984 SmallVector<SDValue, 8> BinOps, PromOps; 7985 SmallPtrSet<SDNode *, 16> Visited; 7986 7987 for (unsigned i = 0; i < 2; ++i) { 7988 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 7989 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 7990 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 7991 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) || 7992 isa<ConstantSDNode>(N->getOperand(i))) 7993 Inputs.push_back(N->getOperand(i)); 7994 else 7995 BinOps.push_back(N->getOperand(i)); 7996 7997 if (N->getOpcode() == ISD::TRUNCATE) 7998 break; 7999 } 8000 8001 // Visit all inputs, collect all binary operations (and, or, xor and 8002 // select) that are all fed by extensions. 8003 while (!BinOps.empty()) { 8004 SDValue BinOp = BinOps.back(); 8005 BinOps.pop_back(); 8006 8007 if (!Visited.insert(BinOp.getNode()).second) 8008 continue; 8009 8010 PromOps.push_back(BinOp); 8011 8012 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 8013 // The condition of the select is not promoted. 
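      // That is, for (select cond, t, f) only t and f (operands 1 and 2) are
      // walked, and for (select_cc lhs, rhs, t, f, cc) only t and f
      // (operands 2 and 3); the comparison inputs keep their original type.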
8014 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 8015 continue; 8016 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 8017 continue; 8018 8019 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 8020 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 8021 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 8022 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) || 8023 isa<ConstantSDNode>(BinOp.getOperand(i))) { 8024 Inputs.push_back(BinOp.getOperand(i)); 8025 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 8026 BinOp.getOperand(i).getOpcode() == ISD::OR || 8027 BinOp.getOperand(i).getOpcode() == ISD::XOR || 8028 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 8029 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC || 8030 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 8031 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 8032 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 8033 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) { 8034 BinOps.push_back(BinOp.getOperand(i)); 8035 } else { 8036 // We have an input that is not an extension or another binary 8037 // operation; we'll abort this transformation. 8038 return SDValue(); 8039 } 8040 } 8041 } 8042 8043 // Make sure that this is a self-contained cluster of operations (which 8044 // is not quite the same thing as saying that everything has only one 8045 // use). 8046 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 8047 if (isa<ConstantSDNode>(Inputs[i])) 8048 continue; 8049 8050 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 8051 UE = Inputs[i].getNode()->use_end(); 8052 UI != UE; ++UI) { 8053 SDNode *User = *UI; 8054 if (User != N && !Visited.count(User)) 8055 return SDValue(); 8056 8057 // Make sure that we're not going to promote the non-output-value 8058 // operand(s) or SELECT or SELECT_CC. 8059 // FIXME: Although we could sometimes handle this, and it does occur in 8060 // practice that one of the condition inputs to the select is also one of 8061 // the outputs, we currently can't deal with this. 8062 if (User->getOpcode() == ISD::SELECT) { 8063 if (User->getOperand(0) == Inputs[i]) 8064 return SDValue(); 8065 } else if (User->getOpcode() == ISD::SELECT_CC) { 8066 if (User->getOperand(0) == Inputs[i] || 8067 User->getOperand(1) == Inputs[i]) 8068 return SDValue(); 8069 } 8070 } 8071 } 8072 8073 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 8074 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 8075 UE = PromOps[i].getNode()->use_end(); 8076 UI != UE; ++UI) { 8077 SDNode *User = *UI; 8078 if (User != N && !Visited.count(User)) 8079 return SDValue(); 8080 8081 // Make sure that we're not going to promote the non-output-value 8082 // operand(s) or SELECT or SELECT_CC. 8083 // FIXME: Although we could sometimes handle this, and it does occur in 8084 // practice that one of the condition inputs to the select is also one of 8085 // the outputs, we currently can't deal with this. 8086 if (User->getOpcode() == ISD::SELECT) { 8087 if (User->getOperand(0) == PromOps[i]) 8088 return SDValue(); 8089 } else if (User->getOpcode() == ISD::SELECT_CC) { 8090 if (User->getOperand(0) == PromOps[i] || 8091 User->getOperand(1) == PromOps[i]) 8092 return SDValue(); 8093 } 8094 } 8095 } 8096 8097 // Replace all inputs with the extension operand. 
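  // Each non-constant input here is a sign/zero/any extension of an i1
  // value, so redirecting its uses to getOperand(0) effectively rewrites,
  // e.g., (zext i1 %x) back to %x throughout the cluster.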
8098 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 8099 // Constants may have users outside the cluster of to-be-promoted nodes, 8100 // and so we need to replace those as we do the promotions. 8101 if (isa<ConstantSDNode>(Inputs[i])) 8102 continue; 8103 else 8104 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0)); 8105 } 8106 8107 // Replace all operations (these are all the same, but have a different 8108 // (i1) return type). DAG.getNode will validate that the types of 8109 // a binary operator match, so go through the list in reverse so that 8110 // we've likely promoted both operands first. Any intermediate truncations or 8111 // extensions disappear. 8112 while (!PromOps.empty()) { 8113 SDValue PromOp = PromOps.back(); 8114 PromOps.pop_back(); 8115 8116 if (PromOp.getOpcode() == ISD::TRUNCATE || 8117 PromOp.getOpcode() == ISD::SIGN_EXTEND || 8118 PromOp.getOpcode() == ISD::ZERO_EXTEND || 8119 PromOp.getOpcode() == ISD::ANY_EXTEND) { 8120 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) && 8121 PromOp.getOperand(0).getValueType() != MVT::i1) { 8122 // The operand is not yet ready (see comment below). 8123 PromOps.insert(PromOps.begin(), PromOp); 8124 continue; 8125 } 8126 8127 SDValue RepValue = PromOp.getOperand(0); 8128 if (isa<ConstantSDNode>(RepValue)) 8129 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue); 8130 8131 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue); 8132 continue; 8133 } 8134 8135 unsigned C; 8136 switch (PromOp.getOpcode()) { 8137 default: C = 0; break; 8138 case ISD::SELECT: C = 1; break; 8139 case ISD::SELECT_CC: C = 2; break; 8140 } 8141 8142 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 8143 PromOp.getOperand(C).getValueType() != MVT::i1) || 8144 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 8145 PromOp.getOperand(C+1).getValueType() != MVT::i1)) { 8146 // The to-be-promoted operands of this node have not yet been 8147 // promoted (this should be rare because we're going through the 8148 // list backward, but if one of the operands has several users in 8149 // this cluster of to-be-promoted nodes, it is possible). 8150 PromOps.insert(PromOps.begin(), PromOp); 8151 continue; 8152 } 8153 8154 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 8155 PromOp.getNode()->op_end()); 8156 8157 // If there are any constant inputs, make sure they're replaced now. 8158 for (unsigned i = 0; i < 2; ++i) 8159 if (isa<ConstantSDNode>(Ops[C+i])) 8160 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]); 8161 8162 DAG.ReplaceAllUsesOfValueWith(PromOp, 8163 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops)); 8164 } 8165 8166 // Now we're left with the initial truncation itself. 8167 if (N->getOpcode() == ISD::TRUNCATE) 8168 return N->getOperand(0); 8169 8170 // Otherwise, this is a comparison. The operands to be compared have just 8171 // changed type (to i1), but everything else is the same. 8172 return SDValue(N, 0); 8173 } 8174 8175 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N, 8176 DAGCombinerInfo &DCI) const { 8177 SelectionDAG &DAG = DCI.DAG; 8178 SDLoc dl(N); 8179 8180 // If we're tracking CR bits, we need to be careful that we don't have: 8181 // zext(binary-ops(trunc(x), trunc(y))) 8182 // or 8183 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...) 8184 // such that we're unnecessarily moving things into CR bits that can more 8185 // efficiently stay in GPRs. 
Note that if we're not certain that the high 8186 // bits are set as required by the final extension, we still may need to do 8187 // some masking to get the proper behavior. 8188 8189 // This same functionality is important on PPC64 when dealing with 8190 // 32-to-64-bit extensions; these occur often when 32-bit values are used as 8191 // the return values of functions. Because it is so similar, it is handled 8192 // here as well. 8193 8194 if (N->getValueType(0) != MVT::i32 && 8195 N->getValueType(0) != MVT::i64) 8196 return SDValue(); 8197 8198 if (!((N->getOperand(0).getValueType() == MVT::i1 && 8199 Subtarget.useCRBits()) || 8200 (N->getOperand(0).getValueType() == MVT::i32 && 8201 Subtarget.isPPC64()))) 8202 return SDValue(); 8203 8204 if (N->getOperand(0).getOpcode() != ISD::AND && 8205 N->getOperand(0).getOpcode() != ISD::OR && 8206 N->getOperand(0).getOpcode() != ISD::XOR && 8207 N->getOperand(0).getOpcode() != ISD::SELECT && 8208 N->getOperand(0).getOpcode() != ISD::SELECT_CC) 8209 return SDValue(); 8210 8211 SmallVector<SDValue, 4> Inputs; 8212 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps; 8213 SmallPtrSet<SDNode *, 16> Visited; 8214 8215 // Visit all inputs, collect all binary operations (and, or, xor and 8216 // select) that are all fed by truncations. 8217 while (!BinOps.empty()) { 8218 SDValue BinOp = BinOps.back(); 8219 BinOps.pop_back(); 8220 8221 if (!Visited.insert(BinOp.getNode()).second) 8222 continue; 8223 8224 PromOps.push_back(BinOp); 8225 8226 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 8227 // The condition of the select is not promoted. 8228 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 8229 continue; 8230 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 8231 continue; 8232 8233 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 8234 isa<ConstantSDNode>(BinOp.getOperand(i))) { 8235 Inputs.push_back(BinOp.getOperand(i)); 8236 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 8237 BinOp.getOperand(i).getOpcode() == ISD::OR || 8238 BinOp.getOperand(i).getOpcode() == ISD::XOR || 8239 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 8240 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) { 8241 BinOps.push_back(BinOp.getOperand(i)); 8242 } else { 8243 // We have an input that is not a truncation or another binary 8244 // operation; we'll abort this transformation. 8245 return SDValue(); 8246 } 8247 } 8248 } 8249 8250 // The operands of a select that must be truncated when the select is 8251 // promoted because the operand is actually part of the to-be-promoted set. 8252 DenseMap<SDNode *, EVT> SelectTruncOp[2]; 8253 8254 // Make sure that this is a self-contained cluster of operations (which 8255 // is not quite the same thing as saying that everything has only one 8256 // use). 8257 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 8258 if (isa<ConstantSDNode>(Inputs[i])) 8259 continue; 8260 8261 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 8262 UE = Inputs[i].getNode()->use_end(); 8263 UI != UE; ++UI) { 8264 SDNode *User = *UI; 8265 if (User != N && !Visited.count(User)) 8266 return SDValue(); 8267 8268 // If we're going to promote the non-output-value operand(s) or SELECT or 8269 // SELECT_CC, record them for truncation. 
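      // These are remembered in SelectTruncOp[0]/[1], keyed by the user node
      // along with the operand's current type, so that the promoted SELECT or
      // SELECT_CC can truncate its comparison inputs back to that type later.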
8270 if (User->getOpcode() == ISD::SELECT) { 8271 if (User->getOperand(0) == Inputs[i]) 8272 SelectTruncOp[0].insert(std::make_pair(User, 8273 User->getOperand(0).getValueType())); 8274 } else if (User->getOpcode() == ISD::SELECT_CC) { 8275 if (User->getOperand(0) == Inputs[i]) 8276 SelectTruncOp[0].insert(std::make_pair(User, 8277 User->getOperand(0).getValueType())); 8278 if (User->getOperand(1) == Inputs[i]) 8279 SelectTruncOp[1].insert(std::make_pair(User, 8280 User->getOperand(1).getValueType())); 8281 } 8282 } 8283 } 8284 8285 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 8286 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 8287 UE = PromOps[i].getNode()->use_end(); 8288 UI != UE; ++UI) { 8289 SDNode *User = *UI; 8290 if (User != N && !Visited.count(User)) 8291 return SDValue(); 8292 8293 // If we're going to promote the non-output-value operand(s) or SELECT or 8294 // SELECT_CC, record them for truncation. 8295 if (User->getOpcode() == ISD::SELECT) { 8296 if (User->getOperand(0) == PromOps[i]) 8297 SelectTruncOp[0].insert(std::make_pair(User, 8298 User->getOperand(0).getValueType())); 8299 } else if (User->getOpcode() == ISD::SELECT_CC) { 8300 if (User->getOperand(0) == PromOps[i]) 8301 SelectTruncOp[0].insert(std::make_pair(User, 8302 User->getOperand(0).getValueType())); 8303 if (User->getOperand(1) == PromOps[i]) 8304 SelectTruncOp[1].insert(std::make_pair(User, 8305 User->getOperand(1).getValueType())); 8306 } 8307 } 8308 } 8309 8310 unsigned PromBits = N->getOperand(0).getValueSizeInBits(); 8311 bool ReallyNeedsExt = false; 8312 if (N->getOpcode() != ISD::ANY_EXTEND) { 8313 // If all of the inputs are not already sign/zero extended, then 8314 // we'll still need to do that at the end. 8315 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 8316 if (isa<ConstantSDNode>(Inputs[i])) 8317 continue; 8318 8319 unsigned OpBits = 8320 Inputs[i].getOperand(0).getValueSizeInBits(); 8321 assert(PromBits < OpBits && "Truncation not to a smaller bit count?"); 8322 8323 if ((N->getOpcode() == ISD::ZERO_EXTEND && 8324 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0), 8325 APInt::getHighBitsSet(OpBits, 8326 OpBits-PromBits))) || 8327 (N->getOpcode() == ISD::SIGN_EXTEND && 8328 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) < 8329 (OpBits-(PromBits-1)))) { 8330 ReallyNeedsExt = true; 8331 break; 8332 } 8333 } 8334 } 8335 8336 // Replace all inputs, either with the truncation operand, or a 8337 // truncation or extension to the final output type. 8338 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 8339 // Constant inputs need to be replaced with the to-be-promoted nodes that 8340 // use them because they might have users outside of the cluster of 8341 // promoted nodes. 8342 if (isa<ConstantSDNode>(Inputs[i])) 8343 continue; 8344 8345 SDValue InSrc = Inputs[i].getOperand(0); 8346 if (Inputs[i].getValueType() == N->getValueType(0)) 8347 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc); 8348 else if (N->getOpcode() == ISD::SIGN_EXTEND) 8349 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 8350 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0))); 8351 else if (N->getOpcode() == ISD::ZERO_EXTEND) 8352 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 8353 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0))); 8354 else 8355 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 8356 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0))); 8357 } 8358 8359 // Replace all operations (these are all the same, but have a different 8360 // (promoted) return type). 
DAG.getNode will validate that the types of 8361 // a binary operator match, so go through the list in reverse so that 8362 // we've likely promoted both operands first. 8363 while (!PromOps.empty()) { 8364 SDValue PromOp = PromOps.back(); 8365 PromOps.pop_back(); 8366 8367 unsigned C; 8368 switch (PromOp.getOpcode()) { 8369 default: C = 0; break; 8370 case ISD::SELECT: C = 1; break; 8371 case ISD::SELECT_CC: C = 2; break; 8372 } 8373 8374 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 8375 PromOp.getOperand(C).getValueType() != N->getValueType(0)) || 8376 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 8377 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) { 8378 // The to-be-promoted operands of this node have not yet been 8379 // promoted (this should be rare because we're going through the 8380 // list backward, but if one of the operands has several users in 8381 // this cluster of to-be-promoted nodes, it is possible). 8382 PromOps.insert(PromOps.begin(), PromOp); 8383 continue; 8384 } 8385 8386 // For SELECT and SELECT_CC nodes, we do a similar check for any 8387 // to-be-promoted comparison inputs. 8388 if (PromOp.getOpcode() == ISD::SELECT || 8389 PromOp.getOpcode() == ISD::SELECT_CC) { 8390 if ((SelectTruncOp[0].count(PromOp.getNode()) && 8391 PromOp.getOperand(0).getValueType() != N->getValueType(0)) || 8392 (SelectTruncOp[1].count(PromOp.getNode()) && 8393 PromOp.getOperand(1).getValueType() != N->getValueType(0))) { 8394 PromOps.insert(PromOps.begin(), PromOp); 8395 continue; 8396 } 8397 } 8398 8399 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 8400 PromOp.getNode()->op_end()); 8401 8402 // If this node has constant inputs, then they'll need to be promoted here. 8403 for (unsigned i = 0; i < 2; ++i) { 8404 if (!isa<ConstantSDNode>(Ops[C+i])) 8405 continue; 8406 if (Ops[C+i].getValueType() == N->getValueType(0)) 8407 continue; 8408 8409 if (N->getOpcode() == ISD::SIGN_EXTEND) 8410 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 8411 else if (N->getOpcode() == ISD::ZERO_EXTEND) 8412 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 8413 else 8414 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 8415 } 8416 8417 // If we've promoted the comparison inputs of a SELECT or SELECT_CC, 8418 // truncate them again to the original value type. 8419 if (PromOp.getOpcode() == ISD::SELECT || 8420 PromOp.getOpcode() == ISD::SELECT_CC) { 8421 auto SI0 = SelectTruncOp[0].find(PromOp.getNode()); 8422 if (SI0 != SelectTruncOp[0].end()) 8423 Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]); 8424 auto SI1 = SelectTruncOp[1].find(PromOp.getNode()); 8425 if (SI1 != SelectTruncOp[1].end()) 8426 Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]); 8427 } 8428 8429 DAG.ReplaceAllUsesOfValueWith(PromOp, 8430 DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops)); 8431 } 8432 8433 // Now we're left with the initial extension itself. 8434 if (!ReallyNeedsExt) 8435 return N->getOperand(0); 8436 8437 // To zero extend, just mask off everything except for the first bit (in the 8438 // i1 case). 
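  // For the sign-extend case handled just below, the payload is shifted into
  // the top bits and then shifted back arithmetically, which replicates the
  // sign bit; e.g. sign-extending an i1 payload inside an i32 becomes
  // (sra (shl x, 31), 31).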
8439 if (N->getOpcode() == ISD::ZERO_EXTEND) 8440 return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0), 8441 DAG.getConstant(APInt::getLowBitsSet( 8442 N->getValueSizeInBits(0), PromBits), 8443 N->getValueType(0))); 8444 8445 assert(N->getOpcode() == ISD::SIGN_EXTEND && 8446 "Invalid extension type"); 8447 EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0)); 8448 SDValue ShiftCst = 8449 DAG.getConstant(N->getValueSizeInBits(0)-PromBits, ShiftAmountTy); 8450 return DAG.getNode(ISD::SRA, dl, N->getValueType(0), 8451 DAG.getNode(ISD::SHL, dl, N->getValueType(0), 8452 N->getOperand(0), ShiftCst), ShiftCst); 8453 } 8454 8455 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N, 8456 DAGCombinerInfo &DCI) const { 8457 assert((N->getOpcode() == ISD::SINT_TO_FP || 8458 N->getOpcode() == ISD::UINT_TO_FP) && 8459 "Need an int -> FP conversion node here"); 8460 8461 if (!Subtarget.has64BitSupport()) 8462 return SDValue(); 8463 8464 SelectionDAG &DAG = DCI.DAG; 8465 SDLoc dl(N); 8466 SDValue Op(N, 0); 8467 8468 // Don't handle ppc_fp128 here or i1 conversions. 8469 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 8470 return SDValue(); 8471 if (Op.getOperand(0).getValueType() == MVT::i1) 8472 return SDValue(); 8473 8474 // For i32 intermediate values, unfortunately, the conversion functions 8475 // leave the upper 32 bits of the value are undefined. Within the set of 8476 // scalar instructions, we have no method for zero- or sign-extending the 8477 // value. Thus, we cannot handle i32 intermediate values here. 8478 if (Op.getOperand(0).getValueType() == MVT::i32) 8479 return SDValue(); 8480 8481 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) && 8482 "UINT_TO_FP is supported only with FPCVT"); 8483 8484 // If we have FCFIDS, then use it when converting to single-precision. 8485 // Otherwise, convert to double-precision and then round. 8486 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) ? 8487 (Op.getOpcode() == ISD::UINT_TO_FP ? 8488 PPCISD::FCFIDUS : PPCISD::FCFIDS) : 8489 (Op.getOpcode() == ISD::UINT_TO_FP ? 8490 PPCISD::FCFIDU : PPCISD::FCFID); 8491 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) ? 8492 MVT::f32 : MVT::f64; 8493 8494 // If we're converting from a float, to an int, and back to a float again, 8495 // then we don't need the store/load pair at all. 8496 if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT && 8497 Subtarget.hasFPCVT()) || 8498 (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) { 8499 SDValue Src = Op.getOperand(0).getOperand(0); 8500 if (Src.getValueType() == MVT::f32) { 8501 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 8502 DCI.AddToWorklist(Src.getNode()); 8503 } 8504 8505 unsigned FCTOp = 8506 Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 8507 PPCISD::FCTIDUZ; 8508 8509 SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src); 8510 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp); 8511 8512 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) { 8513 FP = DAG.getNode(ISD::FP_ROUND, dl, 8514 MVT::f32, FP, DAG.getIntPtrConstant(0)); 8515 DCI.AddToWorklist(FP.getNode()); 8516 } 8517 8518 return FP; 8519 } 8520 8521 return SDValue(); 8522 } 8523 8524 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for 8525 // builtins) into loads with swaps. 
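// Background note (informal): on little-endian subtargets, lxvd2x loads the
// two doublewords of a vector in the opposite element order from what the
// rest of the DAG expects, so an xxswapd is emitted after the load to put
// the elements back in order; the store path below mirrors this.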
8526 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N, 8527 DAGCombinerInfo &DCI) const { 8528 SelectionDAG &DAG = DCI.DAG; 8529 SDLoc dl(N); 8530 SDValue Chain; 8531 SDValue Base; 8532 MachineMemOperand *MMO; 8533 8534 switch (N->getOpcode()) { 8535 default: 8536 llvm_unreachable("Unexpected opcode for little endian VSX load"); 8537 case ISD::LOAD: { 8538 LoadSDNode *LD = cast<LoadSDNode>(N); 8539 Chain = LD->getChain(); 8540 Base = LD->getBasePtr(); 8541 MMO = LD->getMemOperand(); 8542 // If the MMO suggests this isn't a load of a full vector, leave 8543 // things alone. For a built-in, we have to make the change for 8544 // correctness, so if there is a size problem that will be a bug. 8545 if (MMO->getSize() < 16) 8546 return SDValue(); 8547 break; 8548 } 8549 case ISD::INTRINSIC_W_CHAIN: { 8550 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 8551 Chain = Intrin->getChain(); 8552 Base = Intrin->getBasePtr(); 8553 MMO = Intrin->getMemOperand(); 8554 break; 8555 } 8556 } 8557 8558 MVT VecTy = N->getValueType(0).getSimpleVT(); 8559 SDValue LoadOps[] = { Chain, Base }; 8560 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl, 8561 DAG.getVTList(VecTy, MVT::Other), 8562 LoadOps, VecTy, MMO); 8563 DCI.AddToWorklist(Load.getNode()); 8564 Chain = Load.getValue(1); 8565 SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl, 8566 DAG.getVTList(VecTy, MVT::Other), Chain, Load); 8567 DCI.AddToWorklist(Swap.getNode()); 8568 return Swap; 8569 } 8570 8571 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for 8572 // builtins) into stores with swaps. 8573 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N, 8574 DAGCombinerInfo &DCI) const { 8575 SelectionDAG &DAG = DCI.DAG; 8576 SDLoc dl(N); 8577 SDValue Chain; 8578 SDValue Base; 8579 unsigned SrcOpnd; 8580 MachineMemOperand *MMO; 8581 8582 switch (N->getOpcode()) { 8583 default: 8584 llvm_unreachable("Unexpected opcode for little endian VSX store"); 8585 case ISD::STORE: { 8586 StoreSDNode *ST = cast<StoreSDNode>(N); 8587 Chain = ST->getChain(); 8588 Base = ST->getBasePtr(); 8589 MMO = ST->getMemOperand(); 8590 SrcOpnd = 1; 8591 // If the MMO suggests this isn't a store of a full vector, leave 8592 // things alone. For a built-in, we have to make the change for 8593 // correctness, so if there is a size problem that will be a bug. 8594 if (MMO->getSize() < 16) 8595 return SDValue(); 8596 break; 8597 } 8598 case ISD::INTRINSIC_VOID: { 8599 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 8600 Chain = Intrin->getChain(); 8601 // Intrin->getBasePtr() oddly does not get what we want. 
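    // For these store intrinsics the operand order is (chain, intrinsic id,
    // value, pointer), so the value to store is operand 2 and the base
    // pointer is operand 3, which is what we pick out directly below.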
8602 Base = Intrin->getOperand(3); 8603 MMO = Intrin->getMemOperand(); 8604 SrcOpnd = 2; 8605 break; 8606 } 8607 } 8608 8609 SDValue Src = N->getOperand(SrcOpnd); 8610 MVT VecTy = Src.getValueType().getSimpleVT(); 8611 SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl, 8612 DAG.getVTList(VecTy, MVT::Other), Chain, Src); 8613 DCI.AddToWorklist(Swap.getNode()); 8614 Chain = Swap.getValue(1); 8615 SDValue StoreOps[] = { Chain, Swap, Base }; 8616 SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl, 8617 DAG.getVTList(MVT::Other), 8618 StoreOps, VecTy, MMO); 8619 DCI.AddToWorklist(Store.getNode()); 8620 return Store; 8621 } 8622 8623 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, 8624 DAGCombinerInfo &DCI) const { 8625 const TargetMachine &TM = getTargetMachine(); 8626 SelectionDAG &DAG = DCI.DAG; 8627 SDLoc dl(N); 8628 switch (N->getOpcode()) { 8629 default: break; 8630 case PPCISD::SHL: 8631 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 8632 if (C->isNullValue()) // 0 << V -> 0. 8633 return N->getOperand(0); 8634 } 8635 break; 8636 case PPCISD::SRL: 8637 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 8638 if (C->isNullValue()) // 0 >>u V -> 0. 8639 return N->getOperand(0); 8640 } 8641 break; 8642 case PPCISD::SRA: 8643 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 8644 if (C->isNullValue() || // 0 >>s V -> 0. 8645 C->isAllOnesValue()) // -1 >>s V -> -1. 8646 return N->getOperand(0); 8647 } 8648 break; 8649 case ISD::SIGN_EXTEND: 8650 case ISD::ZERO_EXTEND: 8651 case ISD::ANY_EXTEND: 8652 return DAGCombineExtBoolTrunc(N, DCI); 8653 case ISD::TRUNCATE: 8654 case ISD::SETCC: 8655 case ISD::SELECT_CC: 8656 return DAGCombineTruncBoolExt(N, DCI); 8657 case ISD::SINT_TO_FP: 8658 case ISD::UINT_TO_FP: 8659 return combineFPToIntToFP(N, DCI); 8660 case ISD::STORE: { 8661 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)). 8662 if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() && 8663 !cast<StoreSDNode>(N)->isTruncatingStore() && 8664 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT && 8665 N->getOperand(1).getValueType() == MVT::i32 && 8666 N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) { 8667 SDValue Val = N->getOperand(1).getOperand(0); 8668 if (Val.getValueType() == MVT::f32) { 8669 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 8670 DCI.AddToWorklist(Val.getNode()); 8671 } 8672 Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val); 8673 DCI.AddToWorklist(Val.getNode()); 8674 8675 SDValue Ops[] = { 8676 N->getOperand(0), Val, N->getOperand(2), 8677 DAG.getValueType(N->getOperand(1).getValueType()) 8678 }; 8679 8680 Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 8681 DAG.getVTList(MVT::Other), Ops, 8682 cast<StoreSDNode>(N)->getMemoryVT(), 8683 cast<StoreSDNode>(N)->getMemOperand()); 8684 DCI.AddToWorklist(Val.getNode()); 8685 return Val; 8686 } 8687 8688 // Turn STORE (BSWAP) -> sthbrx/stwbrx. 8689 if (cast<StoreSDNode>(N)->isUnindexed() && 8690 N->getOperand(1).getOpcode() == ISD::BSWAP && 8691 N->getOperand(1).getNode()->hasOneUse() && 8692 (N->getOperand(1).getValueType() == MVT::i32 || 8693 N->getOperand(1).getValueType() == MVT::i16 || 8694 (TM.getSubtarget<PPCSubtarget>().hasLDBRX() && 8695 TM.getSubtarget<PPCSubtarget>().isPPC64() && 8696 N->getOperand(1).getValueType() == MVT::i64))) { 8697 SDValue BSwapOp = N->getOperand(1).getOperand(0); 8698 // Do an any-extend to 32-bits if this is a half-word input. 
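      // (The extension only gives the value a register-legal i32 type to
      // live in; the byte-reversed store built below still writes just the
      // low 16 bits of it.)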
8699 if (BSwapOp.getValueType() == MVT::i16) 8700 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp); 8701 8702 SDValue Ops[] = { 8703 N->getOperand(0), BSwapOp, N->getOperand(2), 8704 DAG.getValueType(N->getOperand(1).getValueType()) 8705 }; 8706 return 8707 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other), 8708 Ops, cast<StoreSDNode>(N)->getMemoryVT(), 8709 cast<StoreSDNode>(N)->getMemOperand()); 8710 } 8711 8712 // For little endian, VSX stores require generating xxswapd/lxvd2x. 8713 EVT VT = N->getOperand(1).getValueType(); 8714 if (VT.isSimple()) { 8715 MVT StoreVT = VT.getSimpleVT(); 8716 if (TM.getSubtarget<PPCSubtarget>().hasVSX() && 8717 TM.getSubtarget<PPCSubtarget>().isLittleEndian() && 8718 (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 || 8719 StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32)) 8720 return expandVSXStoreForLE(N, DCI); 8721 } 8722 break; 8723 } 8724 case ISD::LOAD: { 8725 LoadSDNode *LD = cast<LoadSDNode>(N); 8726 EVT VT = LD->getValueType(0); 8727 8728 // For little endian, VSX loads require generating lxvd2x/xxswapd. 8729 if (VT.isSimple()) { 8730 MVT LoadVT = VT.getSimpleVT(); 8731 if (TM.getSubtarget<PPCSubtarget>().hasVSX() && 8732 TM.getSubtarget<PPCSubtarget>().isLittleEndian() && 8733 (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 || 8734 LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32)) 8735 return expandVSXLoadForLE(N, DCI); 8736 } 8737 8738 Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); 8739 unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(Ty); 8740 if (ISD::isNON_EXTLoad(N) && VT.isVector() && 8741 TM.getSubtarget<PPCSubtarget>().hasAltivec() && 8742 // P8 and later hardware should just use LOAD. 8743 !TM.getSubtarget<PPCSubtarget>().hasP8Vector() && 8744 (VT == MVT::v16i8 || VT == MVT::v8i16 || 8745 VT == MVT::v4i32 || VT == MVT::v4f32) && 8746 LD->getAlignment() < ABIAlignment) { 8747 // This is a type-legal unaligned Altivec load. 8748 SDValue Chain = LD->getChain(); 8749 SDValue Ptr = LD->getBasePtr(); 8750 bool isLittleEndian = Subtarget.isLittleEndian(); 8751 8752 // This implements the loading of unaligned vectors as described in 8753 // the venerable Apple Velocity Engine overview. Specifically: 8754 // https://developer.apple.com/hardwaredrivers/ve/alignment.html 8755 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html 8756 // 8757 // The general idea is to expand a sequence of one or more unaligned 8758 // loads into an alignment-based permutation-control instruction (lvsl 8759 // or lvsr), a series of regular vector loads (which always truncate 8760 // their input address to an aligned address), and a series of 8761 // permutations. The results of these permutations are the requested 8762 // loaded values. The trick is that the last "extra" load is not taken 8763 // from the address you might suspect (sizeof(vector) bytes after the 8764 // last requested load), but rather sizeof(vector) - 1 bytes after the 8765 // last requested vector. The point of this is to avoid a page fault if 8766 // the base address happened to be aligned. This works because if the 8767 // base address is aligned, then adding less than a full vector length 8768 // will cause the last vector in the sequence to be (re)loaded. 8769 // Otherwise, the next vector will be fetched as you might suspect was 8770 // necessary. 8771 8772 // We might be able to reuse the permutation generation from 8773 // a different base address offset from this one by an aligned amount. 
8774 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this 8775 // optimization later. 8776 Intrinsic::ID Intr = (isLittleEndian ? 8777 Intrinsic::ppc_altivec_lvsr : 8778 Intrinsic::ppc_altivec_lvsl); 8779 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, MVT::v16i8); 8780 8781 // Create the new MMO for the new base load. It is like the original MMO, 8782 // but represents an area in memory almost twice the vector size centered 8783 // on the original address. If the address is unaligned, we might start 8784 // reading up to (sizeof(vector)-1) bytes below the address of the 8785 // original unaligned load. 8786 MachineFunction &MF = DAG.getMachineFunction(); 8787 MachineMemOperand *BaseMMO = 8788 MF.getMachineMemOperand(LD->getMemOperand(), 8789 -LD->getMemoryVT().getStoreSize()+1, 8790 2*LD->getMemoryVT().getStoreSize()-1); 8791 8792 // Create the new base load. 8793 SDValue LDXIntID = DAG.getTargetConstant(Intrinsic::ppc_altivec_lvx, 8794 getPointerTy()); 8795 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr }; 8796 SDValue BaseLoad = 8797 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 8798 DAG.getVTList(MVT::v4i32, MVT::Other), 8799 BaseLoadOps, MVT::v4i32, BaseMMO); 8800 8801 // Note that the value of IncOffset (which is provided to the next 8802 // load's pointer info offset value, and thus used to calculate the 8803 // alignment), and the value of IncValue (which is actually used to 8804 // increment the pointer value) are different! This is because we 8805 // require the next load to appear to be aligned, even though it 8806 // is actually offset from the base pointer by a lesser amount. 8807 int IncOffset = VT.getSizeInBits() / 8; 8808 int IncValue = IncOffset; 8809 8810 // Walk (both up and down) the chain looking for another load at the real 8811 // (aligned) offset (the alignment of the other load does not matter in 8812 // this case). If found, then do not use the offset reduction trick, as 8813 // that will prevent the loads from being later combined (as they would 8814 // otherwise be duplicates). 8815 if (!findConsecutiveLoad(LD, DAG)) 8816 --IncValue; 8817 8818 SDValue Increment = DAG.getConstant(IncValue, getPointerTy()); 8819 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 8820 8821 MachineMemOperand *ExtraMMO = 8822 MF.getMachineMemOperand(LD->getMemOperand(), 8823 1, 2*LD->getMemoryVT().getStoreSize()-1); 8824 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr }; 8825 SDValue ExtraLoad = 8826 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 8827 DAG.getVTList(MVT::v4i32, MVT::Other), 8828 ExtraLoadOps, MVT::v4i32, ExtraMMO); 8829 8830 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 8831 BaseLoad.getValue(1), ExtraLoad.getValue(1)); 8832 8833 // Because vperm has a big-endian bias, we must reverse the order 8834 // of the input vectors and complement the permute control vector 8835 // when generating little endian code. We have already handled the 8836 // latter by using lvsr instead of lvsl, so just reverse BaseLoad 8837 // and ExtraLoad here. 8838 SDValue Perm; 8839 if (isLittleEndian) 8840 Perm = BuildIntrinsicOp(Intrinsic::ppc_altivec_vperm, 8841 ExtraLoad, BaseLoad, PermCntl, DAG, dl); 8842 else 8843 Perm = BuildIntrinsicOp(Intrinsic::ppc_altivec_vperm, 8844 BaseLoad, ExtraLoad, PermCntl, DAG, dl); 8845 8846 if (VT != MVT::v4i32) 8847 Perm = DAG.getNode(ISD::BITCAST, dl, VT, Perm); 8848 8849 // The output of the permutation is our loaded result, the TokenFactor is 8850 // our new chain. 
8851 DCI.CombineTo(N, Perm, TF); 8852 return SDValue(N, 0); 8853 } 8854 } 8855 break; 8856 case ISD::INTRINSIC_WO_CHAIN: { 8857 bool isLittleEndian = Subtarget.isLittleEndian(); 8858 Intrinsic::ID Intr = (isLittleEndian ? 8859 Intrinsic::ppc_altivec_lvsr : 8860 Intrinsic::ppc_altivec_lvsl); 8861 if (cast<ConstantSDNode>(N->getOperand(0))->getZExtValue() == Intr && 8862 N->getOperand(1)->getOpcode() == ISD::ADD) { 8863 SDValue Add = N->getOperand(1); 8864 8865 if (DAG.MaskedValueIsZero(Add->getOperand(1), 8866 APInt::getAllOnesValue(4 /* 16 byte alignment */).zext( 8867 Add.getValueType().getScalarType().getSizeInBits()))) { 8868 SDNode *BasePtr = Add->getOperand(0).getNode(); 8869 for (SDNode::use_iterator UI = BasePtr->use_begin(), 8870 UE = BasePtr->use_end(); UI != UE; ++UI) { 8871 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 8872 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == 8873 Intr) { 8874 // We've found another LVSL/LVSR, and this address is an aligned 8875 // multiple of that one. The results will be the same, so use the 8876 // one we've just found instead. 8877 8878 return SDValue(*UI, 0); 8879 } 8880 } 8881 } 8882 } 8883 } 8884 8885 break; 8886 case ISD::INTRINSIC_W_CHAIN: { 8887 // For little endian, VSX loads require generating lxvd2x/xxswapd. 8888 if (TM.getSubtarget<PPCSubtarget>().hasVSX() && 8889 TM.getSubtarget<PPCSubtarget>().isLittleEndian()) { 8890 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 8891 default: 8892 break; 8893 case Intrinsic::ppc_vsx_lxvw4x: 8894 case Intrinsic::ppc_vsx_lxvd2x: 8895 return expandVSXLoadForLE(N, DCI); 8896 } 8897 } 8898 break; 8899 } 8900 case ISD::INTRINSIC_VOID: { 8901 // For little endian, VSX stores require generating xxswapd/stxvd2x. 8902 if (TM.getSubtarget<PPCSubtarget>().hasVSX() && 8903 TM.getSubtarget<PPCSubtarget>().isLittleEndian()) { 8904 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 8905 default: 8906 break; 8907 case Intrinsic::ppc_vsx_stxvw4x: 8908 case Intrinsic::ppc_vsx_stxvd2x: 8909 return expandVSXStoreForLE(N, DCI); 8910 } 8911 } 8912 break; 8913 } 8914 case ISD::BSWAP: 8915 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 8916 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && 8917 N->getOperand(0).hasOneUse() && 8918 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 || 8919 (TM.getSubtarget<PPCSubtarget>().hasLDBRX() && 8920 TM.getSubtarget<PPCSubtarget>().isPPC64() && 8921 N->getValueType(0) == MVT::i64))) { 8922 SDValue Load = N->getOperand(0); 8923 LoadSDNode *LD = cast<LoadSDNode>(Load); 8924 // Create the byte-swapping load. 8925 SDValue Ops[] = { 8926 LD->getChain(), // Chain 8927 LD->getBasePtr(), // Ptr 8928 DAG.getValueType(N->getValueType(0)) // VT 8929 }; 8930 SDValue BSLoad = 8931 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl, 8932 DAG.getVTList(N->getValueType(0) == MVT::i64 ? 8933 MVT::i64 : MVT::i32, MVT::Other), 8934 Ops, LD->getMemoryVT(), LD->getMemOperand()); 8935 8936 // If this is an i16 load, insert the truncate. 8937 SDValue ResVal = BSLoad; 8938 if (N->getValueType(0) == MVT::i16) 8939 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad); 8940 8941 // First, combine the bswap away. This makes the value produced by the 8942 // load dead. 8943 DCI.CombineTo(N, ResVal); 8944 8945 // Next, combine the load away, we give it a bogus result value but a real 8946 // chain result. The result value is dead because the bswap is dead. 
8947 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); 8948 8949 // Return N so it doesn't get rechecked! 8950 return SDValue(N, 0); 8951 } 8952 8953 break; 8954 case PPCISD::VCMP: { 8955 // If a VCMPo node already exists with exactly the same operands as this 8956 // node, use its result instead of this node (VCMPo computes both a CR6 and 8957 // a normal output). 8958 // 8959 if (!N->getOperand(0).hasOneUse() && 8960 !N->getOperand(1).hasOneUse() && 8961 !N->getOperand(2).hasOneUse()) { 8962 8963 // Scan all of the users of the LHS, looking for VCMPo's that match. 8964 SDNode *VCMPoNode = nullptr; 8965 8966 SDNode *LHSN = N->getOperand(0).getNode(); 8967 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 8968 UI != E; ++UI) 8969 if (UI->getOpcode() == PPCISD::VCMPo && 8970 UI->getOperand(1) == N->getOperand(1) && 8971 UI->getOperand(2) == N->getOperand(2) && 8972 UI->getOperand(0) == N->getOperand(0)) { 8973 VCMPoNode = *UI; 8974 break; 8975 } 8976 8977 // If there is no VCMPo node, or if the flag value has a single use, don't 8978 // transform this. 8979 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 8980 break; 8981 8982 // Look at the (necessarily single) use of the flag value. If it has a 8983 // chain, this transformation is more complex. Note that multiple things 8984 // could use the value result, which we should ignore. 8985 SDNode *FlagUser = nullptr; 8986 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 8987 FlagUser == nullptr; ++UI) { 8988 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 8989 SDNode *User = *UI; 8990 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 8991 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) { 8992 FlagUser = User; 8993 break; 8994 } 8995 } 8996 } 8997 8998 // If the user is a MFOCRF instruction, we know this is safe. 8999 // Otherwise we give up for right now. 9000 if (FlagUser->getOpcode() == PPCISD::MFOCRF) 9001 return SDValue(VCMPoNode, 0); 9002 } 9003 break; 9004 } 9005 case ISD::BRCOND: { 9006 SDValue Cond = N->getOperand(1); 9007 SDValue Target = N->getOperand(2); 9008 9009 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN && 9010 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() == 9011 Intrinsic::ppc_is_decremented_ctr_nonzero) { 9012 9013 // We now need to make the intrinsic dead (it cannot be instruction 9014 // selected). 9015 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0)); 9016 assert(Cond.getNode()->hasOneUse() && 9017 "Counter decrement has more than one use"); 9018 9019 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other, 9020 N->getOperand(0), Target); 9021 } 9022 } 9023 break; 9024 case ISD::BR_CC: { 9025 // If this is a branch on an altivec predicate comparison, lower this so 9026 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This 9027 // lowering is done pre-legalize, because the legalizer lowers the predicate 9028 // compare down to code that is difficult to reassemble. 9029 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 9030 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 9031 9032 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero 9033 // value. If so, pass-through the AND to get to the intrinsic. 
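// (For example, after the i1 result of ppc_is_decremented_ctr_nonzero has
// been promoted to a wider type, the comparison operand may look like
// `(and (intrinsic_w_chain ...), 1)`; stripping the AND here lets the
// combine below see the intrinsic directly. Illustrative note only.)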
9034 if (LHS.getOpcode() == ISD::AND && 9035 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN && 9036 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() == 9037 Intrinsic::ppc_is_decremented_ctr_nonzero && 9038 isa<ConstantSDNode>(LHS.getOperand(1)) && 9039 !cast<ConstantSDNode>(LHS.getOperand(1))->getConstantIntValue()-> 9040 isZero()) 9041 LHS = LHS.getOperand(0); 9042 9043 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN && 9044 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 9045 Intrinsic::ppc_is_decremented_ctr_nonzero && 9046 isa<ConstantSDNode>(RHS)) { 9047 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && 9048 "Counter decrement comparison is not EQ or NE"); 9049 9050 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 9051 bool isBDNZ = (CC == ISD::SETEQ && Val) || 9052 (CC == ISD::SETNE && !Val); 9053 9054 // We now need to make the intrinsic dead (it cannot be instruction 9055 // selected). 9056 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0)); 9057 assert(LHS.getNode()->hasOneUse() && 9058 "Counter decrement has more than one use"); 9059 9060 return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other, 9061 N->getOperand(0), N->getOperand(4)); 9062 } 9063 9064 int CompareOpc; 9065 bool isDot; 9066 9067 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 9068 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 9069 getAltivecCompareInfo(LHS, CompareOpc, isDot)) { 9070 assert(isDot && "Can't compare against a vector result!"); 9071 9072 // If this is a comparison against something other than 0/1, then we know 9073 // that the condition is never/always true. 9074 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 9075 if (Val != 0 && Val != 1) { 9076 if (CC == ISD::SETEQ) // Cond never true, remove branch. 9077 return N->getOperand(0); 9078 // Always !=, turn it into an unconditional branch. 9079 return DAG.getNode(ISD::BR, dl, MVT::Other, 9080 N->getOperand(0), N->getOperand(4)); 9081 } 9082 9083 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 9084 9085 // Create the PPCISD altivec 'dot' comparison node. 9086 SDValue Ops[] = { 9087 LHS.getOperand(2), // LHS of compare 9088 LHS.getOperand(3), // RHS of compare 9089 DAG.getConstant(CompareOpc, MVT::i32) 9090 }; 9091 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue }; 9092 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 9093 9094 // Unpack the result based on how the target uses it. 9095 PPC::Predicate CompOpc; 9096 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 9097 default: // Can't happen, don't crash on invalid number though. 9098 case 0: // Branch on the value of the EQ bit of CR6. 9099 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 9100 break; 9101 case 1: // Branch on the inverted value of the EQ bit of CR6. 9102 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 9103 break; 9104 case 2: // Branch on the value of the LT bit of CR6. 9105 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 9106 break; 9107 case 3: // Branch on the inverted value of the LT bit of CR6. 9108 CompOpc = BranchOnWhenPredTrue ? 
PPC::PRED_GE : PPC::PRED_LT; 9109 break; 9110 } 9111 9112 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 9113 DAG.getConstant(CompOpc, MVT::i32), 9114 DAG.getRegister(PPC::CR6, MVT::i32), 9115 N->getOperand(4), CompNode.getValue(1)); 9116 } 9117 break; 9118 } 9119 } 9120 9121 return SDValue(); 9122 } 9123 9124 SDValue 9125 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 9126 SelectionDAG &DAG, 9127 std::vector<SDNode *> *Created) const { 9128 // fold (sdiv X, pow2) 9129 EVT VT = N->getValueType(0); 9130 if (VT == MVT::i64 && !Subtarget.isPPC64()) 9131 return SDValue(); 9132 if ((VT != MVT::i32 && VT != MVT::i64) || 9133 !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2())) 9134 return SDValue(); 9135 9136 SDLoc DL(N); 9137 SDValue N0 = N->getOperand(0); 9138 9139 bool IsNegPow2 = (-Divisor).isPowerOf2(); 9140 unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros(); 9141 SDValue ShiftAmt = DAG.getConstant(Lg2, VT); 9142 9143 SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt); 9144 if (Created) 9145 Created->push_back(Op.getNode()); 9146 9147 if (IsNegPow2) { 9148 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT), Op); 9149 if (Created) 9150 Created->push_back(Op.getNode()); 9151 } 9152 9153 return Op; 9154 } 9155 9156 //===----------------------------------------------------------------------===// 9157 // Inline Assembly Support 9158 //===----------------------------------------------------------------------===// 9159 9160 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 9161 APInt &KnownZero, 9162 APInt &KnownOne, 9163 const SelectionDAG &DAG, 9164 unsigned Depth) const { 9165 KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0); 9166 switch (Op.getOpcode()) { 9167 default: break; 9168 case PPCISD::LBRX: { 9169 // lhbrx is known to have the top bits cleared out. 9170 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16) 9171 KnownZero = 0xFFFF0000; 9172 break; 9173 } 9174 case ISD::INTRINSIC_WO_CHAIN: { 9175 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) { 9176 default: break; 9177 case Intrinsic::ppc_altivec_vcmpbfp_p: 9178 case Intrinsic::ppc_altivec_vcmpeqfp_p: 9179 case Intrinsic::ppc_altivec_vcmpequb_p: 9180 case Intrinsic::ppc_altivec_vcmpequh_p: 9181 case Intrinsic::ppc_altivec_vcmpequw_p: 9182 case Intrinsic::ppc_altivec_vcmpgefp_p: 9183 case Intrinsic::ppc_altivec_vcmpgtfp_p: 9184 case Intrinsic::ppc_altivec_vcmpgtsb_p: 9185 case Intrinsic::ppc_altivec_vcmpgtsh_p: 9186 case Intrinsic::ppc_altivec_vcmpgtsw_p: 9187 case Intrinsic::ppc_altivec_vcmpgtub_p: 9188 case Intrinsic::ppc_altivec_vcmpgtuh_p: 9189 case Intrinsic::ppc_altivec_vcmpgtuw_p: 9190 KnownZero = ~1U; // All bits but the low one are known to be zero. 9191 break; 9192 } 9193 } 9194 } 9195 } 9196 9197 unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { 9198 switch (Subtarget.getDarwinDirective()) { 9199 default: break; 9200 case PPC::DIR_970: 9201 case PPC::DIR_PWR4: 9202 case PPC::DIR_PWR5: 9203 case PPC::DIR_PWR5X: 9204 case PPC::DIR_PWR6: 9205 case PPC::DIR_PWR6X: 9206 case PPC::DIR_PWR7: 9207 case PPC::DIR_PWR8: { 9208 if (!ML) 9209 break; 9210 9211 const PPCInstrInfo *TII = 9212 static_cast<const PPCInstrInfo *>(getTargetMachine().getSubtargetImpl()-> 9213 getInstrInfo()); 9214 9215 // For small loops (between 5 and 8 instructions), align to a 32-byte 9216 // boundary so that the entire loop fits in one instruction-cache line. 
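// (Clarifying note, not from the original comments: the value returned below
// is a log2 alignment, so `return 5` requests a 2^5 = 32-byte boundary, and
// with 4-byte instructions the `16 < LoopSize <= 32` byte window corresponds
// to the 5-8 instruction range mentioned above.)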
9217 uint64_t LoopSize = 0; 9218 for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I) 9219 for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) 9220 LoopSize += TII->GetInstSizeInBytes(J); 9221 9222 if (LoopSize > 16 && LoopSize <= 32) 9223 return 5; 9224 9225 break; 9226 } 9227 } 9228 9229 return TargetLowering::getPrefLoopAlignment(ML); 9230 } 9231 9232 /// getConstraintType - Given a constraint, return the type of 9233 /// constraint it is for this target. 9234 PPCTargetLowering::ConstraintType 9235 PPCTargetLowering::getConstraintType(const std::string &Constraint) const { 9236 if (Constraint.size() == 1) { 9237 switch (Constraint[0]) { 9238 default: break; 9239 case 'b': 9240 case 'r': 9241 case 'f': 9242 case 'v': 9243 case 'y': 9244 return C_RegisterClass; 9245 case 'Z': 9246 // FIXME: While Z does indicate a memory constraint, it specifically 9247 // indicates an r+r address (used in conjunction with the 'y' modifier 9248 // in the replacement string). Currently, we're forcing the base 9249 // register to be r0 in the asm printer (which is interpreted as zero) 9250 // and forming the complete address in the second register. This is 9251 // suboptimal. 9252 return C_Memory; 9253 } 9254 } else if (Constraint == "wc") { // individual CR bits. 9255 return C_RegisterClass; 9256 } else if (Constraint == "wa" || Constraint == "wd" || 9257 Constraint == "wf" || Constraint == "ws") { 9258 return C_RegisterClass; // VSX registers. 9259 } 9260 return TargetLowering::getConstraintType(Constraint); 9261 } 9262 9263 /// Examine constraint type and operand type and determine a weight value. 9264 /// This object must already have been set up with the operand type 9265 /// and the current alternative constraint selected. 9266 TargetLowering::ConstraintWeight 9267 PPCTargetLowering::getSingleConstraintMatchWeight( 9268 AsmOperandInfo &info, const char *constraint) const { 9269 ConstraintWeight weight = CW_Invalid; 9270 Value *CallOperandVal = info.CallOperandVal; 9271 // If we don't have a value, we can't do a match, 9272 // but allow it at the lowest weight. 9273 if (!CallOperandVal) 9274 return CW_Default; 9275 Type *type = CallOperandVal->getType(); 9276 9277 // Look at the constraint type. 9278 if (StringRef(constraint) == "wc" && type->isIntegerTy(1)) 9279 return CW_Register; // an individual CR bit. 
9280 else if ((StringRef(constraint) == "wa" || 9281 StringRef(constraint) == "wd" || 9282 StringRef(constraint) == "wf") && 9283 type->isVectorTy()) 9284 return CW_Register; 9285 else if (StringRef(constraint) == "ws" && type->isDoubleTy()) 9286 return CW_Register; 9287 9288 switch (*constraint) { 9289 default: 9290 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 9291 break; 9292 case 'b': 9293 if (type->isIntegerTy()) 9294 weight = CW_Register; 9295 break; 9296 case 'f': 9297 if (type->isFloatTy()) 9298 weight = CW_Register; 9299 break; 9300 case 'd': 9301 if (type->isDoubleTy()) 9302 weight = CW_Register; 9303 break; 9304 case 'v': 9305 if (type->isVectorTy()) 9306 weight = CW_Register; 9307 break; 9308 case 'y': 9309 weight = CW_Register; 9310 break; 9311 case 'Z': 9312 weight = CW_Memory; 9313 break; 9314 } 9315 return weight; 9316 } 9317 9318 std::pair<unsigned, const TargetRegisterClass*> 9319 PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 9320 MVT VT) const { 9321 if (Constraint.size() == 1) { 9322 // GCC RS6000 Constraint Letters 9323 switch (Constraint[0]) { 9324 case 'b': // R1-R31 9325 if (VT == MVT::i64 && Subtarget.isPPC64()) 9326 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass); 9327 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass); 9328 case 'r': // R0-R31 9329 if (VT == MVT::i64 && Subtarget.isPPC64()) 9330 return std::make_pair(0U, &PPC::G8RCRegClass); 9331 return std::make_pair(0U, &PPC::GPRCRegClass); 9332 case 'f': 9333 if (VT == MVT::f32 || VT == MVT::i32) 9334 return std::make_pair(0U, &PPC::F4RCRegClass); 9335 if (VT == MVT::f64 || VT == MVT::i64) 9336 return std::make_pair(0U, &PPC::F8RCRegClass); 9337 break; 9338 case 'v': 9339 return std::make_pair(0U, &PPC::VRRCRegClass); 9340 case 'y': // crrc 9341 return std::make_pair(0U, &PPC::CRRCRegClass); 9342 } 9343 } else if (Constraint == "wc") { // an individual CR bit. 9344 return std::make_pair(0U, &PPC::CRBITRCRegClass); 9345 } else if (Constraint == "wa" || Constraint == "wd" || 9346 Constraint == "wf") { 9347 return std::make_pair(0U, &PPC::VSRCRegClass); 9348 } else if (Constraint == "ws") { 9349 return std::make_pair(0U, &PPC::VSFRCRegClass); 9350 } 9351 9352 std::pair<unsigned, const TargetRegisterClass*> R = 9353 TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 9354 9355 // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers 9356 // (which we call X[0-9]+). If a 64-bit value has been requested, and a 9357 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent 9358 // register. 9359 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use 9360 // the AsmName field from *RegisterInfo.td, then this would not be necessary. 9361 if (R.first && VT == MVT::i64 && Subtarget.isPPC64() && 9362 PPC::GPRCRegClass.contains(R.first)) { 9363 const TargetRegisterInfo *TRI = 9364 getTargetMachine().getSubtargetImpl()->getRegisterInfo(); 9365 return std::make_pair(TRI->getMatchingSuperReg(R.first, 9366 PPC::sub_32, &PPC::G8RCRegClass), 9367 &PPC::G8RCRegClass); 9368 } 9369 9370 // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same. 9371 if (!R.second && StringRef("{cc}").equals_lower(Constraint)) { 9372 R.first = PPC::CR0; 9373 R.second = &PPC::CRRCRegClass; 9374 } 9375 9376 return R; 9377 } 9378 9379 9380 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 9381 /// vector. If it is invalid, don't add anything to Ops. 
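/// For example (hypothetical use, not from the original source), an inline
/// asm such as `asm("addi %0,%1,%2" : "=r"(d) : "r"(a), "I"(16))` reaches
/// this hook with Constraint "I" and a constant operand of 16, which is
/// accepted below because it fits in a signed 16-bit immediate.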
9382 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 9383 std::string &Constraint, 9384 std::vector<SDValue>&Ops, 9385 SelectionDAG &DAG) const { 9386 SDValue Result; 9387 9388 // Only support length 1 constraints. 9389 if (Constraint.length() > 1) return; 9390 9391 char Letter = Constraint[0]; 9392 switch (Letter) { 9393 default: break; 9394 case 'I': 9395 case 'J': 9396 case 'K': 9397 case 'L': 9398 case 'M': 9399 case 'N': 9400 case 'O': 9401 case 'P': { 9402 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op); 9403 if (!CST) return; // Must be an immediate to match. 9404 int64_t Value = CST->getSExtValue(); 9405 EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative 9406 // numbers are printed as such. 9407 switch (Letter) { 9408 default: llvm_unreachable("Unknown constraint letter!"); 9409 case 'I': // "I" is a signed 16-bit constant. 9410 if (isInt<16>(Value)) 9411 Result = DAG.getTargetConstant(Value, TCVT); 9412 break; 9413 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 9414 if (isShiftedUInt<16, 16>(Value)) 9415 Result = DAG.getTargetConstant(Value, TCVT); 9416 break; 9417 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. 9418 if (isShiftedInt<16, 16>(Value)) 9419 Result = DAG.getTargetConstant(Value, TCVT); 9420 break; 9421 case 'K': // "K" is a constant with only the low-order 16 bits nonzero. 9422 if (isUInt<16>(Value)) 9423 Result = DAG.getTargetConstant(Value, TCVT); 9424 break; 9425 case 'M': // "M" is a constant that is greater than 31. 9426 if (Value > 31) 9427 Result = DAG.getTargetConstant(Value, TCVT); 9428 break; 9429 case 'N': // "N" is a positive constant that is an exact power of two. 9430 if (Value > 0 && isPowerOf2_64(Value)) 9431 Result = DAG.getTargetConstant(Value, TCVT); 9432 break; 9433 case 'O': // "O" is the constant zero. 9434 if (Value == 0) 9435 Result = DAG.getTargetConstant(Value, TCVT); 9436 break; 9437 case 'P': // "P" is a constant whose negation is a signed 16-bit constant. 9438 if (isInt<16>(-Value)) 9439 Result = DAG.getTargetConstant(Value, TCVT); 9440 break; 9441 } 9442 break; 9443 } 9444 } 9445 9446 if (Result.getNode()) { 9447 Ops.push_back(Result); 9448 return; 9449 } 9450 9451 // Handle standard constraint letters. 9452 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 9453 } 9454 9455 // isLegalAddressingMode - Return true if the addressing mode represented 9456 // by AM is legal for this target, for a load/store of the specified type. 9457 bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM, 9458 Type *Ty) const { 9459 // FIXME: PPC does not allow r+i addressing modes for vectors! 9460 9461 // PPC allows a sign-extended 16-bit immediate field. 9462 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1) 9463 return false; 9464 9465 // No global is ever allowed as a base. 9466 if (AM.BaseGV) 9467 return false; 9468 9469 // PPC only support r+r, 9470 switch (AM.Scale) { 9471 case 0: // "r+i" or just "i", depending on HasBaseReg. 9472 break; 9473 case 1: 9474 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed. 9475 return false; 9476 // Otherwise we have r+r or r+i. 9477 break; 9478 case 2: 9479 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed. 9480 return false; 9481 // Allow 2*r as r+r. 9482 break; 9483 default: 9484 // No other scales are supported. 
9485 return false; 9486 } 9487 9488 return true; 9489 } 9490 9491 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op, 9492 SelectionDAG &DAG) const { 9493 MachineFunction &MF = DAG.getMachineFunction(); 9494 MachineFrameInfo *MFI = MF.getFrameInfo(); 9495 MFI->setReturnAddressIsTaken(true); 9496 9497 if (verifyReturnAddressArgumentIsConstant(Op, DAG)) 9498 return SDValue(); 9499 9500 SDLoc dl(Op); 9501 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 9502 9503 // Make sure the function does not optimize away the store of the RA to 9504 // the stack. 9505 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 9506 FuncInfo->setLRStoreRequired(); 9507 bool isPPC64 = Subtarget.isPPC64(); 9508 bool isDarwinABI = Subtarget.isDarwinABI(); 9509 9510 if (Depth > 0) { 9511 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 9512 SDValue Offset = 9513 9514 DAG.getConstant(PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI), 9515 isPPC64? MVT::i64 : MVT::i32); 9516 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), 9517 DAG.getNode(ISD::ADD, dl, getPointerTy(), 9518 FrameAddr, Offset), 9519 MachinePointerInfo(), false, false, false, 0); 9520 } 9521 9522 // Just load the return address off the stack. 9523 SDValue RetAddrFI = getReturnAddrFrameIndex(DAG); 9524 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), 9525 RetAddrFI, MachinePointerInfo(), false, false, false, 0); 9526 } 9527 9528 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, 9529 SelectionDAG &DAG) const { 9530 SDLoc dl(Op); 9531 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 9532 9533 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 9534 bool isPPC64 = PtrVT == MVT::i64; 9535 9536 MachineFunction &MF = DAG.getMachineFunction(); 9537 MachineFrameInfo *MFI = MF.getFrameInfo(); 9538 MFI->setFrameAddressIsTaken(true); 9539 9540 // Naked functions never have a frame pointer, and so we use r1. For all 9541 // other functions, this decision must be delayed until during PEI. 9542 unsigned FrameReg; 9543 if (MF.getFunction()->getAttributes().hasAttribute( 9544 AttributeSet::FunctionIndex, Attribute::Naked)) 9545 FrameReg = isPPC64 ? PPC::X1 : PPC::R1; 9546 else 9547 FrameReg = isPPC64 ? PPC::FP8 : PPC::FP; 9548 9549 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, 9550 PtrVT); 9551 while (Depth--) 9552 FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(), 9553 FrameAddr, MachinePointerInfo(), false, false, 9554 false, 0); 9555 return FrameAddr; 9556 } 9557 9558 // FIXME? Maybe this could be a TableGen attribute on some registers and 9559 // this table could be generated automatically from RegInfo. 9560 unsigned PPCTargetLowering::getRegisterByName(const char* RegName, 9561 EVT VT) const { 9562 bool isPPC64 = Subtarget.isPPC64(); 9563 bool isDarwinABI = Subtarget.isDarwinABI(); 9564 9565 if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) || 9566 (!isPPC64 && VT != MVT::i32)) 9567 report_fatal_error("Invalid register global variable type"); 9568 9569 bool is64Bit = isPPC64 && VT == MVT::i64; 9570 unsigned Reg = StringSwitch<unsigned>(RegName) 9571 .Case("r1", is64Bit ? PPC::X1 : PPC::R1) 9572 .Case("r2", isDarwinABI ? 0 : (is64Bit ? PPC::X2 : PPC::R2)) 9573 .Case("r13", (!isPPC64 && isDarwinABI) ? 0 : 9574 (is64Bit ? 
PPC::X13 : PPC::R13)) 9575 .Default(0); 9576 9577 if (Reg) 9578 return Reg; 9579 report_fatal_error("Invalid register name global variable"); 9580 } 9581 9582 bool 9583 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 9584 // The PowerPC target isn't yet aware of offsets. 9585 return false; 9586 } 9587 9588 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 9589 const CallInst &I, 9590 unsigned Intrinsic) const { 9591 9592 switch (Intrinsic) { 9593 case Intrinsic::ppc_altivec_lvx: 9594 case Intrinsic::ppc_altivec_lvxl: 9595 case Intrinsic::ppc_altivec_lvebx: 9596 case Intrinsic::ppc_altivec_lvehx: 9597 case Intrinsic::ppc_altivec_lvewx: 9598 case Intrinsic::ppc_vsx_lxvd2x: 9599 case Intrinsic::ppc_vsx_lxvw4x: { 9600 EVT VT; 9601 switch (Intrinsic) { 9602 case Intrinsic::ppc_altivec_lvebx: 9603 VT = MVT::i8; 9604 break; 9605 case Intrinsic::ppc_altivec_lvehx: 9606 VT = MVT::i16; 9607 break; 9608 case Intrinsic::ppc_altivec_lvewx: 9609 VT = MVT::i32; 9610 break; 9611 case Intrinsic::ppc_vsx_lxvd2x: 9612 VT = MVT::v2f64; 9613 break; 9614 default: 9615 VT = MVT::v4i32; 9616 break; 9617 } 9618 9619 Info.opc = ISD::INTRINSIC_W_CHAIN; 9620 Info.memVT = VT; 9621 Info.ptrVal = I.getArgOperand(0); 9622 Info.offset = -VT.getStoreSize()+1; 9623 Info.size = 2*VT.getStoreSize()-1; 9624 Info.align = 1; 9625 Info.vol = false; 9626 Info.readMem = true; 9627 Info.writeMem = false; 9628 return true; 9629 } 9630 case Intrinsic::ppc_altivec_stvx: 9631 case Intrinsic::ppc_altivec_stvxl: 9632 case Intrinsic::ppc_altivec_stvebx: 9633 case Intrinsic::ppc_altivec_stvehx: 9634 case Intrinsic::ppc_altivec_stvewx: 9635 case Intrinsic::ppc_vsx_stxvd2x: 9636 case Intrinsic::ppc_vsx_stxvw4x: { 9637 EVT VT; 9638 switch (Intrinsic) { 9639 case Intrinsic::ppc_altivec_stvebx: 9640 VT = MVT::i8; 9641 break; 9642 case Intrinsic::ppc_altivec_stvehx: 9643 VT = MVT::i16; 9644 break; 9645 case Intrinsic::ppc_altivec_stvewx: 9646 VT = MVT::i32; 9647 break; 9648 case Intrinsic::ppc_vsx_stxvd2x: 9649 VT = MVT::v2f64; 9650 break; 9651 default: 9652 VT = MVT::v4i32; 9653 break; 9654 } 9655 9656 Info.opc = ISD::INTRINSIC_VOID; 9657 Info.memVT = VT; 9658 Info.ptrVal = I.getArgOperand(1); 9659 Info.offset = -VT.getStoreSize()+1; 9660 Info.size = 2*VT.getStoreSize()-1; 9661 Info.align = 1; 9662 Info.vol = false; 9663 Info.readMem = false; 9664 Info.writeMem = true; 9665 return true; 9666 } 9667 default: 9668 break; 9669 } 9670 9671 return false; 9672 } 9673 9674 /// getOptimalMemOpType - Returns the target specific optimal type for load 9675 /// and store operations as a result of memset, memcpy, and memmove 9676 /// lowering. If DstAlign is zero that means it's safe to destination 9677 /// alignment can satisfy any constraint. Similarly if SrcAlign is zero it 9678 /// means there isn't a need to check it against alignment requirement, 9679 /// probably because the source does not need to be loaded. If 'IsMemset' is 9680 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that 9681 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy 9682 /// source is constant so it does not need to be loaded. 9683 /// It returns EVT::Other if the type should be determined using generic 9684 /// target-independent logic. 
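/// For PPC this hook (below) simply prefers the native GPR width: i64 when
/// targeting a 64-bit subtarget and i32 otherwise, independent of the
/// alignment and memset/memcpy hints. (Clarifying note, not part of the
/// original documentation comment.)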
9685 EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size, 9686 unsigned DstAlign, unsigned SrcAlign, 9687 bool IsMemset, bool ZeroMemset, 9688 bool MemcpyStrSrc, 9689 MachineFunction &MF) const { 9690 if (Subtarget.isPPC64()) { 9691 return MVT::i64; 9692 } else { 9693 return MVT::i32; 9694 } 9695 } 9696 9697 /// \brief Returns true if it is beneficial to convert a load of a constant 9698 /// to just the constant itself. 9699 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 9700 Type *Ty) const { 9701 assert(Ty->isIntegerTy()); 9702 9703 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 9704 if (BitSize == 0 || BitSize > 64) 9705 return false; 9706 return true; 9707 } 9708 9709 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { 9710 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 9711 return false; 9712 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 9713 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 9714 return NumBits1 == 64 && NumBits2 == 32; 9715 } 9716 9717 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 9718 if (!VT1.isInteger() || !VT2.isInteger()) 9719 return false; 9720 unsigned NumBits1 = VT1.getSizeInBits(); 9721 unsigned NumBits2 = VT2.getSizeInBits(); 9722 return NumBits1 == 64 && NumBits2 == 32; 9723 } 9724 9725 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 9726 return isInt<16>(Imm) || isUInt<16>(Imm); 9727 } 9728 9729 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const { 9730 return isInt<16>(Imm) || isUInt<16>(Imm); 9731 } 9732 9733 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, 9734 unsigned, 9735 unsigned, 9736 bool *Fast) const { 9737 if (DisablePPCUnaligned) 9738 return false; 9739 9740 // PowerPC supports unaligned memory access for simple non-vector types. 9741 // Although accessing unaligned addresses is not as efficient as accessing 9742 // aligned addresses, it is generally more efficient than manual expansion, 9743 // and generally only traps for software emulation when crossing page 9744 // boundaries. 9745 9746 if (!VT.isSimple()) 9747 return false; 9748 9749 if (VT.getSimpleVT().isVector()) { 9750 if (Subtarget.hasVSX()) { 9751 if (VT != MVT::v2f64 && VT != MVT::v2i64 && 9752 VT != MVT::v4f32 && VT != MVT::v4i32) 9753 return false; 9754 } else { 9755 return false; 9756 } 9757 } 9758 9759 if (VT == MVT::ppcf128) 9760 return false; 9761 9762 if (Fast) 9763 *Fast = true; 9764 9765 return true; 9766 } 9767 9768 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { 9769 VT = VT.getScalarType(); 9770 9771 if (!VT.isSimple()) 9772 return false; 9773 9774 switch (VT.getSimpleVT().SimpleTy) { 9775 case MVT::f32: 9776 case MVT::f64: 9777 return true; 9778 default: 9779 break; 9780 } 9781 9782 return false; 9783 } 9784 9785 bool 9786 PPCTargetLowering::shouldExpandBuildVectorWithShuffles( 9787 EVT VT , unsigned DefinedValues) const { 9788 if (VT == MVT::v2i64) 9789 return false; 9790 9791 return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues); 9792 } 9793 9794 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const { 9795 if (DisableILPPref || Subtarget.enableMachineScheduler()) 9796 return TargetLowering::getSchedulingPreference(N); 9797 9798 return Sched::ILP; 9799 } 9800 9801 // Create a fast isel object. 
9802 FastISel * 9803 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo, 9804 const TargetLibraryInfo *LibInfo) const { 9805 return PPC::createFastISel(FuncInfo, LibInfo); 9806 } 9807