1 //===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file implements the PPCISelLowering class. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "PPCISelLowering.h" 15 #include "PPCMachineFunctionInfo.h" 16 #include "PPCPerfectShuffle.h" 17 #include "PPCTargetMachine.h" 18 #include "MCTargetDesc/PPCPredicates.h" 19 #include "llvm/CallingConv.h" 20 #include "llvm/Constants.h" 21 #include "llvm/DerivedTypes.h" 22 #include "llvm/Function.h" 23 #include "llvm/Intrinsics.h" 24 #include "llvm/ADT/STLExtras.h" 25 #include "llvm/CodeGen/CallingConvLower.h" 26 #include "llvm/CodeGen/MachineFrameInfo.h" 27 #include "llvm/CodeGen/MachineFunction.h" 28 #include "llvm/CodeGen/MachineInstrBuilder.h" 29 #include "llvm/CodeGen/MachineRegisterInfo.h" 30 #include "llvm/CodeGen/SelectionDAG.h" 31 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" 32 #include "llvm/Support/CommandLine.h" 33 #include "llvm/Support/ErrorHandling.h" 34 #include "llvm/Support/MathExtras.h" 35 #include "llvm/Support/raw_ostream.h" 36 #include "llvm/Target/TargetOptions.h" 37 using namespace llvm; 38 39 static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 40 CCValAssign::LocInfo &LocInfo, 41 ISD::ArgFlagsTy &ArgFlags, 42 CCState &State); 43 static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, 44 MVT &LocVT, 45 CCValAssign::LocInfo &LocInfo, 46 ISD::ArgFlagsTy &ArgFlags, 47 CCState &State); 48 static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT, 49 MVT &LocVT, 50 CCValAssign::LocInfo &LocInfo, 51 ISD::ArgFlagsTy &ArgFlags, 52 CCState &State); 53 54 static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc", 55 cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden); 56 57 static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref", 58 cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden); 59 60 static TargetLoweringObjectFile *CreateTLOF(const PPCTargetMachine &TM) { 61 if (TM.getSubtargetImpl()->isDarwin()) 62 return new TargetLoweringObjectFileMachO(); 63 64 return new TargetLoweringObjectFileELF(); 65 } 66 67 PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) 68 : TargetLowering(TM, CreateTLOF(TM)), PPCSubTarget(*TM.getSubtargetImpl()) { 69 const PPCSubtarget *Subtarget = &TM.getSubtarget<PPCSubtarget>(); 70 71 setPow2DivIsCheap(); 72 73 // Use _setjmp/_longjmp instead of setjmp/longjmp. 74 setUseUnderscoreSetJmp(true); 75 setUseUnderscoreLongJmp(true); 76 77 // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all 78 // arguments are at least 4/8 bytes aligned. 79 bool isPPC64 = Subtarget->isPPC64(); 80 setMinStackArgumentAlignment(isPPC64 ? 8:4); 81 82 // Set up the register classes. 83 addRegisterClass(MVT::i32, &PPC::GPRCRegClass); 84 addRegisterClass(MVT::f32, &PPC::F4RCRegClass); 85 addRegisterClass(MVT::f64, &PPC::F8RCRegClass); 86 87 // PowerPC has an i16 but no i8 (or i1) SEXTLOAD 88 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote); 89 setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand); 90 91 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 92 93 // PowerPC has pre-inc load and store's. 
94 setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal); 95 setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal); 96 setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal); 97 setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal); 98 setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal); 99 setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal); 100 setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal); 101 setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal); 102 setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal); 103 setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal); 104 105 // This is used in the ppcf128->int sequence. Note it has different semantics 106 // from FP_ROUND: that rounds to nearest, this rounds to zero. 107 setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom); 108 109 // We do not currently implement these libm ops for PowerPC. 110 setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand); 111 setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand); 112 setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand); 113 setOperationAction(ISD::FRINT, MVT::ppcf128, Expand); 114 setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand); 115 116 // PowerPC has no SREM/UREM instructions 117 setOperationAction(ISD::SREM, MVT::i32, Expand); 118 setOperationAction(ISD::UREM, MVT::i32, Expand); 119 setOperationAction(ISD::SREM, MVT::i64, Expand); 120 setOperationAction(ISD::UREM, MVT::i64, Expand); 121 122 // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM. 123 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); 124 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); 125 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); 126 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); 127 setOperationAction(ISD::UDIVREM, MVT::i32, Expand); 128 setOperationAction(ISD::SDIVREM, MVT::i32, Expand); 129 setOperationAction(ISD::UDIVREM, MVT::i64, Expand); 130 setOperationAction(ISD::SDIVREM, MVT::i64, Expand); 131 132 // We don't support sin/cos/sqrt/fmod/pow 133 setOperationAction(ISD::FSIN , MVT::f64, Expand); 134 setOperationAction(ISD::FCOS , MVT::f64, Expand); 135 setOperationAction(ISD::FREM , MVT::f64, Expand); 136 setOperationAction(ISD::FPOW , MVT::f64, Expand); 137 setOperationAction(ISD::FMA , MVT::f64, Legal); 138 setOperationAction(ISD::FSIN , MVT::f32, Expand); 139 setOperationAction(ISD::FCOS , MVT::f32, Expand); 140 setOperationAction(ISD::FREM , MVT::f32, Expand); 141 setOperationAction(ISD::FPOW , MVT::f32, Expand); 142 setOperationAction(ISD::FMA , MVT::f32, Legal); 143 144 setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom); 145 146 // If we're enabling GP optimizations, use hardware square root 147 if (!Subtarget->hasFSQRT()) { 148 setOperationAction(ISD::FSQRT, MVT::f64, Expand); 149 setOperationAction(ISD::FSQRT, MVT::f32, Expand); 150 } 151 152 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 153 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 154 155 // PowerPC does not have BSWAP, CTPOP or CTTZ 156 setOperationAction(ISD::BSWAP, MVT::i32 , Expand); 157 setOperationAction(ISD::CTPOP, MVT::i32 , Expand); 158 setOperationAction(ISD::CTTZ , MVT::i32 , Expand); 159 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); 160 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand); 161 setOperationAction(ISD::BSWAP, MVT::i64 , Expand); 162 setOperationAction(ISD::CTPOP, MVT::i64 , Expand); 163 setOperationAction(ISD::CTTZ , MVT::i64 , Expand); 164 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand); 165 
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand); 166 167 // PowerPC does not have ROTR 168 setOperationAction(ISD::ROTR, MVT::i32 , Expand); 169 setOperationAction(ISD::ROTR, MVT::i64 , Expand); 170 171 // PowerPC does not have Select 172 setOperationAction(ISD::SELECT, MVT::i32, Expand); 173 setOperationAction(ISD::SELECT, MVT::i64, Expand); 174 setOperationAction(ISD::SELECT, MVT::f32, Expand); 175 setOperationAction(ISD::SELECT, MVT::f64, Expand); 176 177 // PowerPC wants to turn select_cc of FP into fsel when possible. 178 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); 179 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); 180 181 // PowerPC wants to optimize integer setcc a bit 182 setOperationAction(ISD::SETCC, MVT::i32, Custom); 183 184 // PowerPC does not have BRCOND which requires SetCC 185 setOperationAction(ISD::BRCOND, MVT::Other, Expand); 186 187 setOperationAction(ISD::BR_JT, MVT::Other, Expand); 188 189 // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores. 190 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); 191 192 // PowerPC does not have [U|S]INT_TO_FP 193 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand); 194 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand); 195 196 setOperationAction(ISD::BITCAST, MVT::f32, Expand); 197 setOperationAction(ISD::BITCAST, MVT::i32, Expand); 198 setOperationAction(ISD::BITCAST, MVT::i64, Expand); 199 setOperationAction(ISD::BITCAST, MVT::f64, Expand); 200 201 // We cannot sextinreg(i1). Expand to shifts. 202 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); 203 204 setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand); 205 setOperationAction(ISD::EHSELECTION, MVT::i64, Expand); 206 setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand); 207 setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); 208 209 210 // We want to legalize GlobalAddress and ConstantPool nodes into the 211 // appropriate instructions to materialize the address. 212 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 213 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); 214 setOperationAction(ISD::BlockAddress, MVT::i32, Custom); 215 setOperationAction(ISD::ConstantPool, MVT::i32, Custom); 216 setOperationAction(ISD::JumpTable, MVT::i32, Custom); 217 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); 218 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom); 219 setOperationAction(ISD::BlockAddress, MVT::i64, Custom); 220 setOperationAction(ISD::ConstantPool, MVT::i64, Custom); 221 setOperationAction(ISD::JumpTable, MVT::i64, Custom); 222 223 // TRAP is legal. 224 setOperationAction(ISD::TRAP, MVT::Other, Legal); 225 226 // TRAMPOLINE is custom lowered. 227 setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom); 228 setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom); 229 230 // VASTART needs to be custom lowered to use the VarArgsFrameIndex 231 setOperationAction(ISD::VASTART , MVT::Other, Custom); 232 233 if (Subtarget->isSVR4ABI()) { 234 if (isPPC64) { 235 // VAARG always uses double-word chunks, so promote anything smaller. 
236 setOperationAction(ISD::VAARG, MVT::i1, Promote); 237 AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64); 238 setOperationAction(ISD::VAARG, MVT::i8, Promote); 239 AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64); 240 setOperationAction(ISD::VAARG, MVT::i16, Promote); 241 AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64); 242 setOperationAction(ISD::VAARG, MVT::i32, Promote); 243 AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64); 244 setOperationAction(ISD::VAARG, MVT::Other, Expand); 245 } else { 246 // VAARG is custom lowered with the 32-bit SVR4 ABI. 247 setOperationAction(ISD::VAARG, MVT::Other, Custom); 248 setOperationAction(ISD::VAARG, MVT::i64, Custom); 249 } 250 } else 251 setOperationAction(ISD::VAARG, MVT::Other, Expand); 252 253 // Use the default implementation. 254 setOperationAction(ISD::VACOPY , MVT::Other, Expand); 255 setOperationAction(ISD::VAEND , MVT::Other, Expand); 256 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand); 257 setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom); 258 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom); 259 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom); 260 261 // We want to custom lower some of our intrinsics. 262 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 263 264 // Comparisons that require checking two conditions. 265 setCondCodeAction(ISD::SETULT, MVT::f32, Expand); 266 setCondCodeAction(ISD::SETULT, MVT::f64, Expand); 267 setCondCodeAction(ISD::SETUGT, MVT::f32, Expand); 268 setCondCodeAction(ISD::SETUGT, MVT::f64, Expand); 269 setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand); 270 setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand); 271 setCondCodeAction(ISD::SETOGE, MVT::f32, Expand); 272 setCondCodeAction(ISD::SETOGE, MVT::f64, Expand); 273 setCondCodeAction(ISD::SETOLE, MVT::f32, Expand); 274 setCondCodeAction(ISD::SETOLE, MVT::f64, Expand); 275 setCondCodeAction(ISD::SETONE, MVT::f32, Expand); 276 setCondCodeAction(ISD::SETONE, MVT::f64, Expand); 277 278 if (Subtarget->has64BitSupport()) { 279 // They also have instructions for converting between i64 and fp. 280 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); 281 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand); 282 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); 283 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand); 284 // This is just the low 32 bits of a (signed) fp->i64 conversion. 285 // We cannot do this with Promote because i64 is not a legal type. 286 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); 287 288 // FIXME: disable this lowered code. This generates 64-bit register values, 289 // and we don't model the fact that the top part is clobbered by calls. We 290 // need to flag these together so that the value isn't live across a call. 291 //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); 292 } else { 293 // PowerPC does not have FP_TO_UINT on 32-bit implementations. 294 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand); 295 } 296 297 if (Subtarget->use64BitRegs()) { 298 // 64-bit PowerPC implementations can support i64 types directly 299 addRegisterClass(MVT::i64, &PPC::G8RCRegClass); 300 // BUILD_PAIR can't be handled natively, and should be expanded to shl/or 301 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand); 302 // 64-bit PowerPC wants to expand i128 shifts itself. 
303 setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom); 304 setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom); 305 setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom); 306 } else { 307 // 32-bit PowerPC wants to expand i64 shifts itself. 308 setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); 309 setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); 310 setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); 311 } 312 313 if (Subtarget->hasAltivec()) { 314 // First set operation action for all vector types to expand. Then we 315 // will selectively turn on ones that can be effectively codegen'd. 316 for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 317 i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) { 318 MVT::SimpleValueType VT = (MVT::SimpleValueType)i; 319 320 // add/sub are legal for all supported vector VT's. 321 setOperationAction(ISD::ADD , VT, Legal); 322 setOperationAction(ISD::SUB , VT, Legal); 323 324 // We promote all shuffles to v16i8. 325 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote); 326 AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8); 327 328 // We promote all non-typed operations to v4i32. 329 setOperationAction(ISD::AND , VT, Promote); 330 AddPromotedToType (ISD::AND , VT, MVT::v4i32); 331 setOperationAction(ISD::OR , VT, Promote); 332 AddPromotedToType (ISD::OR , VT, MVT::v4i32); 333 setOperationAction(ISD::XOR , VT, Promote); 334 AddPromotedToType (ISD::XOR , VT, MVT::v4i32); 335 setOperationAction(ISD::LOAD , VT, Promote); 336 AddPromotedToType (ISD::LOAD , VT, MVT::v4i32); 337 setOperationAction(ISD::SELECT, VT, Promote); 338 AddPromotedToType (ISD::SELECT, VT, MVT::v4i32); 339 setOperationAction(ISD::STORE, VT, Promote); 340 AddPromotedToType (ISD::STORE, VT, MVT::v4i32); 341 342 // No other operations are legal. 
343 setOperationAction(ISD::MUL , VT, Expand); 344 setOperationAction(ISD::SDIV, VT, Expand); 345 setOperationAction(ISD::SREM, VT, Expand); 346 setOperationAction(ISD::UDIV, VT, Expand); 347 setOperationAction(ISD::UREM, VT, Expand); 348 setOperationAction(ISD::FDIV, VT, Expand); 349 setOperationAction(ISD::FNEG, VT, Expand); 350 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand); 351 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand); 352 setOperationAction(ISD::BUILD_VECTOR, VT, Expand); 353 setOperationAction(ISD::UMUL_LOHI, VT, Expand); 354 setOperationAction(ISD::SMUL_LOHI, VT, Expand); 355 setOperationAction(ISD::UDIVREM, VT, Expand); 356 setOperationAction(ISD::SDIVREM, VT, Expand); 357 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand); 358 setOperationAction(ISD::FPOW, VT, Expand); 359 setOperationAction(ISD::CTPOP, VT, Expand); 360 setOperationAction(ISD::CTLZ, VT, Expand); 361 setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand); 362 setOperationAction(ISD::CTTZ, VT, Expand); 363 setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand); 364 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand); 365 366 for (unsigned j = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 367 j <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++j) { 368 MVT::SimpleValueType InnerVT = (MVT::SimpleValueType)j; 369 setTruncStoreAction(VT, InnerVT, Expand); 370 } 371 setLoadExtAction(ISD::SEXTLOAD, VT, Expand); 372 setLoadExtAction(ISD::ZEXTLOAD, VT, Expand); 373 setLoadExtAction(ISD::EXTLOAD, VT, Expand); 374 } 375 376 for (unsigned i = (unsigned)MVT::FIRST_FP_VECTOR_VALUETYPE; 377 i <= (unsigned)MVT::LAST_FP_VECTOR_VALUETYPE; ++i) { 378 MVT::SimpleValueType VT = (MVT::SimpleValueType)i; 379 setOperationAction(ISD::FSQRT, VT, Expand); 380 } 381 382 // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle 383 // with merges, splats, etc. 
384 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom); 385 386 setOperationAction(ISD::AND , MVT::v4i32, Legal); 387 setOperationAction(ISD::OR , MVT::v4i32, Legal); 388 setOperationAction(ISD::XOR , MVT::v4i32, Legal); 389 setOperationAction(ISD::LOAD , MVT::v4i32, Legal); 390 setOperationAction(ISD::SELECT, MVT::v4i32, Expand); 391 setOperationAction(ISD::STORE , MVT::v4i32, Legal); 392 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal); 393 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal); 394 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal); 395 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal); 396 397 addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass); 398 addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass); 399 addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass); 400 addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass); 401 402 setOperationAction(ISD::MUL, MVT::v4f32, Legal); 403 setOperationAction(ISD::FMA, MVT::v4f32, Legal); 404 setOperationAction(ISD::MUL, MVT::v4i32, Custom); 405 setOperationAction(ISD::MUL, MVT::v8i16, Custom); 406 setOperationAction(ISD::MUL, MVT::v16i8, Custom); 407 408 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom); 409 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom); 410 411 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom); 412 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom); 413 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom); 414 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); 415 416 // Altivec does not contain unordered floating-point compare instructions 417 setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand); 418 setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand); 419 setCondCodeAction(ISD::SETUGT, MVT::v4f32, Expand); 420 setCondCodeAction(ISD::SETUGE, MVT::v4f32, Expand); 421 setCondCodeAction(ISD::SETULT, MVT::v4f32, Expand); 422 setCondCodeAction(ISD::SETULE, MVT::v4f32, Expand); 423 } 424 425 if (Subtarget->has64BitSupport()) { 426 setOperationAction(ISD::PREFETCH, MVT::Other, Legal); 427 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal); 428 } 429 430 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Expand); 431 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand); 432 433 setBooleanContents(ZeroOrOneBooleanContent); 434 setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct? 435 436 if (isPPC64) { 437 setStackPointerRegisterToSaveRestore(PPC::X1); 438 setExceptionPointerRegister(PPC::X3); 439 setExceptionSelectorRegister(PPC::X4); 440 } else { 441 setStackPointerRegisterToSaveRestore(PPC::R1); 442 setExceptionPointerRegister(PPC::R3); 443 setExceptionSelectorRegister(PPC::R4); 444 } 445 446 // We have target-specific dag combine patterns for the following nodes: 447 setTargetDAGCombine(ISD::SINT_TO_FP); 448 setTargetDAGCombine(ISD::STORE); 449 setTargetDAGCombine(ISD::BR_CC); 450 setTargetDAGCombine(ISD::BSWAP); 451 452 // Darwin long double math library functions have $LDBL128 appended. 
453 if (Subtarget->isDarwin()) { 454 setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128"); 455 setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128"); 456 setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128"); 457 setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128"); 458 setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128"); 459 setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128"); 460 setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128"); 461 setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128"); 462 setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128"); 463 setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128"); 464 } 465 466 setMinFunctionAlignment(2); 467 if (PPCSubTarget.isDarwin()) 468 setPrefFunctionAlignment(4); 469 470 if (isPPC64 && Subtarget->isJITCodeModel()) 471 // Temporary workaround for the inability of PPC64 JIT to handle jump 472 // tables. 473 setSupportJumpTables(false); 474 475 setInsertFencesForAtomic(true); 476 477 setSchedulingPreference(Sched::Hybrid); 478 479 computeRegisterProperties(); 480 481 // The Freescale cores does better with aggressive inlining of memcpy and 482 // friends. Gcc uses same threshold of 128 bytes (= 32 word stores). 483 if (Subtarget->getDarwinDirective() == PPC::DIR_E500mc || 484 Subtarget->getDarwinDirective() == PPC::DIR_E5500) { 485 maxStoresPerMemset = 32; 486 maxStoresPerMemsetOptSize = 16; 487 maxStoresPerMemcpy = 32; 488 maxStoresPerMemcpyOptSize = 8; 489 maxStoresPerMemmove = 32; 490 maxStoresPerMemmoveOptSize = 8; 491 492 setPrefFunctionAlignment(4); 493 benefitFromCodePlacementOpt = true; 494 } 495 } 496 497 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate 498 /// function arguments in the caller parameter area. 499 unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty) const { 500 const TargetMachine &TM = getTargetMachine(); 501 // Darwin passes everything on 4 byte boundary. 502 if (TM.getSubtarget<PPCSubtarget>().isDarwin()) 503 return 4; 504 505 // 16byte and wider vectors are passed on 16byte boundary. 506 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) 507 if (VTy->getBitWidth() >= 128) 508 return 16; 509 510 // The rest is 8 on PPC64 and 4 on PPC32 boundary. 
511 if (PPCSubTarget.isPPC64()) 512 return 8; 513 514 return 4; 515 } 516 517 const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const { 518 switch (Opcode) { 519 default: return 0; 520 case PPCISD::FSEL: return "PPCISD::FSEL"; 521 case PPCISD::FCFID: return "PPCISD::FCFID"; 522 case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ"; 523 case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ"; 524 case PPCISD::STFIWX: return "PPCISD::STFIWX"; 525 case PPCISD::VMADDFP: return "PPCISD::VMADDFP"; 526 case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP"; 527 case PPCISD::VPERM: return "PPCISD::VPERM"; 528 case PPCISD::Hi: return "PPCISD::Hi"; 529 case PPCISD::Lo: return "PPCISD::Lo"; 530 case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY"; 531 case PPCISD::TOC_RESTORE: return "PPCISD::TOC_RESTORE"; 532 case PPCISD::LOAD: return "PPCISD::LOAD"; 533 case PPCISD::LOAD_TOC: return "PPCISD::LOAD_TOC"; 534 case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC"; 535 case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg"; 536 case PPCISD::SRL: return "PPCISD::SRL"; 537 case PPCISD::SRA: return "PPCISD::SRA"; 538 case PPCISD::SHL: return "PPCISD::SHL"; 539 case PPCISD::EXTSW_32: return "PPCISD::EXTSW_32"; 540 case PPCISD::STD_32: return "PPCISD::STD_32"; 541 case PPCISD::CALL_SVR4: return "PPCISD::CALL_SVR4"; 542 case PPCISD::CALL_NOP_SVR4: return "PPCISD::CALL_NOP_SVR4"; 543 case PPCISD::CALL_Darwin: return "PPCISD::CALL_Darwin"; 544 case PPCISD::NOP: return "PPCISD::NOP"; 545 case PPCISD::MTCTR: return "PPCISD::MTCTR"; 546 case PPCISD::BCTRL_Darwin: return "PPCISD::BCTRL_Darwin"; 547 case PPCISD::BCTRL_SVR4: return "PPCISD::BCTRL_SVR4"; 548 case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG"; 549 case PPCISD::MFCR: return "PPCISD::MFCR"; 550 case PPCISD::VCMP: return "PPCISD::VCMP"; 551 case PPCISD::VCMPo: return "PPCISD::VCMPo"; 552 case PPCISD::LBRX: return "PPCISD::LBRX"; 553 case PPCISD::STBRX: return "PPCISD::STBRX"; 554 case PPCISD::LARX: return "PPCISD::LARX"; 555 case PPCISD::STCX: return "PPCISD::STCX"; 556 case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH"; 557 case PPCISD::MFFS: return "PPCISD::MFFS"; 558 case PPCISD::MTFSB0: return "PPCISD::MTFSB0"; 559 case PPCISD::MTFSB1: return "PPCISD::MTFSB1"; 560 case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ"; 561 case PPCISD::MTFSF: return "PPCISD::MTFSF"; 562 case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN"; 563 case PPCISD::CR6SET: return "PPCISD::CR6SET"; 564 case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET"; 565 } 566 } 567 568 EVT PPCTargetLowering::getSetCCResultType(EVT VT) const { 569 if (!VT.isVector()) 570 return MVT::i32; 571 return VT.changeVectorElementTypeToInteger(); 572 } 573 574 //===----------------------------------------------------------------------===// 575 // Node matching predicates, for use by the tblgen matching code. 576 //===----------------------------------------------------------------------===// 577 578 /// isFloatingPointZero - Return true if this is 0.0 or -0.0. 579 static bool isFloatingPointZero(SDValue Op) { 580 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 581 return CFP->getValueAPF().isZero(); 582 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 583 // Maybe this has already been legalized into the constant pool? 
584 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1))) 585 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 586 return CFP->getValueAPF().isZero(); 587 } 588 return false; 589 } 590 591 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return 592 /// true if Op is undef or if it matches the specified value. 593 static bool isConstantOrUndef(int Op, int Val) { 594 return Op < 0 || Op == Val; 595 } 596 597 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a 598 /// VPKUHUM instruction. 599 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) { 600 if (!isUnary) { 601 for (unsigned i = 0; i != 16; ++i) 602 if (!isConstantOrUndef(N->getMaskElt(i), i*2+1)) 603 return false; 604 } else { 605 for (unsigned i = 0; i != 8; ++i) 606 if (!isConstantOrUndef(N->getMaskElt(i), i*2+1) || 607 !isConstantOrUndef(N->getMaskElt(i+8), i*2+1)) 608 return false; 609 } 610 return true; 611 } 612 613 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a 614 /// VPKUWUM instruction. 615 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) { 616 if (!isUnary) { 617 for (unsigned i = 0; i != 16; i += 2) 618 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) || 619 !isConstantOrUndef(N->getMaskElt(i+1), i*2+3)) 620 return false; 621 } else { 622 for (unsigned i = 0; i != 8; i += 2) 623 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) || 624 !isConstantOrUndef(N->getMaskElt(i+1), i*2+3) || 625 !isConstantOrUndef(N->getMaskElt(i+8), i*2+2) || 626 !isConstantOrUndef(N->getMaskElt(i+9), i*2+3)) 627 return false; 628 } 629 return true; 630 } 631 632 /// isVMerge - Common function, used to match vmrg* shuffles. 633 /// 634 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize, 635 unsigned LHSStart, unsigned RHSStart) { 636 assert(N->getValueType(0) == MVT::v16i8 && 637 "PPC only supports shuffles by bytes!"); 638 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) && 639 "Unsupported merge size!"); 640 641 for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units 642 for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit 643 if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j), 644 LHSStart+j+i*UnitSize) || 645 !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j), 646 RHSStart+j+i*UnitSize)) 647 return false; 648 } 649 return true; 650 } 651 652 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for 653 /// a VRGL* instruction with the specified unit size (1,2 or 4 bytes). 654 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, 655 bool isUnary) { 656 if (!isUnary) 657 return isVMerge(N, UnitSize, 8, 24); 658 return isVMerge(N, UnitSize, 8, 8); 659 } 660 661 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for 662 /// a VRGH* instruction with the specified unit size (1,2 or 4 bytes). 663 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, 664 bool isUnary) { 665 if (!isUnary) 666 return isVMerge(N, UnitSize, 0, 16); 667 return isVMerge(N, UnitSize, 0, 0); 668 } 669 670 671 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift 672 /// amount, otherwise return -1. 673 int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) { 674 assert(N->getValueType(0) == MVT::v16i8 && 675 "PPC only supports shuffles by bytes!"); 676 677 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 678 679 // Find the first non-undef value in the shuffle mask. 
680 unsigned i; 681 for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i) 682 /*search*/; 683 684 if (i == 16) return -1; // all undef. 685 686 // Otherwise, check to see if the rest of the elements are consecutively 687 // numbered from this value. 688 unsigned ShiftAmt = SVOp->getMaskElt(i); 689 if (ShiftAmt < i) return -1; 690 ShiftAmt -= i; 691 692 if (!isUnary) { 693 // Check the rest of the elements to see if they are consecutive. 694 for (++i; i != 16; ++i) 695 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 696 return -1; 697 } else { 698 // Check the rest of the elements to see if they are consecutive. 699 for (++i; i != 16; ++i) 700 if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15)) 701 return -1; 702 } 703 return ShiftAmt; 704 } 705 706 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand 707 /// specifies a splat of a single element that is suitable for input to 708 /// VSPLTB/VSPLTH/VSPLTW. 709 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) { 710 assert(N->getValueType(0) == MVT::v16i8 && 711 (EltSize == 1 || EltSize == 2 || EltSize == 4)); 712 713 // This is a splat operation if each element of the permute is the same, and 714 // if the value doesn't reference the second vector. 715 unsigned ElementBase = N->getMaskElt(0); 716 717 // FIXME: Handle UNDEF elements too! 718 if (ElementBase >= 16) 719 return false; 720 721 // Check that the indices are consecutive, in the case of a multi-byte element 722 // splatted with a v16i8 mask. 723 for (unsigned i = 1; i != EltSize; ++i) 724 if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase)) 725 return false; 726 727 for (unsigned i = EltSize, e = 16; i != e; i += EltSize) { 728 if (N->getMaskElt(i) < 0) continue; 729 for (unsigned j = 0; j != EltSize; ++j) 730 if (N->getMaskElt(i+j) != N->getMaskElt(j)) 731 return false; 732 } 733 return true; 734 } 735 736 /// isAllNegativeZeroVector - Returns true if all elements of build_vector 737 /// are -0.0. 738 bool PPC::isAllNegativeZeroVector(SDNode *N) { 739 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N); 740 741 APInt APVal, APUndef; 742 unsigned BitSize; 743 bool HasAnyUndefs; 744 745 if (BV->isConstantSplat(APVal, APUndef, BitSize, HasAnyUndefs, 32, true)) 746 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 747 return CFP->getValueAPF().isNegZero(); 748 749 return false; 750 } 751 752 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the 753 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask. 754 unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) { 755 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 756 assert(isSplatShuffleMask(SVOp, EltSize)); 757 return SVOp->getMaskElt(0) / EltSize; 758 } 759 760 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed 761 /// by using a vspltis[bhw] instruction of the specified element size, return 762 /// the constant being splatted. The ByteSize field indicates the number of 763 /// bytes of each element [124] -> [bhw]. 764 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { 765 SDValue OpVal(0, 0); 766 767 // If ByteSize of the splat is bigger than the element size of the 768 // build_vector, then we have a case where we are checking for a splat where 769 // multiple elements of the buildvector are folded together into a single 770 // logical element of the splat (e.g. "vsplish 1" to splat {0,1}*8). 
771 unsigned EltSize = 16/N->getNumOperands(); 772 if (EltSize < ByteSize) { 773 unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval. 774 SDValue UniquedVals[4]; 775 assert(Multiple > 1 && Multiple <= 4 && "How can this happen?"); 776 777 // See if all of the elements in the buildvector agree across. 778 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 779 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; 780 // If the element isn't a constant, bail fully out. 781 if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue(); 782 783 784 if (UniquedVals[i&(Multiple-1)].getNode() == 0) 785 UniquedVals[i&(Multiple-1)] = N->getOperand(i); 786 else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i)) 787 return SDValue(); // no match. 788 } 789 790 // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains 791 // either constant or undef values that are identical for each chunk. See 792 // if these chunks can form into a larger vspltis*. 793 794 // Check to see if all of the leading entries are either 0 or -1. If 795 // neither, then this won't fit into the immediate field. 796 bool LeadingZero = true; 797 bool LeadingOnes = true; 798 for (unsigned i = 0; i != Multiple-1; ++i) { 799 if (UniquedVals[i].getNode() == 0) continue; // Must have been undefs. 800 801 LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue(); 802 LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue(); 803 } 804 // Finally, check the least significant entry. 805 if (LeadingZero) { 806 if (UniquedVals[Multiple-1].getNode() == 0) 807 return DAG.getTargetConstant(0, MVT::i32); // 0,0,0,undef 808 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue(); 809 if (Val < 16) 810 return DAG.getTargetConstant(Val, MVT::i32); // 0,0,0,4 -> vspltisw(4) 811 } 812 if (LeadingOnes) { 813 if (UniquedVals[Multiple-1].getNode() == 0) 814 return DAG.getTargetConstant(~0U, MVT::i32); // -1,-1,-1,undef 815 int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue(); 816 if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2) 817 return DAG.getTargetConstant(Val, MVT::i32); 818 } 819 820 return SDValue(); 821 } 822 823 // Check to see if this buildvec has a single non-undef value in its elements. 824 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 825 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; 826 if (OpVal.getNode() == 0) 827 OpVal = N->getOperand(i); 828 else if (OpVal != N->getOperand(i)) 829 return SDValue(); 830 } 831 832 if (OpVal.getNode() == 0) return SDValue(); // All UNDEF: use implicit def. 833 834 unsigned ValSizeInBytes = EltSize; 835 uint64_t Value = 0; 836 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { 837 Value = CN->getZExtValue(); 838 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { 839 assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!"); 840 Value = FloatToBits(CN->getValueAPF().convertToFloat()); 841 } 842 843 // If the splat value is larger than the element value, then we can never do 844 // this splat. The only case that we could fit the replicated bits into our 845 // immediate field for would be zero, and we prefer to use vxor for it. 846 if (ValSizeInBytes < ByteSize) return SDValue(); 847 848 // If the element value is larger than the splat value, cut it in half and 849 // check to see if the two halves are equal. Continue doing this until we 850 // get to ByteSize. This allows us to handle 0x01010101 as 0x01. 
851 while (ValSizeInBytes > ByteSize) { 852 ValSizeInBytes >>= 1; 853 854 // If the top half equals the bottom half, we're still ok. 855 if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) != 856 (Value & ((1 << (8*ValSizeInBytes))-1))) 857 return SDValue(); 858 } 859 860 // Properly sign extend the value. 861 int MaskVal = SignExtend32(Value, ByteSize * 8); 862 863 // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros. 864 if (MaskVal == 0) return SDValue(); 865 866 // Finally, if this value fits in a 5 bit sext field, return it 867 if (SignExtend32<5>(MaskVal) == MaskVal) 868 return DAG.getTargetConstant(MaskVal, MVT::i32); 869 return SDValue(); 870 } 871 872 //===----------------------------------------------------------------------===// 873 // Addressing Mode Selection 874 //===----------------------------------------------------------------------===// 875 876 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit 877 /// or 64-bit immediate, and if the value can be accurately represented as a 878 /// sign extension from a 16-bit value. If so, this returns true and the 879 /// immediate. 880 static bool isIntS16Immediate(SDNode *N, short &Imm) { 881 if (N->getOpcode() != ISD::Constant) 882 return false; 883 884 Imm = (short)cast<ConstantSDNode>(N)->getZExtValue(); 885 if (N->getValueType(0) == MVT::i32) 886 return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue(); 887 else 888 return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue(); 889 } 890 static bool isIntS16Immediate(SDValue Op, short &Imm) { 891 return isIntS16Immediate(Op.getNode(), Imm); 892 } 893 894 895 /// SelectAddressRegReg - Given the specified addressed, check to see if it 896 /// can be represented as an indexed [r+r] operation. Returns false if it 897 /// can be more efficiently represented with [r+imm]. 898 bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base, 899 SDValue &Index, 900 SelectionDAG &DAG) const { 901 short imm = 0; 902 if (N.getOpcode() == ISD::ADD) { 903 if (isIntS16Immediate(N.getOperand(1), imm)) 904 return false; // r+i 905 if (N.getOperand(1).getOpcode() == PPCISD::Lo) 906 return false; // r+i 907 908 Base = N.getOperand(0); 909 Index = N.getOperand(1); 910 return true; 911 } else if (N.getOpcode() == ISD::OR) { 912 if (isIntS16Immediate(N.getOperand(1), imm)) 913 return false; // r+i can fold it if we can. 914 915 // If this is an or of disjoint bitfields, we can codegen this as an add 916 // (for better address arithmetic) if the LHS and RHS of the OR are provably 917 // disjoint. 918 APInt LHSKnownZero, LHSKnownOne; 919 APInt RHSKnownZero, RHSKnownOne; 920 DAG.ComputeMaskedBits(N.getOperand(0), 921 LHSKnownZero, LHSKnownOne); 922 923 if (LHSKnownZero.getBoolValue()) { 924 DAG.ComputeMaskedBits(N.getOperand(1), 925 RHSKnownZero, RHSKnownOne); 926 // If all of the bits are known zero on the LHS or RHS, the add won't 927 // carry. 928 if (~(LHSKnownZero | RHSKnownZero) == 0) { 929 Base = N.getOperand(0); 930 Index = N.getOperand(1); 931 return true; 932 } 933 } 934 } 935 936 return false; 937 } 938 939 /// Returns true if the address N can be represented by a base register plus 940 /// a signed 16-bit displacement [r+imm], and if it is not better 941 /// represented as reg+reg. 
942 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp, 943 SDValue &Base, 944 SelectionDAG &DAG) const { 945 // FIXME dl should come from parent load or store, not from address 946 DebugLoc dl = N.getDebugLoc(); 947 // If this can be more profitably realized as r+r, fail. 948 if (SelectAddressRegReg(N, Disp, Base, DAG)) 949 return false; 950 951 if (N.getOpcode() == ISD::ADD) { 952 short imm = 0; 953 if (isIntS16Immediate(N.getOperand(1), imm)) { 954 Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32); 955 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 956 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 957 } else { 958 Base = N.getOperand(0); 959 } 960 return true; // [r+i] 961 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 962 // Match LOAD (ADD (X, Lo(G))). 963 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() 964 && "Cannot handle constant offsets yet!"); 965 Disp = N.getOperand(1).getOperand(0); // The global address. 966 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 967 Disp.getOpcode() == ISD::TargetGlobalTLSAddress || 968 Disp.getOpcode() == ISD::TargetConstantPool || 969 Disp.getOpcode() == ISD::TargetJumpTable); 970 Base = N.getOperand(0); 971 return true; // [&g+r] 972 } 973 } else if (N.getOpcode() == ISD::OR) { 974 short imm = 0; 975 if (isIntS16Immediate(N.getOperand(1), imm)) { 976 // If this is an or of disjoint bitfields, we can codegen this as an add 977 // (for better address arithmetic) if the LHS and RHS of the OR are 978 // provably disjoint. 979 APInt LHSKnownZero, LHSKnownOne; 980 DAG.ComputeMaskedBits(N.getOperand(0), LHSKnownZero, LHSKnownOne); 981 982 if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 983 // If all of the bits are known zero on the LHS or RHS, the add won't 984 // carry. 985 Base = N.getOperand(0); 986 Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32); 987 return true; 988 } 989 } 990 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 991 // Loading from a constant address. 992 993 // If this address fits entirely in a 16-bit sext immediate field, codegen 994 // this as "d, 0" 995 short Imm; 996 if (isIntS16Immediate(CN, Imm)) { 997 Disp = DAG.getTargetConstant(Imm, CN->getValueType(0)); 998 Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::X0 : PPC::R0, 999 CN->getValueType(0)); 1000 return true; 1001 } 1002 1003 // Handle 32-bit sext immediates with LIS + addr mode. 1004 if (CN->getValueType(0) == MVT::i32 || 1005 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) { 1006 int Addr = (int)CN->getZExtValue(); 1007 1008 // Otherwise, break this down into an LIS + disp. 1009 Disp = DAG.getTargetConstant((short)Addr, MVT::i32); 1010 1011 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32); 1012 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8; 1013 Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0); 1014 return true; 1015 } 1016 } 1017 1018 Disp = DAG.getTargetConstant(0, getPointerTy()); 1019 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) 1020 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1021 else 1022 Base = N; 1023 return true; // [r+0] 1024 } 1025 1026 /// SelectAddressRegRegOnly - Given the specified addressed, force it to be 1027 /// represented as an indexed [r+r] operation. 
1028 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base, 1029 SDValue &Index, 1030 SelectionDAG &DAG) const { 1031 // Check to see if we can easily represent this as an [r+r] address. This 1032 // will fail if it thinks that the address is more profitably represented as 1033 // reg+imm, e.g. where imm = 0. 1034 if (SelectAddressRegReg(N, Base, Index, DAG)) 1035 return true; 1036 1037 // If the operand is an addition, always emit this as [r+r], since this is 1038 // better (for code size, and execution, as the memop does the add for free) 1039 // than emitting an explicit add. 1040 if (N.getOpcode() == ISD::ADD) { 1041 Base = N.getOperand(0); 1042 Index = N.getOperand(1); 1043 return true; 1044 } 1045 1046 // Otherwise, do it the hard way, using R0 as the base register. 1047 Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::X0 : PPC::R0, 1048 N.getValueType()); 1049 Index = N; 1050 return true; 1051 } 1052 1053 /// SelectAddressRegImmShift - Returns true if the address N can be 1054 /// represented by a base register plus a signed 14-bit displacement 1055 /// [r+imm*4]. Suitable for use by STD and friends. 1056 bool PPCTargetLowering::SelectAddressRegImmShift(SDValue N, SDValue &Disp, 1057 SDValue &Base, 1058 SelectionDAG &DAG) const { 1059 // FIXME dl should come from the parent load or store, not the address 1060 DebugLoc dl = N.getDebugLoc(); 1061 // If this can be more profitably realized as r+r, fail. 1062 if (SelectAddressRegReg(N, Disp, Base, DAG)) 1063 return false; 1064 1065 if (N.getOpcode() == ISD::ADD) { 1066 short imm = 0; 1067 if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) { 1068 Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32); 1069 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 1070 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1071 } else { 1072 Base = N.getOperand(0); 1073 } 1074 return true; // [r+i] 1075 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 1076 // Match LOAD (ADD (X, Lo(G))). 1077 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() 1078 && "Cannot handle constant offsets yet!"); 1079 Disp = N.getOperand(1).getOperand(0); // The global address. 1080 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 1081 Disp.getOpcode() == ISD::TargetConstantPool || 1082 Disp.getOpcode() == ISD::TargetJumpTable); 1083 Base = N.getOperand(0); 1084 return true; // [&g+r] 1085 } 1086 } else if (N.getOpcode() == ISD::OR) { 1087 short imm = 0; 1088 if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) { 1089 // If this is an or of disjoint bitfields, we can codegen this as an add 1090 // (for better address arithmetic) if the LHS and RHS of the OR are 1091 // provably disjoint. 1092 APInt LHSKnownZero, LHSKnownOne; 1093 DAG.ComputeMaskedBits(N.getOperand(0), LHSKnownZero, LHSKnownOne); 1094 if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 1095 // If all of the bits are known zero on the LHS or RHS, the add won't 1096 // carry. 1097 Base = N.getOperand(0); 1098 Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32); 1099 return true; 1100 } 1101 } 1102 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 1103 // Loading from a constant address. Verify low two bits are clear. 
1104 if ((CN->getZExtValue() & 3) == 0) { 1105 // If this address fits entirely in a 14-bit sext immediate field, codegen 1106 // this as "d, 0" 1107 short Imm; 1108 if (isIntS16Immediate(CN, Imm)) { 1109 Disp = DAG.getTargetConstant((unsigned short)Imm >> 2, getPointerTy()); 1110 Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::X0 : PPC::R0, 1111 CN->getValueType(0)); 1112 return true; 1113 } 1114 1115 // Fold the low-part of 32-bit absolute addresses into addr mode. 1116 if (CN->getValueType(0) == MVT::i32 || 1117 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) { 1118 int Addr = (int)CN->getZExtValue(); 1119 1120 // Otherwise, break this down into an LIS + disp. 1121 Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32); 1122 Base = DAG.getTargetConstant((Addr-(signed short)Addr) >> 16, MVT::i32); 1123 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8; 1124 Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base),0); 1125 return true; 1126 } 1127 } 1128 } 1129 1130 Disp = DAG.getTargetConstant(0, getPointerTy()); 1131 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) 1132 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1133 else 1134 Base = N; 1135 return true; // [r+0] 1136 } 1137 1138 1139 /// getPreIndexedAddressParts - returns true by value, base pointer and 1140 /// offset pointer and addressing mode by reference if the node's address 1141 /// can be legally represented as pre-indexed load / store address. 1142 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 1143 SDValue &Offset, 1144 ISD::MemIndexedMode &AM, 1145 SelectionDAG &DAG) const { 1146 if (DisablePPCPreinc) return false; 1147 1148 SDValue Ptr; 1149 EVT VT; 1150 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1151 Ptr = LD->getBasePtr(); 1152 VT = LD->getMemoryVT(); 1153 1154 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 1155 Ptr = ST->getBasePtr(); 1156 VT = ST->getMemoryVT(); 1157 } else 1158 return false; 1159 1160 // PowerPC doesn't have preinc load/store instructions for vectors. 1161 if (VT.isVector()) 1162 return false; 1163 1164 if (SelectAddressRegReg(Ptr, Offset, Base, DAG)) { 1165 AM = ISD::PRE_INC; 1166 return true; 1167 } 1168 1169 // LDU/STU use reg+imm*4, others use reg+imm. 1170 if (VT != MVT::i64) { 1171 // reg + imm 1172 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG)) 1173 return false; 1174 } else { 1175 // reg + imm * 4. 1176 if (!SelectAddressRegImmShift(Ptr, Offset, Base, DAG)) 1177 return false; 1178 } 1179 1180 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1181 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 1182 // sext i32 to i64 when addr mode is r+i. 1183 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 && 1184 LD->getExtensionType() == ISD::SEXTLOAD && 1185 isa<ConstantSDNode>(Offset)) 1186 return false; 1187 } 1188 1189 AM = ISD::PRE_INC; 1190 return true; 1191 } 1192 1193 //===----------------------------------------------------------------------===// 1194 // LowerOperation implementation 1195 //===----------------------------------------------------------------------===// 1196 1197 /// GetLabelAccessInfo - Return true if we should reference labels using a 1198 /// PICBase, set the HiOpFlags and LoOpFlags to the target MO flags. 
1199 static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags, 1200 unsigned &LoOpFlags, const GlobalValue *GV = 0) { 1201 HiOpFlags = PPCII::MO_HA16; 1202 LoOpFlags = PPCII::MO_LO16; 1203 1204 // Don't use the pic base if not in PIC relocation model. Or if we are on a 1205 // non-darwin platform. We don't support PIC on other platforms yet. 1206 bool isPIC = TM.getRelocationModel() == Reloc::PIC_ && 1207 TM.getSubtarget<PPCSubtarget>().isDarwin(); 1208 if (isPIC) { 1209 HiOpFlags |= PPCII::MO_PIC_FLAG; 1210 LoOpFlags |= PPCII::MO_PIC_FLAG; 1211 } 1212 1213 // If this is a reference to a global value that requires a non-lazy-ptr, make 1214 // sure that instruction lowering adds it. 1215 if (GV && TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV, TM)) { 1216 HiOpFlags |= PPCII::MO_NLP_FLAG; 1217 LoOpFlags |= PPCII::MO_NLP_FLAG; 1218 1219 if (GV->hasHiddenVisibility()) { 1220 HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 1221 LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 1222 } 1223 } 1224 1225 return isPIC; 1226 } 1227 1228 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, 1229 SelectionDAG &DAG) { 1230 EVT PtrVT = HiPart.getValueType(); 1231 SDValue Zero = DAG.getConstant(0, PtrVT); 1232 DebugLoc DL = HiPart.getDebugLoc(); 1233 1234 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); 1235 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); 1236 1237 // With PIC, the first instruction is actually "GR+hi(&G)". 1238 if (isPIC) 1239 Hi = DAG.getNode(ISD::ADD, DL, PtrVT, 1240 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); 1241 1242 // Generate non-pic code that has direct accesses to the constant pool. 1243 // The address of the global is just (hi(&g)+lo(&g)). 1244 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); 1245 } 1246 1247 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 1248 SelectionDAG &DAG) const { 1249 EVT PtrVT = Op.getValueType(); 1250 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 1251 const Constant *C = CP->getConstVal(); 1252 1253 // 64-bit SVR4 ABI code is always position-independent. 1254 // The actual address of the GlobalValue is stored in the TOC. 1255 if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) { 1256 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0); 1257 return DAG.getNode(PPCISD::TOC_ENTRY, CP->getDebugLoc(), MVT::i64, GA, 1258 DAG.getRegister(PPC::X2, MVT::i64)); 1259 } 1260 1261 unsigned MOHiFlag, MOLoFlag; 1262 bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag); 1263 SDValue CPIHi = 1264 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag); 1265 SDValue CPILo = 1266 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag); 1267 return LowerLabelRef(CPIHi, CPILo, isPIC, DAG); 1268 } 1269 1270 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 1271 EVT PtrVT = Op.getValueType(); 1272 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 1273 1274 // 64-bit SVR4 ABI code is always position-independent. 1275 // The actual address of the GlobalValue is stored in the TOC. 
1276 if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) { 1277 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 1278 return DAG.getNode(PPCISD::TOC_ENTRY, JT->getDebugLoc(), MVT::i64, GA, 1279 DAG.getRegister(PPC::X2, MVT::i64)); 1280 } 1281 1282 unsigned MOHiFlag, MOLoFlag; 1283 bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag); 1284 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 1285 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 1286 return LowerLabelRef(JTIHi, JTILo, isPIC, DAG); 1287 } 1288 1289 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 1290 SelectionDAG &DAG) const { 1291 EVT PtrVT = Op.getValueType(); 1292 1293 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 1294 1295 unsigned MOHiFlag, MOLoFlag; 1296 bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag); 1297 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 1298 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 1299 return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG); 1300 } 1301 1302 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 1303 SelectionDAG &DAG) const { 1304 1305 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 1306 DebugLoc dl = GA->getDebugLoc(); 1307 const GlobalValue *GV = GA->getGlobal(); 1308 EVT PtrVT = getPointerTy(); 1309 bool is64bit = PPCSubTarget.isPPC64(); 1310 1311 TLSModel::Model model = getTargetMachine().getTLSModel(GV); 1312 1313 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 1314 PPCII::MO_TPREL16_HA); 1315 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 1316 PPCII::MO_TPREL16_LO); 1317 1318 if (model != TLSModel::LocalExec) 1319 llvm_unreachable("only local-exec TLS mode supported"); 1320 SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2, 1321 is64bit ? MVT::i64 : MVT::i32); 1322 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 1323 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 1324 } 1325 1326 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 1327 SelectionDAG &DAG) const { 1328 EVT PtrVT = Op.getValueType(); 1329 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 1330 DebugLoc DL = GSDN->getDebugLoc(); 1331 const GlobalValue *GV = GSDN->getGlobal(); 1332 1333 // 64-bit SVR4 ABI code is always position-independent. 1334 // The actual address of the GlobalValue is stored in the TOC. 1335 if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) { 1336 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); 1337 return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i64, GA, 1338 DAG.getRegister(PPC::X2, MVT::i64)); 1339 } 1340 1341 unsigned MOHiFlag, MOLoFlag; 1342 bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag, GV); 1343 1344 SDValue GAHi = 1345 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); 1346 SDValue GALo = 1347 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); 1348 1349 SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG); 1350 1351 // If the global reference is actually to a non-lazy-pointer, we have to do an 1352 // extra load to get the address of the global. 
1353 if (MOHiFlag & PPCII::MO_NLP_FLAG) 1354 Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(), 1355 false, false, false, 0); 1356 return Ptr; 1357 } 1358 1359 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 1360 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 1361 DebugLoc dl = Op.getDebugLoc(); 1362 1363 // If we're comparing for equality to zero, expose the fact that this is 1364 // implented as a ctlz/srl pair on ppc, so that the dag combiner can 1365 // fold the new nodes. 1366 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1367 if (C->isNullValue() && CC == ISD::SETEQ) { 1368 EVT VT = Op.getOperand(0).getValueType(); 1369 SDValue Zext = Op.getOperand(0); 1370 if (VT.bitsLT(MVT::i32)) { 1371 VT = MVT::i32; 1372 Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0)); 1373 } 1374 unsigned Log2b = Log2_32(VT.getSizeInBits()); 1375 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext); 1376 SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz, 1377 DAG.getConstant(Log2b, MVT::i32)); 1378 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc); 1379 } 1380 // Leave comparisons against 0 and -1 alone for now, since they're usually 1381 // optimized. FIXME: revisit this when we can custom lower all setcc 1382 // optimizations. 1383 if (C->isAllOnesValue() || C->isNullValue()) 1384 return SDValue(); 1385 } 1386 1387 // If we have an integer seteq/setne, turn it into a compare against zero 1388 // by xor'ing the rhs with the lhs, which is faster than setting a 1389 // condition register, reading it back out, and masking the correct bit. The 1390 // normal approach here uses sub to do this instead of xor. Using xor exposes 1391 // the result to other bit-twiddling opportunities. 
1392 EVT LHSVT = Op.getOperand(0).getValueType(); 1393 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 1394 EVT VT = Op.getValueType(); 1395 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0), 1396 Op.getOperand(1)); 1397 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, LHSVT), CC); 1398 } 1399 return SDValue(); 1400 } 1401 1402 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG, 1403 const PPCSubtarget &Subtarget) const { 1404 SDNode *Node = Op.getNode(); 1405 EVT VT = Node->getValueType(0); 1406 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1407 SDValue InChain = Node->getOperand(0); 1408 SDValue VAListPtr = Node->getOperand(1); 1409 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 1410 DebugLoc dl = Node->getDebugLoc(); 1411 1412 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only"); 1413 1414 // gpr_index 1415 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 1416 VAListPtr, MachinePointerInfo(SV), MVT::i8, 1417 false, false, 0); 1418 InChain = GprIndex.getValue(1); 1419 1420 if (VT == MVT::i64) { 1421 // Check if GprIndex is even 1422 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex, 1423 DAG.getConstant(1, MVT::i32)); 1424 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd, 1425 DAG.getConstant(0, MVT::i32), ISD::SETNE); 1426 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex, 1427 DAG.getConstant(1, MVT::i32)); 1428 // Align GprIndex to be even if it isn't 1429 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne, 1430 GprIndex); 1431 } 1432 1433 // fpr index is 1 byte after gpr 1434 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 1435 DAG.getConstant(1, MVT::i32)); 1436 1437 // fpr 1438 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 1439 FprPtr, MachinePointerInfo(SV), MVT::i8, 1440 false, false, 0); 1441 InChain = FprIndex.getValue(1); 1442 1443 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 1444 DAG.getConstant(8, MVT::i32)); 1445 1446 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 1447 DAG.getConstant(4, MVT::i32)); 1448 1449 // areas 1450 SDValue OverflowArea = DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, 1451 MachinePointerInfo(), false, false, 1452 false, 0); 1453 InChain = OverflowArea.getValue(1); 1454 1455 SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, 1456 MachinePointerInfo(), false, false, 1457 false, 0); 1458 InChain = RegSaveArea.getValue(1); 1459 1460 // select overflow_area if index > 8 1461 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex, 1462 DAG.getConstant(8, MVT::i32), ISD::SETLT); 1463 1464 // adjustment constant gpr_index * 4/8 1465 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32, 1466 VT.isInteger() ? GprIndex : FprIndex, 1467 DAG.getConstant(VT.isInteger() ? 4 : 8, 1468 MVT::i32)); 1469 1470 // OurReg = RegSaveArea + RegConstant 1471 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea, 1472 RegConstant); 1473 1474 // Floating types are 32 bytes into RegSaveArea 1475 if (VT.isFloatingPoint()) 1476 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg, 1477 DAG.getConstant(32, MVT::i32)); 1478 1479 // increase {f,g}pr_index by 1 (or 2 if VT is i64) 1480 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32, 1481 VT.isInteger() ? GprIndex : FprIndex, 1482 DAG.getConstant(VT == MVT::i64 ? 
2 : 1, 1483 MVT::i32)); 1484 1485 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1, 1486 VT.isInteger() ? VAListPtr : FprPtr, 1487 MachinePointerInfo(SV), 1488 MVT::i8, false, false, 0); 1489 1490 // determine if we should load from reg_save_area or overflow_area 1491 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea); 1492 1493 // increase overflow_area by 4/8 if gpr/fpr > 8 1494 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea, 1495 DAG.getConstant(VT.isInteger() ? 4 : 8, 1496 MVT::i32)); 1497 1498 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea, 1499 OverflowAreaPlusN); 1500 1501 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, 1502 OverflowAreaPtr, 1503 MachinePointerInfo(), 1504 MVT::i32, false, false, 0); 1505 1506 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(), 1507 false, false, false, 0); 1508 } 1509 1510 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 1511 SelectionDAG &DAG) const { 1512 return Op.getOperand(0); 1513 } 1514 1515 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 1516 SelectionDAG &DAG) const { 1517 SDValue Chain = Op.getOperand(0); 1518 SDValue Trmp = Op.getOperand(1); // trampoline 1519 SDValue FPtr = Op.getOperand(2); // nested function 1520 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 1521 DebugLoc dl = Op.getDebugLoc(); 1522 1523 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1524 bool isPPC64 = (PtrVT == MVT::i64); 1525 Type *IntPtrTy = 1526 DAG.getTargetLoweringInfo().getDataLayout()->getIntPtrType( 1527 *DAG.getContext()); 1528 1529 TargetLowering::ArgListTy Args; 1530 TargetLowering::ArgListEntry Entry; 1531 1532 Entry.Ty = IntPtrTy; 1533 Entry.Node = Trmp; Args.push_back(Entry); 1534 1535 // TrampSize == (isPPC64 ? 48 : 40); 1536 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, 1537 isPPC64 ? MVT::i64 : MVT::i32); 1538 Args.push_back(Entry); 1539 1540 Entry.Node = FPtr; Args.push_back(Entry); 1541 Entry.Node = Nest; Args.push_back(Entry); 1542 1543 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 1544 TargetLowering::CallLoweringInfo CLI(Chain, 1545 Type::getVoidTy(*DAG.getContext()), 1546 false, false, false, false, 0, 1547 CallingConv::C, 1548 /*isTailCall=*/false, 1549 /*doesNotRet=*/false, 1550 /*isReturnValueUsed=*/true, 1551 DAG.getExternalSymbol("__trampoline_setup", PtrVT), 1552 Args, DAG, dl); 1553 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 1554 1555 return CallResult.second; 1556 } 1557 1558 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG, 1559 const PPCSubtarget &Subtarget) const { 1560 MachineFunction &MF = DAG.getMachineFunction(); 1561 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 1562 1563 DebugLoc dl = Op.getDebugLoc(); 1564 1565 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) { 1566 // vastart just stores the address of the VarArgsFrameIndex slot into the 1567 // memory location argument. 1568 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1569 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 1570 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1571 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 1572 MachinePointerInfo(SV), 1573 false, false, 0); 1574 } 1575 1576 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 1577 // We suppose the given va_list is already allocated. 
1578 // 1579 // typedef struct { 1580 // char gpr; /* index into the array of 8 GPRs 1581 // * stored in the register save area 1582 // * gpr=0 corresponds to r3, 1583 // * gpr=1 to r4, etc. 1584 // */ 1585 // char fpr; /* index into the array of 8 FPRs 1586 // * stored in the register save area 1587 // * fpr=0 corresponds to f1, 1588 // * fpr=1 to f2, etc. 1589 // */ 1590 // char *overflow_arg_area; 1591 // /* location on stack that holds 1592 // * the next overflow argument 1593 // */ 1594 // char *reg_save_area; 1595 // /* where r3:r10 and f1:f8 (if saved) 1596 // * are stored 1597 // */ 1598 // } va_list[1]; 1599 1600 1601 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), MVT::i32); 1602 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), MVT::i32); 1603 1604 1605 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1606 1607 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 1608 PtrVT); 1609 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 1610 PtrVT); 1611 1612 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 1613 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT); 1614 1615 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 1616 SDValue ConstStackOffset = DAG.getConstant(StackOffset, PtrVT); 1617 1618 uint64_t FPROffset = 1; 1619 SDValue ConstFPROffset = DAG.getConstant(FPROffset, PtrVT); 1620 1621 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1622 1623 // Store first byte : number of int regs 1624 SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, 1625 Op.getOperand(1), 1626 MachinePointerInfo(SV), 1627 MVT::i8, false, false, 0); 1628 uint64_t nextOffset = FPROffset; 1629 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 1630 ConstFPROffset); 1631 1632 // Store second byte : number of float regs 1633 SDValue secondStore = 1634 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 1635 MachinePointerInfo(SV, nextOffset), MVT::i8, 1636 false, false, 0); 1637 nextOffset += StackOffset; 1638 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 1639 1640 // Store second word : arguments given on stack 1641 SDValue thirdStore = 1642 DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 1643 MachinePointerInfo(SV, nextOffset), 1644 false, false, 0); 1645 nextOffset += FrameOffset; 1646 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 1647 1648 // Store third word : arguments given in registers 1649 return DAG.getStore(thirdStore, dl, FR, nextPtr, 1650 MachinePointerInfo(SV, nextOffset), 1651 false, false, 0); 1652 1653 } 1654 1655 #include "PPCGenCallingConv.inc" 1656 1657 static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 1658 CCValAssign::LocInfo &LocInfo, 1659 ISD::ArgFlagsTy &ArgFlags, 1660 CCState &State) { 1661 return true; 1662 } 1663 1664 static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, 1665 MVT &LocVT, 1666 CCValAssign::LocInfo &LocInfo, 1667 ISD::ArgFlagsTy &ArgFlags, 1668 CCState &State) { 1669 static const uint16_t ArgRegs[] = { 1670 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 1671 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 1672 }; 1673 const unsigned NumArgRegs = array_lengthof(ArgRegs); 1674 1675 unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs); 1676 1677 // Skip one register if the first unallocated register has an even register 1678 // number and there are still argument registers available which have not been 1679 // allocated yet. 
RegNum is actually an index into ArgRegs, which means we 1680 // need to skip a register if RegNum is odd. 1681 if (RegNum != NumArgRegs && RegNum % 2 == 1) { 1682 State.AllocateReg(ArgRegs[RegNum]); 1683 } 1684 1685 // Always return false here, as this function only makes sure that the first 1686 // unallocated register has an odd register number and does not actually 1687 // allocate a register for the current argument. 1688 return false; 1689 } 1690 1691 static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT, 1692 MVT &LocVT, 1693 CCValAssign::LocInfo &LocInfo, 1694 ISD::ArgFlagsTy &ArgFlags, 1695 CCState &State) { 1696 static const uint16_t ArgRegs[] = { 1697 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1698 PPC::F8 1699 }; 1700 1701 const unsigned NumArgRegs = array_lengthof(ArgRegs); 1702 1703 unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs); 1704 1705 // If there is only one Floating-point register left we need to put both f64 1706 // values of a split ppc_fp128 value on the stack. 1707 if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) { 1708 State.AllocateReg(ArgRegs[RegNum]); 1709 } 1710 1711 // Always return false here, as this function only makes sure that the two f64 1712 // values a ppc_fp128 value is split into are both passed in registers or both 1713 // passed on the stack and does not actually allocate a register for the 1714 // current argument. 1715 return false; 1716 } 1717 1718 /// GetFPR - Get the set of FP registers that should be allocated for arguments, 1719 /// on Darwin. 1720 static const uint16_t *GetFPR() { 1721 static const uint16_t FPR[] = { 1722 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1723 PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13 1724 }; 1725 1726 return FPR; 1727 } 1728 1729 /// CalculateStackSlotSize - Calculates the size reserved for this argument on 1730 /// the stack. 
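/// For example, with PtrByteSize == 8 a 13-byte by-value aggregate reserves a
/// 16-byte slot ((13 + 7) / 8 * 8), and a plain i32 still reserves a full
/// 8-byte slot; this is just the rounding performed below.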
1731 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 1732 unsigned PtrByteSize) { 1733 unsigned ArgSize = ArgVT.getSizeInBits()/8; 1734 if (Flags.isByVal()) 1735 ArgSize = Flags.getByValSize(); 1736 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 1737 1738 return ArgSize; 1739 } 1740 1741 SDValue 1742 PPCTargetLowering::LowerFormalArguments(SDValue Chain, 1743 CallingConv::ID CallConv, bool isVarArg, 1744 const SmallVectorImpl<ISD::InputArg> 1745 &Ins, 1746 DebugLoc dl, SelectionDAG &DAG, 1747 SmallVectorImpl<SDValue> &InVals) 1748 const { 1749 if (PPCSubTarget.isSVR4ABI()) { 1750 if (PPCSubTarget.isPPC64()) 1751 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, 1752 dl, DAG, InVals); 1753 else 1754 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, 1755 dl, DAG, InVals); 1756 } else { 1757 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, 1758 dl, DAG, InVals); 1759 } 1760 } 1761 1762 SDValue 1763 PPCTargetLowering::LowerFormalArguments_32SVR4( 1764 SDValue Chain, 1765 CallingConv::ID CallConv, bool isVarArg, 1766 const SmallVectorImpl<ISD::InputArg> 1767 &Ins, 1768 DebugLoc dl, SelectionDAG &DAG, 1769 SmallVectorImpl<SDValue> &InVals) const { 1770 1771 // 32-bit SVR4 ABI Stack Frame Layout: 1772 // +-----------------------------------+ 1773 // +--> | Back chain | 1774 // | +-----------------------------------+ 1775 // | | Floating-point register save area | 1776 // | +-----------------------------------+ 1777 // | | General register save area | 1778 // | +-----------------------------------+ 1779 // | | CR save word | 1780 // | +-----------------------------------+ 1781 // | | VRSAVE save word | 1782 // | +-----------------------------------+ 1783 // | | Alignment padding | 1784 // | +-----------------------------------+ 1785 // | | Vector register save area | 1786 // | +-----------------------------------+ 1787 // | | Local variable space | 1788 // | +-----------------------------------+ 1789 // | | Parameter list area | 1790 // | +-----------------------------------+ 1791 // | | LR save word | 1792 // | +-----------------------------------+ 1793 // SP--> +--- | Back chain | 1794 // +-----------------------------------+ 1795 // 1796 // Specifications: 1797 // System V Application Binary Interface PowerPC Processor Supplement 1798 // AltiVec Technology Programming Interface Manual 1799 1800 MachineFunction &MF = DAG.getMachineFunction(); 1801 MachineFrameInfo *MFI = MF.getFrameInfo(); 1802 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 1803 1804 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1805 // Potential tail calls could cause overwriting of argument stack slots. 1806 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 1807 (CallConv == CallingConv::Fast)); 1808 unsigned PtrByteSize = 4; 1809 1810 // Assign locations to all of the incoming arguments. 1811 SmallVector<CCValAssign, 16> ArgLocs; 1812 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1813 getTargetMachine(), ArgLocs, *DAG.getContext()); 1814 1815 // Reserve space for the linkage area on the stack. 1816 CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize); 1817 1818 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC_SVR4); 1819 1820 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1821 CCValAssign &VA = ArgLocs[i]; 1822 1823 // Arguments stored in registers. 
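    // (With CC_PPC_SVR4 this typically means i32 values in R3-R10, f32/f64 in
    // F1-F8 and vectors in V2-V13; the authoritative register lists live in
    // the generated calling-convention tables, not in this comment.)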
1824 if (VA.isRegLoc()) { 1825 const TargetRegisterClass *RC; 1826 EVT ValVT = VA.getValVT(); 1827 1828 switch (ValVT.getSimpleVT().SimpleTy) { 1829 default: 1830 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 1831 case MVT::i32: 1832 RC = &PPC::GPRCRegClass; 1833 break; 1834 case MVT::f32: 1835 RC = &PPC::F4RCRegClass; 1836 break; 1837 case MVT::f64: 1838 RC = &PPC::F8RCRegClass; 1839 break; 1840 case MVT::v16i8: 1841 case MVT::v8i16: 1842 case MVT::v4i32: 1843 case MVT::v4f32: 1844 RC = &PPC::VRRCRegClass; 1845 break; 1846 } 1847 1848 // Transform the arguments stored in physical registers into virtual ones. 1849 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 1850 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, ValVT); 1851 1852 InVals.push_back(ArgValue); 1853 } else { 1854 // Argument stored in memory. 1855 assert(VA.isMemLoc()); 1856 1857 unsigned ArgSize = VA.getLocVT().getSizeInBits() / 8; 1858 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), 1859 isImmutable); 1860 1861 // Create load nodes to retrieve arguments from the stack. 1862 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 1863 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 1864 MachinePointerInfo(), 1865 false, false, false, 0)); 1866 } 1867 } 1868 1869 // Assign locations to all of the incoming aggregate by value arguments. 1870 // Aggregates passed by value are stored in the local variable space of the 1871 // caller's stack frame, right above the parameter list area. 1872 SmallVector<CCValAssign, 16> ByValArgLocs; 1873 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1874 getTargetMachine(), ByValArgLocs, *DAG.getContext()); 1875 1876 // Reserve stack space for the allocations in CCInfo. 1877 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 1878 1879 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC_SVR4_ByVal); 1880 1881 // Area that is at least reserved in the caller of this function. 1882 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 1883 1884 // Set the size that is at least reserved in caller of this function. Tail 1885 // call optimized function's reserved stack space needs to be aligned so that 1886 // taking the difference between two stack areas will result in an aligned 1887 // stack. 1888 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 1889 1890 MinReservedArea = 1891 std::max(MinReservedArea, 1892 PPCFrameLowering::getMinCallFrameSize(false, false)); 1893 1894 unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameLowering()-> 1895 getStackAlignment(); 1896 unsigned AlignMask = TargetAlign-1; 1897 MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask; 1898 1899 FI->setMinReservedArea(MinReservedArea); 1900 1901 SmallVector<SDValue, 8> MemOps; 1902 1903 // If the function takes variable number of arguments, make a frame index for 1904 // the start of the first vararg value... for expansion of llvm.va_start. 
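  // Rough sketch of what the varargs block below produces for 32-bit SVR4:
  // the parameter registers are spilled to a register save area so va_arg can
  // walk them, approximately
  //   stw  r3, 0(area)  ... stw  r10, 28(area)
  //   stfd f1, 32(area) ... stfd f8, 88(area)
  // The concrete offsets come from the frame index and the 4/8-byte
  // increments computed below; the assembly above is only an illustration.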
1905 if (isVarArg) { 1906 static const uint16_t GPArgRegs[] = { 1907 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 1908 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 1909 }; 1910 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); 1911 1912 static const uint16_t FPArgRegs[] = { 1913 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1914 PPC::F8 1915 }; 1916 const unsigned NumFPArgRegs = array_lengthof(FPArgRegs); 1917 1918 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs, 1919 NumGPArgRegs)); 1920 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs, 1921 NumFPArgRegs)); 1922 1923 // Make room for NumGPArgRegs and NumFPArgRegs. 1924 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + 1925 NumFPArgRegs * EVT(MVT::f64).getSizeInBits()/8; 1926 1927 FuncInfo->setVarArgsStackOffset( 1928 MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 1929 CCInfo.getNextStackOffset(), true)); 1930 1931 FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false)); 1932 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 1933 1934 // The fixed integer arguments of a variadic function are stored to the 1935 // VarArgsFrameIndex on the stack so that they may be loaded by deferencing 1936 // the result of va_next. 1937 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) { 1938 // Get an existing live-in vreg, or add a new one. 1939 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]); 1940 if (!VReg) 1941 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); 1942 1943 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 1944 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 1945 MachinePointerInfo(), false, false, 0); 1946 MemOps.push_back(Store); 1947 // Increment the address by four for the next argument to store 1948 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); 1949 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 1950 } 1951 1952 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 1953 // is set. 1954 // The double arguments are stored to the VarArgsFrameIndex 1955 // on the stack. 1956 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) { 1957 // Get an existing live-in vreg, or add a new one. 1958 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); 1959 if (!VReg) 1960 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 1961 1962 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 1963 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 1964 MachinePointerInfo(), false, false, 0); 1965 MemOps.push_back(Store); 1966 // Increment the address by eight for the next argument to store 1967 SDValue PtrOff = DAG.getConstant(EVT(MVT::f64).getSizeInBits()/8, 1968 PtrVT); 1969 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 1970 } 1971 } 1972 1973 if (!MemOps.empty()) 1974 Chain = DAG.getNode(ISD::TokenFactor, dl, 1975 MVT::Other, &MemOps[0], MemOps.size()); 1976 1977 return Chain; 1978 } 1979 1980 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 1981 // value to MVT::i64 and then truncate to the correct register size. 
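// For example (illustrative DAG shape only): a sign-extended i32 argument
// that arrived in a 64-bit GPR becomes
//   (truncate:i32 (AssertSext:i64 <copy-from-reg>, i32))
// so later combines know the high 32 bits already agree with the sign bit;
// zero-extended arguments get AssertZext instead.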
1982 SDValue 1983 PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT, 1984 SelectionDAG &DAG, SDValue ArgVal, 1985 DebugLoc dl) const { 1986 if (Flags.isSExt()) 1987 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 1988 DAG.getValueType(ObjectVT)); 1989 else if (Flags.isZExt()) 1990 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 1991 DAG.getValueType(ObjectVT)); 1992 1993 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); 1994 } 1995 1996 // Set the size that is at least reserved in caller of this function. Tail 1997 // call optimized functions' reserved stack space needs to be aligned so that 1998 // taking the difference between two stack areas will result in an aligned 1999 // stack. 2000 void 2001 PPCTargetLowering::setMinReservedArea(MachineFunction &MF, SelectionDAG &DAG, 2002 unsigned nAltivecParamsAtEnd, 2003 unsigned MinReservedArea, 2004 bool isPPC64) const { 2005 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 2006 // Add the Altivec parameters at the end, if needed. 2007 if (nAltivecParamsAtEnd) { 2008 MinReservedArea = ((MinReservedArea+15)/16)*16; 2009 MinReservedArea += 16*nAltivecParamsAtEnd; 2010 } 2011 MinReservedArea = 2012 std::max(MinReservedArea, 2013 PPCFrameLowering::getMinCallFrameSize(isPPC64, true)); 2014 unsigned TargetAlign 2015 = DAG.getMachineFunction().getTarget().getFrameLowering()-> 2016 getStackAlignment(); 2017 unsigned AlignMask = TargetAlign-1; 2018 MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask; 2019 FI->setMinReservedArea(MinReservedArea); 2020 } 2021 2022 SDValue 2023 PPCTargetLowering::LowerFormalArguments_64SVR4( 2024 SDValue Chain, 2025 CallingConv::ID CallConv, bool isVarArg, 2026 const SmallVectorImpl<ISD::InputArg> 2027 &Ins, 2028 DebugLoc dl, SelectionDAG &DAG, 2029 SmallVectorImpl<SDValue> &InVals) const { 2030 // TODO: add description of PPC stack frame format, or at least some docs. 2031 // 2032 MachineFunction &MF = DAG.getMachineFunction(); 2033 MachineFrameInfo *MFI = MF.getFrameInfo(); 2034 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2035 2036 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2037 // Potential tail calls could cause overwriting of argument stack slots. 2038 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 2039 (CallConv == CallingConv::Fast)); 2040 unsigned PtrByteSize = 8; 2041 2042 unsigned ArgOffset = PPCFrameLowering::getLinkageSize(true, true); 2043 // Area that is at least reserved in caller of this function. 2044 unsigned MinReservedArea = ArgOffset; 2045 2046 static const uint16_t GPR[] = { 2047 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 2048 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 2049 }; 2050 2051 static const uint16_t *FPR = GetFPR(); 2052 2053 static const uint16_t VR[] = { 2054 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 2055 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 2056 }; 2057 2058 const unsigned Num_GPR_Regs = array_lengthof(GPR); 2059 const unsigned Num_FPR_Regs = 13; 2060 const unsigned Num_VR_Regs = array_lengthof(VR); 2061 2062 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 2063 2064 // Add DAG nodes to load the arguments or copy them out of registers. On 2065 // entry to a function on PPC, the arguments start after the linkage area, 2066 // although the first ones are often in registers. 
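  // Concretely for 64-bit SVR4 (illustrative offsets): the linkage area is 48
  // bytes, so the first parameter save slot sits at SP+48 and each further
  // slot is 8 bytes beyond the previous one, and a slot is accounted for even
  // when the value itself arrived in one of X3-X10. That is the bookkeeping
  // ArgOffset does below.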
2067 2068 SmallVector<SDValue, 8> MemOps; 2069 unsigned nAltivecParamsAtEnd = 0; 2070 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 2071 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo, ++FuncArg) { 2072 SDValue ArgVal; 2073 bool needsLoad = false; 2074 EVT ObjectVT = Ins[ArgNo].VT; 2075 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 2076 unsigned ArgSize = ObjSize; 2077 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 2078 2079 unsigned CurArgOffset = ArgOffset; 2080 2081 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 2082 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 2083 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 2084 if (isVarArg) { 2085 MinReservedArea = ((MinReservedArea+15)/16)*16; 2086 MinReservedArea += CalculateStackSlotSize(ObjectVT, 2087 Flags, 2088 PtrByteSize); 2089 } else 2090 nAltivecParamsAtEnd++; 2091 } else 2092 // Calculate min reserved area. 2093 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 2094 Flags, 2095 PtrByteSize); 2096 2097 // FIXME the codegen can be much improved in some cases. 2098 // We do not have to keep everything in memory. 2099 if (Flags.isByVal()) { 2100 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 2101 ObjSize = Flags.getByValSize(); 2102 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2103 // Empty aggregate parameters do not take up registers. Examples: 2104 // struct { } a; 2105 // union { } b; 2106 // int c[0]; 2107 // etc. However, we have to provide a place-holder in InVals, so 2108 // pretend we have an 8-byte item at the current address for that 2109 // purpose. 2110 if (!ObjSize) { 2111 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 2112 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2113 InVals.push_back(FIN); 2114 continue; 2115 } 2116 // All aggregates smaller than 8 bytes must be passed right-justified. 2117 if (ObjSize < PtrByteSize) 2118 CurArgOffset = CurArgOffset + (PtrByteSize - ObjSize); 2119 // The value of the object is its address. 2120 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true); 2121 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2122 InVals.push_back(FIN); 2123 2124 if (ObjSize < 8) { 2125 if (GPR_idx != Num_GPR_Regs) { 2126 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2127 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2128 SDValue Store; 2129 2130 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 2131 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 2132 (ObjSize == 2 ? MVT::i16 : MVT::i32)); 2133 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 2134 MachinePointerInfo(FuncArg, CurArgOffset), 2135 ObjType, false, false, 0); 2136 } else { 2137 // For sizes that don't fit a truncating store (3, 5, 6, 7), 2138 // store the whole register as-is to the parameter save area 2139 // slot. The address of the parameter was already calculated 2140 // above (InVals.push_back(FIN)) to be the right-justified 2141 // offset within the slot. For this store, we need a new 2142 // frame index that points at the beginning of the slot. 2143 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 2144 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2145 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2146 MachinePointerInfo(FuncArg, ArgOffset), 2147 false, false, 0); 2148 } 2149 2150 MemOps.push_back(Store); 2151 ++GPR_idx; 2152 } 2153 // Whether we copied from a register or not, advance the offset 2154 // into the parameter save area by a full doubleword. 
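      // (For example, a 3-byte by-value struct still takes a full doubleword
      // of the save area: the frame index pushed to InVals above points 5
      // bytes into the slot, right-justified, while the spill stored the
      // whole GPR at the start of the slot.)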
2155 ArgOffset += PtrByteSize; 2156 continue; 2157 } 2158 2159 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 2160 // Store whatever pieces of the object are in registers 2161 // to memory. ArgOffset will be the address of the beginning 2162 // of the object. 2163 if (GPR_idx != Num_GPR_Regs) { 2164 unsigned VReg; 2165 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2166 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 2167 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2168 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2169 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2170 MachinePointerInfo(FuncArg, ArgOffset), 2171 false, false, 0); 2172 MemOps.push_back(Store); 2173 ++GPR_idx; 2174 ArgOffset += PtrByteSize; 2175 } else { 2176 ArgOffset += ArgSize - j; 2177 break; 2178 } 2179 } 2180 continue; 2181 } 2182 2183 switch (ObjectVT.getSimpleVT().SimpleTy) { 2184 default: llvm_unreachable("Unhandled argument type!"); 2185 case MVT::i32: 2186 case MVT::i64: 2187 if (GPR_idx != Num_GPR_Regs) { 2188 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2189 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 2190 2191 if (ObjectVT == MVT::i32) 2192 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 2193 // value to MVT::i64 and then truncate to the correct register size. 2194 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 2195 2196 ++GPR_idx; 2197 } else { 2198 needsLoad = true; 2199 ArgSize = PtrByteSize; 2200 } 2201 ArgOffset += 8; 2202 break; 2203 2204 case MVT::f32: 2205 case MVT::f64: 2206 // Every 8 bytes of argument space consumes one of the GPRs available for 2207 // argument passing. 2208 if (GPR_idx != Num_GPR_Regs) { 2209 ++GPR_idx; 2210 } 2211 if (FPR_idx != Num_FPR_Regs) { 2212 unsigned VReg; 2213 2214 if (ObjectVT == MVT::f32) 2215 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 2216 else 2217 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 2218 2219 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2220 ++FPR_idx; 2221 } else { 2222 needsLoad = true; 2223 ArgSize = PtrByteSize; 2224 } 2225 2226 ArgOffset += 8; 2227 break; 2228 case MVT::v4f32: 2229 case MVT::v4i32: 2230 case MVT::v8i16: 2231 case MVT::v16i8: 2232 // Note that vector arguments in registers don't reserve stack space, 2233 // except in varargs functions. 2234 if (VR_idx != Num_VR_Regs) { 2235 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 2236 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2237 if (isVarArg) { 2238 while ((ArgOffset % 16) != 0) { 2239 ArgOffset += PtrByteSize; 2240 if (GPR_idx != Num_GPR_Regs) 2241 GPR_idx++; 2242 } 2243 ArgOffset += 16; 2244 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 2245 } 2246 ++VR_idx; 2247 } else { 2248 // Vectors are aligned. 2249 ArgOffset = ((ArgOffset+15)/16)*16; 2250 CurArgOffset = ArgOffset; 2251 ArgOffset += 16; 2252 needsLoad = true; 2253 } 2254 break; 2255 } 2256 2257 // We need to load the argument to a virtual register if we determined 2258 // above that we ran out of physical registers of the appropriate type. 
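    // E.g. (illustrative): if the first nine arguments are all integers, the
    // ninth has no GPR left and is reloaded from its fixed stack object at
    // the 48-byte linkage area plus 8 doublewords, i.e. offset 112 from the
    // incoming SP.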
2259 if (needsLoad) { 2260 int FI = MFI->CreateFixedObject(ObjSize, 2261 CurArgOffset + (ArgSize - ObjSize), 2262 isImmutable); 2263 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2264 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(), 2265 false, false, false, 0); 2266 } 2267 2268 InVals.push_back(ArgVal); 2269 } 2270 2271 // Set the size that is at least reserved in caller of this function. Tail 2272 // call optimized functions' reserved stack space needs to be aligned so that 2273 // taking the difference between two stack areas will result in an aligned 2274 // stack. 2275 setMinReservedArea(MF, DAG, nAltivecParamsAtEnd, MinReservedArea, true); 2276 2277 // If the function takes variable number of arguments, make a frame index for 2278 // the start of the first vararg value... for expansion of llvm.va_start. 2279 if (isVarArg) { 2280 int Depth = ArgOffset; 2281 2282 FuncInfo->setVarArgsFrameIndex( 2283 MFI->CreateFixedObject(PtrByteSize, Depth, true)); 2284 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2285 2286 // If this function is vararg, store any remaining integer argument regs 2287 // to their spots on the stack so that they may be loaded by deferencing the 2288 // result of va_next. 2289 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { 2290 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2291 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2292 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2293 MachinePointerInfo(), false, false, 0); 2294 MemOps.push_back(Store); 2295 // Increment the address by four for the next argument to store 2296 SDValue PtrOff = DAG.getConstant(PtrByteSize, PtrVT); 2297 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2298 } 2299 } 2300 2301 if (!MemOps.empty()) 2302 Chain = DAG.getNode(ISD::TokenFactor, dl, 2303 MVT::Other, &MemOps[0], MemOps.size()); 2304 2305 return Chain; 2306 } 2307 2308 SDValue 2309 PPCTargetLowering::LowerFormalArguments_Darwin( 2310 SDValue Chain, 2311 CallingConv::ID CallConv, bool isVarArg, 2312 const SmallVectorImpl<ISD::InputArg> 2313 &Ins, 2314 DebugLoc dl, SelectionDAG &DAG, 2315 SmallVectorImpl<SDValue> &InVals) const { 2316 // TODO: add description of PPC stack frame format, or at least some docs. 2317 // 2318 MachineFunction &MF = DAG.getMachineFunction(); 2319 MachineFrameInfo *MFI = MF.getFrameInfo(); 2320 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2321 2322 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2323 bool isPPC64 = PtrVT == MVT::i64; 2324 // Potential tail calls could cause overwriting of argument stack slots. 2325 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 2326 (CallConv == CallingConv::Fast)); 2327 unsigned PtrByteSize = isPPC64 ? 8 : 4; 2328 2329 unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true); 2330 // Area that is at least reserved in caller of this function. 2331 unsigned MinReservedArea = ArgOffset; 2332 2333 static const uint16_t GPR_32[] = { // 32-bit registers. 2334 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2335 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2336 }; 2337 static const uint16_t GPR_64[] = { // 64-bit registers. 
2338 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 2339 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 2340 }; 2341 2342 static const uint16_t *FPR = GetFPR(); 2343 2344 static const uint16_t VR[] = { 2345 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 2346 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 2347 }; 2348 2349 const unsigned Num_GPR_Regs = array_lengthof(GPR_32); 2350 const unsigned Num_FPR_Regs = 13; 2351 const unsigned Num_VR_Regs = array_lengthof( VR); 2352 2353 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 2354 2355 const uint16_t *GPR = isPPC64 ? GPR_64 : GPR_32; 2356 2357 // In 32-bit non-varargs functions, the stack space for vectors is after the 2358 // stack space for non-vectors. We do not use this space unless we have 2359 // too many vectors to fit in registers, something that only occurs in 2360 // constructed examples:), but we have to walk the arglist to figure 2361 // that out...for the pathological case, compute VecArgOffset as the 2362 // start of the vector parameter area. Computing VecArgOffset is the 2363 // entire point of the following loop. 2364 unsigned VecArgOffset = ArgOffset; 2365 if (!isVarArg && !isPPC64) { 2366 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; 2367 ++ArgNo) { 2368 EVT ObjectVT = Ins[ArgNo].VT; 2369 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 2370 2371 if (Flags.isByVal()) { 2372 // ObjSize is the true size, ArgSize rounded up to multiple of regs. 2373 unsigned ObjSize = Flags.getByValSize(); 2374 unsigned ArgSize = 2375 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2376 VecArgOffset += ArgSize; 2377 continue; 2378 } 2379 2380 switch(ObjectVT.getSimpleVT().SimpleTy) { 2381 default: llvm_unreachable("Unhandled argument type!"); 2382 case MVT::i32: 2383 case MVT::f32: 2384 VecArgOffset += 4; 2385 break; 2386 case MVT::i64: // PPC64 2387 case MVT::f64: 2388 // FIXME: We are guaranteed to be !isPPC64 at this point. 2389 // Does MVT::i64 apply? 2390 VecArgOffset += 8; 2391 break; 2392 case MVT::v4f32: 2393 case MVT::v4i32: 2394 case MVT::v8i16: 2395 case MVT::v16i8: 2396 // Nothing to do, we're only looking at Nonvector args here. 2397 break; 2398 } 2399 } 2400 } 2401 // We've found where the vector parameter area in memory is. Skip the 2402 // first 12 parameters; these don't use that memory. 2403 VecArgOffset = ((VecArgOffset+15)/16)*16; 2404 VecArgOffset += 12*16; 2405 2406 // Add DAG nodes to load the arguments or copy them out of registers. On 2407 // entry to a function on PPC, the arguments start after the linkage area, 2408 // although the first ones are often in registers. 2409 2410 SmallVector<SDValue, 8> MemOps; 2411 unsigned nAltivecParamsAtEnd = 0; 2412 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 2413 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo, ++FuncArg) { 2414 SDValue ArgVal; 2415 bool needsLoad = false; 2416 EVT ObjectVT = Ins[ArgNo].VT; 2417 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 2418 unsigned ArgSize = ObjSize; 2419 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 2420 2421 unsigned CurArgOffset = ArgOffset; 2422 2423 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 2424 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 2425 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 2426 if (isVarArg || isPPC64) { 2427 MinReservedArea = ((MinReservedArea+15)/16)*16; 2428 MinReservedArea += CalculateStackSlotSize(ObjectVT, 2429 Flags, 2430 PtrByteSize); 2431 } else nAltivecParamsAtEnd++; 2432 } else 2433 // Calculate min reserved area. 
2434 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 2435 Flags, 2436 PtrByteSize); 2437 2438 // FIXME the codegen can be much improved in some cases. 2439 // We do not have to keep everything in memory. 2440 if (Flags.isByVal()) { 2441 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 2442 ObjSize = Flags.getByValSize(); 2443 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2444 // Objects of size 1 and 2 are right justified, everything else is 2445 // left justified. This means the memory address is adjusted forwards. 2446 if (ObjSize==1 || ObjSize==2) { 2447 CurArgOffset = CurArgOffset + (4 - ObjSize); 2448 } 2449 // The value of the object is its address. 2450 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true); 2451 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2452 InVals.push_back(FIN); 2453 if (ObjSize==1 || ObjSize==2) { 2454 if (GPR_idx != Num_GPR_Regs) { 2455 unsigned VReg; 2456 if (isPPC64) 2457 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2458 else 2459 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2460 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2461 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16; 2462 SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 2463 MachinePointerInfo(FuncArg, 2464 CurArgOffset), 2465 ObjType, false, false, 0); 2466 MemOps.push_back(Store); 2467 ++GPR_idx; 2468 } 2469 2470 ArgOffset += PtrByteSize; 2471 2472 continue; 2473 } 2474 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 2475 // Store whatever pieces of the object are in registers 2476 // to memory. ArgOffset will be the address of the beginning 2477 // of the object. 2478 if (GPR_idx != Num_GPR_Regs) { 2479 unsigned VReg; 2480 if (isPPC64) 2481 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2482 else 2483 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2484 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 2485 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2486 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2487 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2488 MachinePointerInfo(FuncArg, ArgOffset), 2489 false, false, 0); 2490 MemOps.push_back(Store); 2491 ++GPR_idx; 2492 ArgOffset += PtrByteSize; 2493 } else { 2494 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 2495 break; 2496 } 2497 } 2498 continue; 2499 } 2500 2501 switch (ObjectVT.getSimpleVT().SimpleTy) { 2502 default: llvm_unreachable("Unhandled argument type!"); 2503 case MVT::i32: 2504 if (!isPPC64) { 2505 if (GPR_idx != Num_GPR_Regs) { 2506 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2507 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 2508 ++GPR_idx; 2509 } else { 2510 needsLoad = true; 2511 ArgSize = PtrByteSize; 2512 } 2513 // All int arguments reserve stack space in the Darwin ABI. 2514 ArgOffset += PtrByteSize; 2515 break; 2516 } 2517 // FALLTHROUGH 2518 case MVT::i64: // PPC64 2519 if (GPR_idx != Num_GPR_Regs) { 2520 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2521 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 2522 2523 if (ObjectVT == MVT::i32) 2524 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 2525 // value to MVT::i64 and then truncate to the correct register size. 2526 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 2527 2528 ++GPR_idx; 2529 } else { 2530 needsLoad = true; 2531 ArgSize = PtrByteSize; 2532 } 2533 // All int arguments reserve stack space in the Darwin ABI. 
2534 ArgOffset += 8; 2535 break; 2536 2537 case MVT::f32: 2538 case MVT::f64: 2539 // Every 4 bytes of argument space consumes one of the GPRs available for 2540 // argument passing. 2541 if (GPR_idx != Num_GPR_Regs) { 2542 ++GPR_idx; 2543 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 2544 ++GPR_idx; 2545 } 2546 if (FPR_idx != Num_FPR_Regs) { 2547 unsigned VReg; 2548 2549 if (ObjectVT == MVT::f32) 2550 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 2551 else 2552 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 2553 2554 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2555 ++FPR_idx; 2556 } else { 2557 needsLoad = true; 2558 } 2559 2560 // All FP arguments reserve stack space in the Darwin ABI. 2561 ArgOffset += isPPC64 ? 8 : ObjSize; 2562 break; 2563 case MVT::v4f32: 2564 case MVT::v4i32: 2565 case MVT::v8i16: 2566 case MVT::v16i8: 2567 // Note that vector arguments in registers don't reserve stack space, 2568 // except in varargs functions. 2569 if (VR_idx != Num_VR_Regs) { 2570 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 2571 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2572 if (isVarArg) { 2573 while ((ArgOffset % 16) != 0) { 2574 ArgOffset += PtrByteSize; 2575 if (GPR_idx != Num_GPR_Regs) 2576 GPR_idx++; 2577 } 2578 ArgOffset += 16; 2579 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 2580 } 2581 ++VR_idx; 2582 } else { 2583 if (!isVarArg && !isPPC64) { 2584 // Vectors go after all the nonvectors. 2585 CurArgOffset = VecArgOffset; 2586 VecArgOffset += 16; 2587 } else { 2588 // Vectors are aligned. 2589 ArgOffset = ((ArgOffset+15)/16)*16; 2590 CurArgOffset = ArgOffset; 2591 ArgOffset += 16; 2592 } 2593 needsLoad = true; 2594 } 2595 break; 2596 } 2597 2598 // We need to load the argument to a virtual register if we determined above 2599 // that we ran out of physical registers of the appropriate type. 2600 if (needsLoad) { 2601 int FI = MFI->CreateFixedObject(ObjSize, 2602 CurArgOffset + (ArgSize - ObjSize), 2603 isImmutable); 2604 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2605 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(), 2606 false, false, false, 0); 2607 } 2608 2609 InVals.push_back(ArgVal); 2610 } 2611 2612 // Set the size that is at least reserved in caller of this function. Tail 2613 // call optimized functions' reserved stack space needs to be aligned so that 2614 // taking the difference between two stack areas will result in an aligned 2615 // stack. 2616 setMinReservedArea(MF, DAG, nAltivecParamsAtEnd, MinReservedArea, isPPC64); 2617 2618 // If the function takes variable number of arguments, make a frame index for 2619 // the start of the first vararg value... for expansion of llvm.va_start. 2620 if (isVarArg) { 2621 int Depth = ArgOffset; 2622 2623 FuncInfo->setVarArgsFrameIndex( 2624 MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 2625 Depth, true)); 2626 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2627 2628 // If this function is vararg, store any remaining integer argument regs 2629 // to their spots on the stack so that they may be loaded by deferencing the 2630 // result of va_next. 
2631 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { 2632 unsigned VReg; 2633 2634 if (isPPC64) 2635 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2636 else 2637 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2638 2639 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2640 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2641 MachinePointerInfo(), false, false, 0); 2642 MemOps.push_back(Store); 2643 // Increment the address by four for the next argument to store 2644 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); 2645 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2646 } 2647 } 2648 2649 if (!MemOps.empty()) 2650 Chain = DAG.getNode(ISD::TokenFactor, dl, 2651 MVT::Other, &MemOps[0], MemOps.size()); 2652 2653 return Chain; 2654 } 2655 2656 /// CalculateParameterAndLinkageAreaSize - Get the size of the parameter plus 2657 /// linkage area for the Darwin ABI, or the 64-bit SVR4 ABI. 2658 static unsigned 2659 CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG, 2660 bool isPPC64, 2661 bool isVarArg, 2662 unsigned CC, 2663 const SmallVectorImpl<ISD::OutputArg> 2664 &Outs, 2665 const SmallVectorImpl<SDValue> &OutVals, 2666 unsigned &nAltivecParamsAtEnd) { 2667 // Count how many bytes are to be pushed on the stack, including the linkage 2668 // area, and parameter passing area. We start with 24/48 bytes, which is 2669 // prereserved space for [SP][CR][LR][3 x unused]. 2670 unsigned NumBytes = PPCFrameLowering::getLinkageSize(isPPC64, true); 2671 unsigned NumOps = Outs.size(); 2672 unsigned PtrByteSize = isPPC64 ? 8 : 4; 2673 2674 // Add up all the space actually used. 2675 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually 2676 // they all go in registers, but we must reserve stack space for them for 2677 // possible use by the caller. In varargs or 64-bit calls, parameters are 2678 // assigned stack space in order, with padding so Altivec parameters are 2679 // 16-byte aligned. 2680 nAltivecParamsAtEnd = 0; 2681 for (unsigned i = 0; i != NumOps; ++i) { 2682 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2683 EVT ArgVT = Outs[i].VT; 2684 // Varargs Altivec parameters are padded to a 16 byte boundary. 2685 if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 || 2686 ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) { 2687 if (!isVarArg && !isPPC64) { 2688 // Non-varargs Altivec parameters go after all the non-Altivec 2689 // parameters; handle those later so we know how much padding we need. 2690 nAltivecParamsAtEnd++; 2691 continue; 2692 } 2693 // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary. 2694 NumBytes = ((NumBytes+15)/16)*16; 2695 } 2696 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 2697 } 2698 2699 // Allow for Altivec parameters at the end, if needed. 2700 if (nAltivecParamsAtEnd) { 2701 NumBytes = ((NumBytes+15)/16)*16; 2702 NumBytes += 16*nAltivecParamsAtEnd; 2703 } 2704 2705 // The prolog code of the callee may store up to 8 GPR argument registers to 2706 // the stack, allowing va_start to index over them in memory if its varargs. 2707 // Because we cannot tell if this is needed on the caller side, we have to 2708 // conservatively assume that it is needed. As such, make sure we have at 2709 // least enough stack space for the caller to store the 8 GPRs. 2710 NumBytes = std::max(NumBytes, 2711 PPCFrameLowering::getMinCallFrameSize(isPPC64, true)); 2712 2713 // Tail call needs the stack to be aligned. 
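  // For example, with a 16-byte target stack alignment the rounding below
  // takes NumBytes = 52 to 64: AlignMask == 15 and (52 + 15) & ~15 == 64.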
2714 if (CC == CallingConv::Fast && DAG.getTarget().Options.GuaranteedTailCallOpt){ 2715 unsigned TargetAlign = DAG.getMachineFunction().getTarget(). 2716 getFrameLowering()->getStackAlignment(); 2717 unsigned AlignMask = TargetAlign-1; 2718 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 2719 } 2720 2721 return NumBytes; 2722 } 2723 2724 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be 2725 /// adjusted to accommodate the arguments for the tailcall. 2726 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall, 2727 unsigned ParamSize) { 2728 2729 if (!isTailCall) return 0; 2730 2731 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>(); 2732 unsigned CallerMinReservedArea = FI->getMinReservedArea(); 2733 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize; 2734 // Remember only if the new adjustement is bigger. 2735 if (SPDiff < FI->getTailCallSPDelta()) 2736 FI->setTailCallSPDelta(SPDiff); 2737 2738 return SPDiff; 2739 } 2740 2741 /// IsEligibleForTailCallOptimization - Check whether the call is eligible 2742 /// for tail call optimization. Targets which want to do tail call 2743 /// optimization should implement this function. 2744 bool 2745 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 2746 CallingConv::ID CalleeCC, 2747 bool isVarArg, 2748 const SmallVectorImpl<ISD::InputArg> &Ins, 2749 SelectionDAG& DAG) const { 2750 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 2751 return false; 2752 2753 // Variable argument functions are not supported. 2754 if (isVarArg) 2755 return false; 2756 2757 MachineFunction &MF = DAG.getMachineFunction(); 2758 CallingConv::ID CallerCC = MF.getFunction()->getCallingConv(); 2759 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) { 2760 // Functions containing by val parameters are not supported. 2761 for (unsigned i = 0; i != Ins.size(); i++) { 2762 ISD::ArgFlagsTy Flags = Ins[i].Flags; 2763 if (Flags.isByVal()) return false; 2764 } 2765 2766 // Non PIC/GOT tail calls are supported. 2767 if (getTargetMachine().getRelocationModel() != Reloc::PIC_) 2768 return true; 2769 2770 // At the moment we can only do local tail calls (in same module, hidden 2771 // or protected) if we are generating PIC. 2772 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 2773 return G->getGlobal()->hasHiddenVisibility() 2774 || G->getGlobal()->hasProtectedVisibility(); 2775 } 2776 2777 return false; 2778 } 2779 2780 /// isCallCompatibleAddress - Return the immediate to use if the specified 2781 /// 32-bit value is representable in the immediate field of a BxA instruction. 2782 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) { 2783 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 2784 if (!C) return 0; 2785 2786 int Addr = C->getZExtValue(); 2787 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero. 2788 SignExtend32<26>(Addr) != Addr) 2789 return 0; // Top 6 bits have to be sext of immediate. 2790 2791 return DAG.getConstant((int)C->getZExtValue() >> 2, 2792 DAG.getTargetLoweringInfo().getPointerTy()).getNode(); 2793 } 2794 2795 namespace { 2796 2797 struct TailCallArgumentInfo { 2798 SDValue Arg; 2799 SDValue FrameIdxOp; 2800 int FrameIdx; 2801 2802 TailCallArgumentInfo() : FrameIdx(0) {} 2803 }; 2804 2805 } 2806 2807 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. 
2808 static void 2809 StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, 2810 SDValue Chain, 2811 const SmallVector<TailCallArgumentInfo, 8> &TailCallArgs, 2812 SmallVector<SDValue, 8> &MemOpChains, 2813 DebugLoc dl) { 2814 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 2815 SDValue Arg = TailCallArgs[i].Arg; 2816 SDValue FIN = TailCallArgs[i].FrameIdxOp; 2817 int FI = TailCallArgs[i].FrameIdx; 2818 // Store relative to framepointer. 2819 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, FIN, 2820 MachinePointerInfo::getFixedStack(FI), 2821 false, false, 0)); 2822 } 2823 } 2824 2825 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 2826 /// the appropriate stack slot for the tail call optimized function call. 2827 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, 2828 MachineFunction &MF, 2829 SDValue Chain, 2830 SDValue OldRetAddr, 2831 SDValue OldFP, 2832 int SPDiff, 2833 bool isPPC64, 2834 bool isDarwinABI, 2835 DebugLoc dl) { 2836 if (SPDiff) { 2837 // Calculate the new stack slot for the return address. 2838 int SlotSize = isPPC64 ? 8 : 4; 2839 int NewRetAddrLoc = SPDiff + PPCFrameLowering::getReturnSaveOffset(isPPC64, 2840 isDarwinABI); 2841 int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize, 2842 NewRetAddrLoc, true); 2843 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 2844 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 2845 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx, 2846 MachinePointerInfo::getFixedStack(NewRetAddr), 2847 false, false, 0); 2848 2849 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack 2850 // slot as the FP is never overwritten. 2851 if (isDarwinABI) { 2852 int NewFPLoc = 2853 SPDiff + PPCFrameLowering::getFramePointerSaveOffset(isPPC64, isDarwinABI); 2854 int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc, 2855 true); 2856 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 2857 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx, 2858 MachinePointerInfo::getFixedStack(NewFPIdx), 2859 false, false, 0); 2860 } 2861 } 2862 return Chain; 2863 } 2864 2865 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate 2866 /// the position of the argument. 2867 static void 2868 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 2869 SDValue Arg, int SPDiff, unsigned ArgOffset, 2870 SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) { 2871 int Offset = ArgOffset + SPDiff; 2872 uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8; 2873 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 2874 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 2875 SDValue FIN = DAG.getFrameIndex(FI, VT); 2876 TailCallArgumentInfo Info; 2877 Info.Arg = Arg; 2878 Info.FrameIdxOp = FIN; 2879 Info.FrameIdx = FI; 2880 TailCallArguments.push_back(Info); 2881 } 2882 2883 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address 2884 /// stack slot. Returns the chain as result and the loaded frame pointers in 2885 /// LROpOut/FPOpout. Used when tail calling. 2886 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG, 2887 int SPDiff, 2888 SDValue Chain, 2889 SDValue &LROpOut, 2890 SDValue &FPOpOut, 2891 bool isDarwinABI, 2892 DebugLoc dl) const { 2893 if (SPDiff) { 2894 // Load the LR and FP stack slot for later adjusting. 2895 EVT VT = PPCSubTarget.isPPC64() ? 
MVT::i64 : MVT::i32; 2896 LROpOut = getReturnAddrFrameIndex(DAG); 2897 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(), 2898 false, false, false, 0); 2899 Chain = SDValue(LROpOut.getNode(), 1); 2900 2901 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack 2902 // slot as the FP is never overwritten. 2903 if (isDarwinABI) { 2904 FPOpOut = getFramePointerFrameIndex(DAG); 2905 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(), 2906 false, false, false, 0); 2907 Chain = SDValue(FPOpOut.getNode(), 1); 2908 } 2909 } 2910 return Chain; 2911 } 2912 2913 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 2914 /// by "Src" to address "Dst" of size "Size". Alignment information is 2915 /// specified by the specific parameter attribute. The copy will be passed as 2916 /// a byval function parameter. 2917 /// Sometimes what we are copying is the end of a larger object, the part that 2918 /// does not fit in registers. 2919 static SDValue 2920 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 2921 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 2922 DebugLoc dl) { 2923 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); 2924 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 2925 false, false, MachinePointerInfo(0), 2926 MachinePointerInfo(0)); 2927 } 2928 2929 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 2930 /// tail calls. 2931 static void 2932 LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, 2933 SDValue Arg, SDValue PtrOff, int SPDiff, 2934 unsigned ArgOffset, bool isPPC64, bool isTailCall, 2935 bool isVector, SmallVector<SDValue, 8> &MemOpChains, 2936 SmallVector<TailCallArgumentInfo, 8> &TailCallArguments, 2937 DebugLoc dl) { 2938 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2939 if (!isTailCall) { 2940 if (isVector) { 2941 SDValue StackPtr; 2942 if (isPPC64) 2943 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 2944 else 2945 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 2946 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 2947 DAG.getConstant(ArgOffset, PtrVT)); 2948 } 2949 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, 2950 MachinePointerInfo(), false, false, 0)); 2951 // Calculate and remember argument location. 2952 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 2953 TailCallArguments); 2954 } 2955 2956 static 2957 void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 2958 DebugLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes, 2959 SDValue LROp, SDValue FPOp, bool isDarwinABI, 2960 SmallVector<TailCallArgumentInfo, 8> &TailCallArguments) { 2961 MachineFunction &MF = DAG.getMachineFunction(); 2962 2963 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 2964 // might overwrite each other in case of tail call optimization. 2965 SmallVector<SDValue, 8> MemOpChains2; 2966 // Do not flag preceding copytoreg stuff together with the following stuff. 2967 InFlag = SDValue(); 2968 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 2969 MemOpChains2, dl); 2970 if (!MemOpChains2.empty()) 2971 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2972 &MemOpChains2[0], MemOpChains2.size()); 2973 2974 // Store the return address to the appropriate stack slot. 
2975 Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff, 2976 isPPC64, isDarwinABI, dl); 2977 2978 // Emit callseq_end just before tailcall node. 2979 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 2980 DAG.getIntPtrConstant(0, true), InFlag); 2981 InFlag = Chain.getValue(1); 2982 } 2983 2984 static 2985 unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, 2986 SDValue &Chain, DebugLoc dl, int SPDiff, bool isTailCall, 2987 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, 2988 SmallVector<SDValue, 8> &Ops, std::vector<EVT> &NodeTys, 2989 const PPCSubtarget &PPCSubTarget) { 2990 2991 bool isPPC64 = PPCSubTarget.isPPC64(); 2992 bool isSVR4ABI = PPCSubTarget.isSVR4ABI(); 2993 2994 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2995 NodeTys.push_back(MVT::Other); // Returns a chain 2996 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. 2997 2998 unsigned CallOpc = isSVR4ABI ? PPCISD::CALL_SVR4 : PPCISD::CALL_Darwin; 2999 3000 bool needIndirectCall = true; 3001 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { 3002 // If this is an absolute destination address, use the munged value. 3003 Callee = SDValue(Dest, 0); 3004 needIndirectCall = false; 3005 } 3006 3007 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 3008 // XXX Work around for http://llvm.org/bugs/show_bug.cgi?id=5201 3009 // Use indirect calls for ALL functions calls in JIT mode, since the 3010 // far-call stubs may be outside relocation limits for a BL instruction. 3011 if (!DAG.getTarget().getSubtarget<PPCSubtarget>().isJITCodeModel()) { 3012 unsigned OpFlags = 0; 3013 if (DAG.getTarget().getRelocationModel() != Reloc::Static && 3014 (PPCSubTarget.getTargetTriple().isMacOSX() && 3015 PPCSubTarget.getTargetTriple().isMacOSXVersionLT(10, 5)) && 3016 (G->getGlobal()->isDeclaration() || 3017 G->getGlobal()->isWeakForLinker())) { 3018 // PC-relative references to external symbols should go through $stub, 3019 // unless we're building with the leopard linker or later, which 3020 // automatically synthesizes these stubs. 3021 OpFlags = PPCII::MO_DARWIN_STUB; 3022 } 3023 3024 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, 3025 // every direct call is) turn it into a TargetGlobalAddress / 3026 // TargetExternalSymbol node so that legalize doesn't hack it. 3027 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, 3028 Callee.getValueType(), 3029 0, OpFlags); 3030 needIndirectCall = false; 3031 } 3032 } 3033 3034 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 3035 unsigned char OpFlags = 0; 3036 3037 if (DAG.getTarget().getRelocationModel() != Reloc::Static && 3038 (PPCSubTarget.getTargetTriple().isMacOSX() && 3039 PPCSubTarget.getTargetTriple().isMacOSXVersionLT(10, 5))) { 3040 // PC-relative references to external symbols should go through $stub, 3041 // unless we're building with the leopard linker or later, which 3042 // automatically synthesizes these stubs. 3043 OpFlags = PPCII::MO_DARWIN_STUB; 3044 } 3045 3046 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(), 3047 OpFlags); 3048 needIndirectCall = false; 3049 } 3050 3051 if (needIndirectCall) { 3052 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 3053 // to do the call, we can't use PPCISD::CALL. 
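  // (Illustrative only -- a rough sketch of the machine code this indirect
  //  call path is expected to boil down to, assuming the usual selection of
  //  PPCISD::MTCTR and the PPCISD::BCTRL_* opcodes:
  //      mtctr  <callee address>
  //      bctrl
  //  with the extra descriptor loads and TOC handling added below for the
  //  64-bit SVR4 case.)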
3054 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 3055 3056 if (isSVR4ABI && isPPC64) { 3057 // Function pointers in the 64-bit SVR4 ABI do not point to the function 3058 // entry point, but to the function descriptor (the function entry point 3059 // address is part of the function descriptor though). 3060 // The function descriptor is a three doubleword structure with the 3061 // following fields: function entry point, TOC base address and 3062 // environment pointer. 3063 // Thus for a call through a function pointer, the following actions need 3064 // to be performed: 3065 // 1. Save the TOC of the caller in the TOC save area of its stack 3066 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). 3067 // 2. Load the address of the function entry point from the function 3068 // descriptor. 3069 // 3. Load the TOC of the callee from the function descriptor into r2. 3070 // 4. Load the environment pointer from the function descriptor into 3071 // r11. 3072 // 5. Branch to the function entry point address. 3073 // 6. On return of the callee, the TOC of the caller needs to be 3074 // restored (this is done in FinishCall()). 3075 // 3076 // All those operations are flagged together to ensure that no other 3077 // operations can be scheduled in between. E.g. without flagging the 3078 // operations together, a TOC access in the caller could be scheduled 3079 // between the load of the callee TOC and the branch to the callee, which 3080 // results in the TOC access going through the TOC of the callee instead 3081 // of going through the TOC of the caller, which leads to incorrect code. 3082 3083 // Load the address of the function entry point from the function 3084 // descriptor. 3085 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other, MVT::Glue); 3086 SDValue LoadFuncPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, MTCTROps, 3087 InFlag.getNode() ? 3 : 2); 3088 Chain = LoadFuncPtr.getValue(1); 3089 InFlag = LoadFuncPtr.getValue(2); 3090 3091 // Load environment pointer into r11. 3092 // Offset of the environment pointer within the function descriptor. 3093 SDValue PtrOff = DAG.getIntPtrConstant(16); 3094 3095 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 3096 SDValue LoadEnvPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, Chain, AddPtr, 3097 InFlag); 3098 Chain = LoadEnvPtr.getValue(1); 3099 InFlag = LoadEnvPtr.getValue(2); 3100 3101 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 3102 InFlag); 3103 Chain = EnvVal.getValue(0); 3104 InFlag = EnvVal.getValue(1); 3105 3106 // Load TOC of the callee into r2. We are using a target-specific load 3107 // with r2 hard coded, because the result of a target-independent load 3108 // would never go directly into r2, since r2 is a reserved register (which 3109 // prevents the register allocator from allocating it), resulting in an 3110 // additional register being allocated and an unnecessary move instruction 3111 // being generated. 
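      // (Reference sketch, assuming the ELFv1-style descriptor layout implied
      //  above:
      //      0(fd)   function entry point  -> moved to CTR via LoadFuncPtr
      //      8(fd)   callee TOC base       -> loaded into r2 by LOAD_TOC
      //                                       below, which presumably folds
      //                                       in the +8 during selection
      //     16(fd)   environment pointer   -> loaded into r11 above.)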
3112 VTs = DAG.getVTList(MVT::Other, MVT::Glue); 3113 SDValue LoadTOCPtr = DAG.getNode(PPCISD::LOAD_TOC, dl, VTs, Chain, 3114 Callee, InFlag); 3115 Chain = LoadTOCPtr.getValue(0); 3116 InFlag = LoadTOCPtr.getValue(1); 3117 3118 MTCTROps[0] = Chain; 3119 MTCTROps[1] = LoadFuncPtr; 3120 MTCTROps[2] = InFlag; 3121 } 3122 3123 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, MTCTROps, 3124 2 + (InFlag.getNode() != 0)); 3125 InFlag = Chain.getValue(1); 3126 3127 NodeTys.clear(); 3128 NodeTys.push_back(MVT::Other); 3129 NodeTys.push_back(MVT::Glue); 3130 Ops.push_back(Chain); 3131 CallOpc = isSVR4ABI ? PPCISD::BCTRL_SVR4 : PPCISD::BCTRL_Darwin; 3132 Callee.setNode(0); 3133 // Add CTR register as callee so a bctr can be emitted later. 3134 if (isTailCall) 3135 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 3136 } 3137 3138 // If this is a direct call, pass the chain and the callee. 3139 if (Callee.getNode()) { 3140 Ops.push_back(Chain); 3141 Ops.push_back(Callee); 3142 } 3143 // If this is a tail call add stack pointer delta. 3144 if (isTailCall) 3145 Ops.push_back(DAG.getConstant(SPDiff, MVT::i32)); 3146 3147 // Add argument registers to the end of the list so that they are known live 3148 // into the call. 3149 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 3150 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 3151 RegsToPass[i].second.getValueType())); 3152 3153 return CallOpc; 3154 } 3155 3156 static 3157 bool isLocalCall(const SDValue &Callee) 3158 { 3159 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 3160 return !G->getGlobal()->isDeclaration() && 3161 !G->getGlobal()->isWeakForLinker(); 3162 return false; 3163 } 3164 3165 SDValue 3166 PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 3167 CallingConv::ID CallConv, bool isVarArg, 3168 const SmallVectorImpl<ISD::InputArg> &Ins, 3169 DebugLoc dl, SelectionDAG &DAG, 3170 SmallVectorImpl<SDValue> &InVals) const { 3171 3172 SmallVector<CCValAssign, 16> RVLocs; 3173 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), 3174 getTargetMachine(), RVLocs, *DAG.getContext()); 3175 CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC); 3176 3177 // Copy all of the result registers out of their specified physreg. 
3178 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 3179 CCValAssign &VA = RVLocs[i]; 3180 assert(VA.isRegLoc() && "Can only return in registers!"); 3181 3182 SDValue Val = DAG.getCopyFromReg(Chain, dl, 3183 VA.getLocReg(), VA.getLocVT(), InFlag); 3184 Chain = Val.getValue(1); 3185 InFlag = Val.getValue(2); 3186 3187 switch (VA.getLocInfo()) { 3188 default: llvm_unreachable("Unknown loc info!"); 3189 case CCValAssign::Full: break; 3190 case CCValAssign::AExt: 3191 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 3192 break; 3193 case CCValAssign::ZExt: 3194 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val, 3195 DAG.getValueType(VA.getValVT())); 3196 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 3197 break; 3198 case CCValAssign::SExt: 3199 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val, 3200 DAG.getValueType(VA.getValVT())); 3201 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 3202 break; 3203 } 3204 3205 InVals.push_back(Val); 3206 } 3207 3208 return Chain; 3209 } 3210 3211 SDValue 3212 PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl, 3213 bool isTailCall, bool isVarArg, 3214 SelectionDAG &DAG, 3215 SmallVector<std::pair<unsigned, SDValue>, 8> 3216 &RegsToPass, 3217 SDValue InFlag, SDValue Chain, 3218 SDValue &Callee, 3219 int SPDiff, unsigned NumBytes, 3220 const SmallVectorImpl<ISD::InputArg> &Ins, 3221 SmallVectorImpl<SDValue> &InVals) const { 3222 std::vector<EVT> NodeTys; 3223 SmallVector<SDValue, 8> Ops; 3224 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, dl, SPDiff, 3225 isTailCall, RegsToPass, Ops, NodeTys, 3226 PPCSubTarget); 3227 3228 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls 3229 if (isVarArg && PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64()) 3230 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); 3231 3232 // When performing tail call optimization the callee pops its arguments off 3233 // the stack. Account for this here so these bytes can be pushed back on in 3234 // PPCRegisterInfo::eliminateCallFramePseudoInstr. 3235 int BytesCalleePops = 3236 (CallConv == CallingConv::Fast && 3237 getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0; 3238 3239 // Add a register mask operand representing the call-preserved registers. 3240 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); 3241 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv); 3242 assert(Mask && "Missing call preserved mask for calling convention"); 3243 Ops.push_back(DAG.getRegisterMask(Mask)); 3244 3245 if (InFlag.getNode()) 3246 Ops.push_back(InFlag); 3247 3248 // Emit tail call. 3249 if (isTailCall) { 3250 // If this is the first return lowered for this function, add the regs 3251 // to the liveout set for the function. 
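  // (Note on the loop below: each CopyFromReg is glued to the previous one so
  //  the copies stay attached to the call, and results that the calling
  //  convention promoted (AExt/ZExt/SExt) are narrowed back to their original
  //  type with AssertZext/AssertSext followed by TRUNCATE.)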
3252     if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
3253       SmallVector<CCValAssign, 16> RVLocs;
3254       CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3255                      getTargetMachine(), RVLocs, *DAG.getContext());
3256       CCInfo.AnalyzeCallResult(Ins, RetCC_PPC);
3257       for (unsigned i = 0; i != RVLocs.size(); ++i)
3258         DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
3259     }
3260 
3261     assert(((Callee.getOpcode() == ISD::Register &&
3262              cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
3263             Callee.getOpcode() == ISD::TargetExternalSymbol ||
3264             Callee.getOpcode() == ISD::TargetGlobalAddress ||
3265             isa<ConstantSDNode>(Callee)) &&
3266            "Expecting a global address, external symbol, absolute value or register");
3267 
3268     return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, &Ops[0], Ops.size());
3269   }
3270 
3271   // Add a NOP immediately after the branch instruction when using the 64-bit
3272   // SVR4 ABI. At link time, if the caller and callee are in different modules
3273   // and thus have different TOCs, the call will be replaced with a call to a
3274   // stub function which saves the current TOC, loads the TOC of the callee,
3275   // and branches to the callee. The NOP will be replaced with a load
3276   // instruction which restores the TOC of the caller from the TOC save slot
3277   // of the current stack frame. If the caller and callee belong to the same
3278   // module (and have the same TOC), the NOP will remain unchanged.
3279 
3280   bool needsTOCRestore = false;
3281   if (!isTailCall && PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
3282     if (CallOpc == PPCISD::BCTRL_SVR4) {
3283       // This is a call through a function pointer.
3284       // Restore the caller's TOC from the save area into R2.
3285       // See PrepareCall() for more information about calls through function
3286       // pointers in the 64-bit SVR4 ABI.
3287       // We are using a target-specific load with r2 hard-coded, because the
3288       // result of a target-independent load would never go directly into r2,
3289       // since r2 is a reserved register (which prevents the register allocator
3290       // from allocating it), resulting in an additional register being
3291       // allocated and an unnecessary move instruction being generated.
3292       needsTOCRestore = true;
3293     } else if ((CallOpc == PPCISD::CALL_SVR4) && !isLocalCall(Callee)) {
3294       // Otherwise insert a NOP for non-local calls.
3295 CallOpc = PPCISD::CALL_NOP_SVR4; 3296 } 3297 } 3298 3299 Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size()); 3300 InFlag = Chain.getValue(1); 3301 3302 if (needsTOCRestore) { 3303 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 3304 Chain = DAG.getNode(PPCISD::TOC_RESTORE, dl, VTs, Chain, InFlag); 3305 InFlag = Chain.getValue(1); 3306 } 3307 3308 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 3309 DAG.getIntPtrConstant(BytesCalleePops, true), 3310 InFlag); 3311 if (!Ins.empty()) 3312 InFlag = Chain.getValue(1); 3313 3314 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 3315 Ins, dl, DAG, InVals); 3316 } 3317 3318 SDValue 3319 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 3320 SmallVectorImpl<SDValue> &InVals) const { 3321 SelectionDAG &DAG = CLI.DAG; 3322 DebugLoc &dl = CLI.DL; 3323 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; 3324 SmallVector<SDValue, 32> &OutVals = CLI.OutVals; 3325 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; 3326 SDValue Chain = CLI.Chain; 3327 SDValue Callee = CLI.Callee; 3328 bool &isTailCall = CLI.IsTailCall; 3329 CallingConv::ID CallConv = CLI.CallConv; 3330 bool isVarArg = CLI.IsVarArg; 3331 3332 if (isTailCall) 3333 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 3334 Ins, DAG); 3335 3336 if (PPCSubTarget.isSVR4ABI()) { 3337 if (PPCSubTarget.isPPC64()) 3338 return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg, 3339 isTailCall, Outs, OutVals, Ins, 3340 dl, DAG, InVals); 3341 else 3342 return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg, 3343 isTailCall, Outs, OutVals, Ins, 3344 dl, DAG, InVals); 3345 } 3346 3347 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg, 3348 isTailCall, Outs, OutVals, Ins, 3349 dl, DAG, InVals); 3350 } 3351 3352 SDValue 3353 PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee, 3354 CallingConv::ID CallConv, bool isVarArg, 3355 bool isTailCall, 3356 const SmallVectorImpl<ISD::OutputArg> &Outs, 3357 const SmallVectorImpl<SDValue> &OutVals, 3358 const SmallVectorImpl<ISD::InputArg> &Ins, 3359 DebugLoc dl, SelectionDAG &DAG, 3360 SmallVectorImpl<SDValue> &InVals) const { 3361 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description 3362 // of the 32-bit SVR4 ABI stack frame layout. 3363 3364 assert((CallConv == CallingConv::C || 3365 CallConv == CallingConv::Fast) && "Unknown calling convention!"); 3366 3367 unsigned PtrByteSize = 4; 3368 3369 MachineFunction &MF = DAG.getMachineFunction(); 3370 3371 // Mark this function as potentially containing a function that contains a 3372 // tail call. As a consequence the frame pointer will be used for dynamicalloc 3373 // and restoring the callers stack pointer in this functions epilog. This is 3374 // done because by tail calling the called function might overwrite the value 3375 // in this function's (MF) stack pointer stack slot 0(SP). 3376 if (getTargetMachine().Options.GuaranteedTailCallOpt && 3377 CallConv == CallingConv::Fast) 3378 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 3379 3380 // Count how many bytes are to be pushed on the stack, including the linkage 3381 // area, parameter list area and the part of the local variable space which 3382 // contains copies of aggregates which are passed by value. 3383 3384 // Assign locations to all of the outgoing arguments. 
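  // (Hedged note: for the 32-bit SVR4 ABI this assigns integer arguments to
  //  r3-r10 and floating-point arguments to f1-f8 before spilling to the
  //  parameter list area; the AllocateStack call below first reserves the
  //  small SVR4 linkage area -- presumably 8 bytes, back chain word plus LR
  //  save word -- so parameters start beyond it.)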
3385 SmallVector<CCValAssign, 16> ArgLocs; 3386 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 3387 getTargetMachine(), ArgLocs, *DAG.getContext()); 3388 3389 // Reserve space for the linkage area on the stack. 3390 CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize); 3391 3392 if (isVarArg) { 3393 // Handle fixed and variable vector arguments differently. 3394 // Fixed vector arguments go into registers as long as registers are 3395 // available. Variable vector arguments always go into memory. 3396 unsigned NumArgs = Outs.size(); 3397 3398 for (unsigned i = 0; i != NumArgs; ++i) { 3399 MVT ArgVT = Outs[i].VT; 3400 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 3401 bool Result; 3402 3403 if (Outs[i].IsFixed) { 3404 Result = CC_PPC_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, 3405 CCInfo); 3406 } else { 3407 Result = CC_PPC_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, 3408 ArgFlags, CCInfo); 3409 } 3410 3411 if (Result) { 3412 #ifndef NDEBUG 3413 errs() << "Call operand #" << i << " has unhandled type " 3414 << EVT(ArgVT).getEVTString() << "\n"; 3415 #endif 3416 llvm_unreachable(0); 3417 } 3418 } 3419 } else { 3420 // All arguments are treated the same. 3421 CCInfo.AnalyzeCallOperands(Outs, CC_PPC_SVR4); 3422 } 3423 3424 // Assign locations to all of the outgoing aggregate by value arguments. 3425 SmallVector<CCValAssign, 16> ByValArgLocs; 3426 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 3427 getTargetMachine(), ByValArgLocs, *DAG.getContext()); 3428 3429 // Reserve stack space for the allocations in CCInfo. 3430 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 3431 3432 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC_SVR4_ByVal); 3433 3434 // Size of the linkage area, parameter list area and the part of the local 3435 // space variable where copies of aggregates which are passed by value are 3436 // stored. 3437 unsigned NumBytes = CCByValInfo.getNextStackOffset(); 3438 3439 // Calculate by how many bytes the stack has to be adjusted in case of tail 3440 // call optimization. 3441 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 3442 3443 // Adjust the stack pointer for the new arguments... 3444 // These operations are automatically eliminated by the prolog/epilog pass 3445 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); 3446 SDValue CallSeqStart = Chain; 3447 3448 // Load the return address and frame pointer so it can be moved somewhere else 3449 // later. 3450 SDValue LROp, FPOp; 3451 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false, 3452 dl); 3453 3454 // Set up a copy of the stack pointer for use loading and storing any 3455 // arguments that may not fit in the registers available for argument 3456 // passing. 3457 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 3458 3459 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 3460 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 3461 SmallVector<SDValue, 8> MemOpChains; 3462 3463 bool seenFloatArg = false; 3464 // Walk the register/memloc assignments, inserting copies/loads. 
3465 for (unsigned i = 0, j = 0, e = ArgLocs.size(); 3466 i != e; 3467 ++i) { 3468 CCValAssign &VA = ArgLocs[i]; 3469 SDValue Arg = OutVals[i]; 3470 ISD::ArgFlagsTy Flags = Outs[i].Flags; 3471 3472 if (Flags.isByVal()) { 3473 // Argument is an aggregate which is passed by value, thus we need to 3474 // create a copy of it in the local variable space of the current stack 3475 // frame (which is the stack frame of the caller) and pass the address of 3476 // this copy to the callee. 3477 assert((j < ByValArgLocs.size()) && "Index out of bounds!"); 3478 CCValAssign &ByValVA = ByValArgLocs[j++]; 3479 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); 3480 3481 // Memory reserved in the local variable space of the callers stack frame. 3482 unsigned LocMemOffset = ByValVA.getLocMemOffset(); 3483 3484 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 3485 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 3486 3487 // Create a copy of the argument in the local area of the current 3488 // stack frame. 3489 SDValue MemcpyCall = 3490 CreateCopyOfByValArgument(Arg, PtrOff, 3491 CallSeqStart.getNode()->getOperand(0), 3492 Flags, DAG, dl); 3493 3494 // This must go outside the CALLSEQ_START..END. 3495 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 3496 CallSeqStart.getNode()->getOperand(1)); 3497 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 3498 NewCallSeqStart.getNode()); 3499 Chain = CallSeqStart = NewCallSeqStart; 3500 3501 // Pass the address of the aggregate copy on the stack either in a 3502 // physical register or in the parameter list area of the current stack 3503 // frame to the callee. 3504 Arg = PtrOff; 3505 } 3506 3507 if (VA.isRegLoc()) { 3508 seenFloatArg |= VA.getLocVT().isFloatingPoint(); 3509 // Put argument in a physical register. 3510 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 3511 } else { 3512 // Put argument in the parameter list area of the current stack frame. 3513 assert(VA.isMemLoc()); 3514 unsigned LocMemOffset = VA.getLocMemOffset(); 3515 3516 if (!isTailCall) { 3517 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 3518 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 3519 3520 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, 3521 MachinePointerInfo(), 3522 false, false, 0)); 3523 } else { 3524 // Calculate and remember argument location. 3525 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset, 3526 TailCallArguments); 3527 } 3528 } 3529 } 3530 3531 if (!MemOpChains.empty()) 3532 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 3533 &MemOpChains[0], MemOpChains.size()); 3534 3535 // Build a sequence of copy-to-reg nodes chained together with token chain 3536 // and flag operands which copy the outgoing args into the appropriate regs. 3537 SDValue InFlag; 3538 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 3539 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 3540 RegsToPass[i].second, InFlag); 3541 InFlag = Chain.getValue(1); 3542 } 3543 3544 // Set CR bit 6 to true if this is a vararg call with floating args passed in 3545 // registers. 3546 if (isVarArg) { 3547 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 3548 SDValue Ops[] = { Chain, InFlag }; 3549 3550 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET, 3551 dl, VTs, Ops, InFlag.getNode() ? 
2 : 1); 3552 3553 InFlag = Chain.getValue(1); 3554 } 3555 3556 if (isTailCall) 3557 PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp, 3558 false, TailCallArguments); 3559 3560 return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG, 3561 RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes, 3562 Ins, InVals); 3563 } 3564 3565 // Copy an argument into memory, being careful to do this outside the 3566 // call sequence for the call to which the argument belongs. 3567 SDValue 3568 PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff, 3569 SDValue CallSeqStart, 3570 ISD::ArgFlagsTy Flags, 3571 SelectionDAG &DAG, 3572 DebugLoc dl) const { 3573 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, 3574 CallSeqStart.getNode()->getOperand(0), 3575 Flags, DAG, dl); 3576 // The MEMCPY must go outside the CALLSEQ_START..END. 3577 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 3578 CallSeqStart.getNode()->getOperand(1)); 3579 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 3580 NewCallSeqStart.getNode()); 3581 return NewCallSeqStart; 3582 } 3583 3584 SDValue 3585 PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee, 3586 CallingConv::ID CallConv, bool isVarArg, 3587 bool isTailCall, 3588 const SmallVectorImpl<ISD::OutputArg> &Outs, 3589 const SmallVectorImpl<SDValue> &OutVals, 3590 const SmallVectorImpl<ISD::InputArg> &Ins, 3591 DebugLoc dl, SelectionDAG &DAG, 3592 SmallVectorImpl<SDValue> &InVals) const { 3593 3594 unsigned NumOps = Outs.size(); 3595 3596 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3597 unsigned PtrByteSize = 8; 3598 3599 MachineFunction &MF = DAG.getMachineFunction(); 3600 3601 // Mark this function as potentially containing a function that contains a 3602 // tail call. As a consequence the frame pointer will be used for dynamicalloc 3603 // and restoring the callers stack pointer in this functions epilog. This is 3604 // done because by tail calling the called function might overwrite the value 3605 // in this function's (MF) stack pointer stack slot 0(SP). 3606 if (getTargetMachine().Options.GuaranteedTailCallOpt && 3607 CallConv == CallingConv::Fast) 3608 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 3609 3610 unsigned nAltivecParamsAtEnd = 0; 3611 3612 // Count how many bytes are to be pushed on the stack, including the linkage 3613 // area, and parameter passing area. We start with at least 48 bytes, which 3614 // is reserved space for [SP][CR][LR][3 x unused]. 3615 // NOTE: For PPC64, nAltivecParamsAtEnd always remains zero as a result 3616 // of this call. 3617 unsigned NumBytes = 3618 CalculateParameterAndLinkageAreaSize(DAG, true, isVarArg, CallConv, 3619 Outs, OutVals, nAltivecParamsAtEnd); 3620 3621 // Calculate by how many bytes the stack has to be adjusted in case of tail 3622 // call optimization. 3623 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 3624 3625 // To protect arguments on the stack from being clobbered in a tail call, 3626 // force all the loads to happen before doing any other lowering. 3627 if (isTailCall) 3628 Chain = DAG.getStackArgumentTokenFactor(Chain); 3629 3630 // Adjust the stack pointer for the new arguments... 3631 // These operations are automatically eliminated by the prolog/epilog pass 3632 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); 3633 SDValue CallSeqStart = Chain; 3634 3635 // Load the return address and frame pointer so it can be move somewhere else 3636 // later. 
3637 SDValue LROp, FPOp; 3638 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true, 3639 dl); 3640 3641 // Set up a copy of the stack pointer for use loading and storing any 3642 // arguments that may not fit in the registers available for argument 3643 // passing. 3644 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 3645 3646 // Figure out which arguments are going to go in registers, and which in 3647 // memory. Also, if this is a vararg function, floating point operations 3648 // must be stored to our stack, and loaded into integer regs as well, if 3649 // any integer regs are available for argument passing. 3650 unsigned ArgOffset = PPCFrameLowering::getLinkageSize(true, true); 3651 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3652 3653 static const uint16_t GPR[] = { 3654 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3655 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3656 }; 3657 static const uint16_t *FPR = GetFPR(); 3658 3659 static const uint16_t VR[] = { 3660 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3661 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3662 }; 3663 const unsigned NumGPRs = array_lengthof(GPR); 3664 const unsigned NumFPRs = 13; 3665 const unsigned NumVRs = array_lengthof(VR); 3666 3667 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 3668 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 3669 3670 SmallVector<SDValue, 8> MemOpChains; 3671 for (unsigned i = 0; i != NumOps; ++i) { 3672 SDValue Arg = OutVals[i]; 3673 ISD::ArgFlagsTy Flags = Outs[i].Flags; 3674 3675 // PtrOff will be used to store the current argument to the stack if a 3676 // register cannot be found for it. 3677 SDValue PtrOff; 3678 3679 PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); 3680 3681 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 3682 3683 // Promote integers to 64-bit values. 3684 if (Arg.getValueType() == MVT::i32) { 3685 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 3686 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 3687 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 3688 } 3689 3690 // FIXME memcpy is used way more than necessary. Correctness first. 3691 // Note: "by value" is code for passing a structure by value, not 3692 // basic types. 3693 if (Flags.isByVal()) { 3694 // Note: Size includes alignment padding, so 3695 // struct x { short a; char b; } 3696 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 3697 // These are the proper values we need for right-justifying the 3698 // aggregate in a parameter register. 3699 unsigned Size = Flags.getByValSize(); 3700 3701 // An empty aggregate parameter takes up no storage and no 3702 // registers. 3703 if (Size == 0) 3704 continue; 3705 3706 // All aggregates smaller than 8 bytes must be passed right-justified. 3707 if (Size==1 || Size==2 || Size==4) { 3708 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? 
MVT::i16 : MVT::i32); 3709 if (GPR_idx != NumGPRs) { 3710 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 3711 MachinePointerInfo(), VT, 3712 false, false, 0); 3713 MemOpChains.push_back(Load.getValue(1)); 3714 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3715 3716 ArgOffset += PtrByteSize; 3717 continue; 3718 } 3719 } 3720 3721 if (GPR_idx == NumGPRs && Size < 8) { 3722 SDValue Const = DAG.getConstant(PtrByteSize - Size, 3723 PtrOff.getValueType()); 3724 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 3725 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 3726 CallSeqStart, 3727 Flags, DAG, dl); 3728 ArgOffset += PtrByteSize; 3729 continue; 3730 } 3731 // Copy entire object into memory. There are cases where gcc-generated 3732 // code assumes it is there, even if it could be put entirely into 3733 // registers. (This is not what the doc says.) 3734 3735 // FIXME: The above statement is likely due to a misunderstanding of the 3736 // documents. All arguments must be copied into the parameter area BY 3737 // THE CALLEE in the event that the callee takes the address of any 3738 // formal argument. That has not yet been implemented. However, it is 3739 // reasonable to use the stack area as a staging area for the register 3740 // load. 3741 3742 // Skip this for small aggregates, as we will use the same slot for a 3743 // right-justified copy, below. 3744 if (Size >= 8) 3745 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 3746 CallSeqStart, 3747 Flags, DAG, dl); 3748 3749 // When a register is available, pass a small aggregate right-justified. 3750 if (Size < 8 && GPR_idx != NumGPRs) { 3751 // The easiest way to get this right-justified in a register 3752 // is to copy the structure into the rightmost portion of a 3753 // local variable slot, then load the whole slot into the 3754 // register. 3755 // FIXME: The memcpy seems to produce pretty awful code for 3756 // small aggregates, particularly for packed ones. 3757 // FIXME: It would be preferable to use the slot in the 3758 // parameter save area instead of a new local variable. 3759 SDValue Const = DAG.getConstant(8 - Size, PtrOff.getValueType()); 3760 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 3761 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 3762 CallSeqStart, 3763 Flags, DAG, dl); 3764 3765 // Load the slot into the register. 3766 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, PtrOff, 3767 MachinePointerInfo(), 3768 false, false, false, 0); 3769 MemOpChains.push_back(Load.getValue(1)); 3770 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3771 3772 // Done with this argument. 3773 ArgOffset += PtrByteSize; 3774 continue; 3775 } 3776 3777 // For aggregates larger than PtrByteSize, copy the pieces of the 3778 // object that fit into registers from the parameter save area. 
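      // (Worked example, illustrative only: for a 24-byte aggregate with two
      //  GPRs still free, the loop below loads doublewords 0 and 8 of the
      //  original object into those GPRs (a full copy already sits in the
      //  parameter save area from above), then takes the else branch at
      //  j == 16 and advances ArgOffset past the remaining 8 bytes.)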
3779 for (unsigned j=0; j<Size; j+=PtrByteSize) { 3780 SDValue Const = DAG.getConstant(j, PtrOff.getValueType()); 3781 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 3782 if (GPR_idx != NumGPRs) { 3783 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 3784 MachinePointerInfo(), 3785 false, false, false, 0); 3786 MemOpChains.push_back(Load.getValue(1)); 3787 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3788 ArgOffset += PtrByteSize; 3789 } else { 3790 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 3791 break; 3792 } 3793 } 3794 continue; 3795 } 3796 3797 switch (Arg.getValueType().getSimpleVT().SimpleTy) { 3798 default: llvm_unreachable("Unexpected ValueType for argument!"); 3799 case MVT::i32: 3800 case MVT::i64: 3801 if (GPR_idx != NumGPRs) { 3802 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 3803 } else { 3804 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 3805 true, isTailCall, false, MemOpChains, 3806 TailCallArguments, dl); 3807 } 3808 ArgOffset += PtrByteSize; 3809 break; 3810 case MVT::f32: 3811 case MVT::f64: 3812 if (FPR_idx != NumFPRs) { 3813 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 3814 3815 if (isVarArg) { 3816 // A single float or an aggregate containing only a single float 3817 // must be passed right-justified in the stack doubleword, and 3818 // in the GPR, if one is available. 3819 SDValue StoreOff; 3820 if (Arg.getValueType().getSimpleVT().SimpleTy == MVT::f32) { 3821 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 3822 StoreOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 3823 } else 3824 StoreOff = PtrOff; 3825 3826 SDValue Store = DAG.getStore(Chain, dl, Arg, StoreOff, 3827 MachinePointerInfo(), false, false, 0); 3828 MemOpChains.push_back(Store); 3829 3830 // Float varargs are always shadowed in available integer registers 3831 if (GPR_idx != NumGPRs) { 3832 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 3833 MachinePointerInfo(), false, false, 3834 false, 0); 3835 MemOpChains.push_back(Load.getValue(1)); 3836 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3837 } 3838 } else if (GPR_idx != NumGPRs) 3839 // If we have any FPRs remaining, we may also have GPRs remaining. 3840 ++GPR_idx; 3841 } else { 3842 // Single-precision floating-point values are mapped to the 3843 // second (rightmost) word of the stack doubleword. 3844 if (Arg.getValueType() == MVT::f32) { 3845 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 3846 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 3847 } 3848 3849 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 3850 true, isTailCall, false, MemOpChains, 3851 TailCallArguments, dl); 3852 } 3853 ArgOffset += 8; 3854 break; 3855 case MVT::v4f32: 3856 case MVT::v4i32: 3857 case MVT::v8i16: 3858 case MVT::v16i8: 3859 if (isVarArg) { 3860 // These go aligned on the stack, or in the corresponding R registers 3861 // when within range. The Darwin PPC ABI doc claims they also go in 3862 // V registers; in fact gcc does this only for arguments that are 3863 // prototyped, not for those that match the ... We do it for all 3864 // arguments, seems to work. 3865 while (ArgOffset % 16 !=0) { 3866 ArgOffset += PtrByteSize; 3867 if (GPR_idx != NumGPRs) 3868 GPR_idx++; 3869 } 3870 // We could elide this store in the case where the object fits 3871 // entirely in R registers. Maybe later. 
3872 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 3873 DAG.getConstant(ArgOffset, PtrVT)); 3874 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 3875 MachinePointerInfo(), false, false, 0); 3876 MemOpChains.push_back(Store); 3877 if (VR_idx != NumVRs) { 3878 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 3879 MachinePointerInfo(), 3880 false, false, false, 0); 3881 MemOpChains.push_back(Load.getValue(1)); 3882 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 3883 } 3884 ArgOffset += 16; 3885 for (unsigned i=0; i<16; i+=PtrByteSize) { 3886 if (GPR_idx == NumGPRs) 3887 break; 3888 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 3889 DAG.getConstant(i, PtrVT)); 3890 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 3891 false, false, false, 0); 3892 MemOpChains.push_back(Load.getValue(1)); 3893 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3894 } 3895 break; 3896 } 3897 3898 // Non-varargs Altivec params generally go in registers, but have 3899 // stack space allocated at the end. 3900 if (VR_idx != NumVRs) { 3901 // Doesn't have GPR space allocated. 3902 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 3903 } else { 3904 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 3905 true, isTailCall, true, MemOpChains, 3906 TailCallArguments, dl); 3907 ArgOffset += 16; 3908 } 3909 break; 3910 } 3911 } 3912 3913 if (!MemOpChains.empty()) 3914 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 3915 &MemOpChains[0], MemOpChains.size()); 3916 3917 // Check if this is an indirect call (MTCTR/BCTRL). 3918 // See PrepareCall() for more information about calls through function 3919 // pointers in the 64-bit SVR4 ABI. 3920 if (!isTailCall && 3921 !dyn_cast<GlobalAddressSDNode>(Callee) && 3922 !dyn_cast<ExternalSymbolSDNode>(Callee) && 3923 !isBLACompatibleAddress(Callee, DAG)) { 3924 // Load r2 into a virtual register and store it to the TOC save area. 3925 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64); 3926 // TOC save area offset. 3927 SDValue PtrOff = DAG.getIntPtrConstant(40); 3928 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 3929 Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr, MachinePointerInfo(), 3930 false, false, 0); 3931 // R12 must contain the address of an indirect callee. This does not 3932 // mean the MTCTR instruction must use R12; it's easier to model this 3933 // as an extra parameter, so do that. 3934 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee)); 3935 } 3936 3937 // Build a sequence of copy-to-reg nodes chained together with token chain 3938 // and flag operands which copy the outgoing args into the appropriate regs. 
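  // (Note: the InFlag glue value threads each CopyToReg into the next and
  //  finally into the call node created in FinishCall(), which keeps these
  //  register copies immediately before the call so nothing can be scheduled
  //  in between that might clobber the argument registers.)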
3939 SDValue InFlag; 3940 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 3941 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 3942 RegsToPass[i].second, InFlag); 3943 InFlag = Chain.getValue(1); 3944 } 3945 3946 if (isTailCall) 3947 PrepareTailCall(DAG, InFlag, Chain, dl, true, SPDiff, NumBytes, LROp, 3948 FPOp, true, TailCallArguments); 3949 3950 return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG, 3951 RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes, 3952 Ins, InVals); 3953 } 3954 3955 SDValue 3956 PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee, 3957 CallingConv::ID CallConv, bool isVarArg, 3958 bool isTailCall, 3959 const SmallVectorImpl<ISD::OutputArg> &Outs, 3960 const SmallVectorImpl<SDValue> &OutVals, 3961 const SmallVectorImpl<ISD::InputArg> &Ins, 3962 DebugLoc dl, SelectionDAG &DAG, 3963 SmallVectorImpl<SDValue> &InVals) const { 3964 3965 unsigned NumOps = Outs.size(); 3966 3967 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3968 bool isPPC64 = PtrVT == MVT::i64; 3969 unsigned PtrByteSize = isPPC64 ? 8 : 4; 3970 3971 MachineFunction &MF = DAG.getMachineFunction(); 3972 3973 // Mark this function as potentially containing a function that contains a 3974 // tail call. As a consequence the frame pointer will be used for dynamicalloc 3975 // and restoring the callers stack pointer in this functions epilog. This is 3976 // done because by tail calling the called function might overwrite the value 3977 // in this function's (MF) stack pointer stack slot 0(SP). 3978 if (getTargetMachine().Options.GuaranteedTailCallOpt && 3979 CallConv == CallingConv::Fast) 3980 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 3981 3982 unsigned nAltivecParamsAtEnd = 0; 3983 3984 // Count how many bytes are to be pushed on the stack, including the linkage 3985 // area, and parameter passing area. We start with 24/48 bytes, which is 3986 // prereserved space for [SP][CR][LR][3 x unused]. 3987 unsigned NumBytes = 3988 CalculateParameterAndLinkageAreaSize(DAG, isPPC64, isVarArg, CallConv, 3989 Outs, OutVals, 3990 nAltivecParamsAtEnd); 3991 3992 // Calculate by how many bytes the stack has to be adjusted in case of tail 3993 // call optimization. 3994 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 3995 3996 // To protect arguments on the stack from being clobbered in a tail call, 3997 // force all the loads to happen before doing any other lowering. 3998 if (isTailCall) 3999 Chain = DAG.getStackArgumentTokenFactor(Chain); 4000 4001 // Adjust the stack pointer for the new arguments... 4002 // These operations are automatically eliminated by the prolog/epilog pass 4003 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); 4004 SDValue CallSeqStart = Chain; 4005 4006 // Load the return address and frame pointer so it can be move somewhere else 4007 // later. 4008 SDValue LROp, FPOp; 4009 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true, 4010 dl); 4011 4012 // Set up a copy of the stack pointer for use loading and storing any 4013 // arguments that may not fit in the registers available for argument 4014 // passing. 4015 SDValue StackPtr; 4016 if (isPPC64) 4017 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 4018 else 4019 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 4020 4021 // Figure out which arguments are going to go in registers, and which in 4022 // memory. 
Also, if this is a vararg function, floating point operations 4023 // must be stored to our stack, and loaded into integer regs as well, if 4024 // any integer regs are available for argument passing. 4025 unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true); 4026 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 4027 4028 static const uint16_t GPR_32[] = { // 32-bit registers. 4029 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 4030 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 4031 }; 4032 static const uint16_t GPR_64[] = { // 64-bit registers. 4033 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 4034 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 4035 }; 4036 static const uint16_t *FPR = GetFPR(); 4037 4038 static const uint16_t VR[] = { 4039 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 4040 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 4041 }; 4042 const unsigned NumGPRs = array_lengthof(GPR_32); 4043 const unsigned NumFPRs = 13; 4044 const unsigned NumVRs = array_lengthof(VR); 4045 4046 const uint16_t *GPR = isPPC64 ? GPR_64 : GPR_32; 4047 4048 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 4049 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 4050 4051 SmallVector<SDValue, 8> MemOpChains; 4052 for (unsigned i = 0; i != NumOps; ++i) { 4053 SDValue Arg = OutVals[i]; 4054 ISD::ArgFlagsTy Flags = Outs[i].Flags; 4055 4056 // PtrOff will be used to store the current argument to the stack if a 4057 // register cannot be found for it. 4058 SDValue PtrOff; 4059 4060 PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); 4061 4062 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 4063 4064 // On PPC64, promote integers to 64-bit values. 4065 if (isPPC64 && Arg.getValueType() == MVT::i32) { 4066 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 4067 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 4068 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 4069 } 4070 4071 // FIXME memcpy is used way more than necessary. Correctness first. 4072 // Note: "by value" is code for passing a structure by value, not 4073 // basic types. 4074 if (Flags.isByVal()) { 4075 unsigned Size = Flags.getByValSize(); 4076 // Very small objects are passed right-justified. Everything else is 4077 // passed left-justified. 4078 if (Size==1 || Size==2) { 4079 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 4080 if (GPR_idx != NumGPRs) { 4081 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 4082 MachinePointerInfo(), VT, 4083 false, false, 0); 4084 MemOpChains.push_back(Load.getValue(1)); 4085 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4086 4087 ArgOffset += PtrByteSize; 4088 } else { 4089 SDValue Const = DAG.getConstant(PtrByteSize - Size, 4090 PtrOff.getValueType()); 4091 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 4092 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 4093 CallSeqStart, 4094 Flags, DAG, dl); 4095 ArgOffset += PtrByteSize; 4096 } 4097 continue; 4098 } 4099 // Copy entire object into memory. There are cases where gcc-generated 4100 // code assumes it is there, even if it could be put entirely into 4101 // registers. (This is not what the doc says.) 4102 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 4103 CallSeqStart, 4104 Flags, DAG, dl); 4105 4106 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 4107 // copy the pieces of the object that fit into registers from the 4108 // parameter save area. 
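      // (Hedged note: under the Darwin ABI every parameter, byval aggregates
      //  included, shadows both a GPR and its slot in the parameter area;
      //  that is why the loop below bumps GPR_idx and ArgOffset together and,
      //  once the GPRs are exhausted, simply skips ArgOffset past whatever is
      //  left of the object.)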
4109 for (unsigned j=0; j<Size; j+=PtrByteSize) { 4110 SDValue Const = DAG.getConstant(j, PtrOff.getValueType()); 4111 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 4112 if (GPR_idx != NumGPRs) { 4113 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 4114 MachinePointerInfo(), 4115 false, false, false, 0); 4116 MemOpChains.push_back(Load.getValue(1)); 4117 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4118 ArgOffset += PtrByteSize; 4119 } else { 4120 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 4121 break; 4122 } 4123 } 4124 continue; 4125 } 4126 4127 switch (Arg.getValueType().getSimpleVT().SimpleTy) { 4128 default: llvm_unreachable("Unexpected ValueType for argument!"); 4129 case MVT::i32: 4130 case MVT::i64: 4131 if (GPR_idx != NumGPRs) { 4132 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 4133 } else { 4134 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4135 isPPC64, isTailCall, false, MemOpChains, 4136 TailCallArguments, dl); 4137 } 4138 ArgOffset += PtrByteSize; 4139 break; 4140 case MVT::f32: 4141 case MVT::f64: 4142 if (FPR_idx != NumFPRs) { 4143 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 4144 4145 if (isVarArg) { 4146 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 4147 MachinePointerInfo(), false, false, 0); 4148 MemOpChains.push_back(Store); 4149 4150 // Float varargs are always shadowed in available integer registers 4151 if (GPR_idx != NumGPRs) { 4152 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 4153 MachinePointerInfo(), false, false, 4154 false, 0); 4155 MemOpChains.push_back(Load.getValue(1)); 4156 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4157 } 4158 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 4159 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 4160 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 4161 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 4162 MachinePointerInfo(), 4163 false, false, false, 0); 4164 MemOpChains.push_back(Load.getValue(1)); 4165 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4166 } 4167 } else { 4168 // If we have any FPRs remaining, we may also have GPRs remaining. 4169 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 4170 // GPRs. 4171 if (GPR_idx != NumGPRs) 4172 ++GPR_idx; 4173 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 4174 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 4175 ++GPR_idx; 4176 } 4177 } else 4178 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4179 isPPC64, isTailCall, false, MemOpChains, 4180 TailCallArguments, dl); 4181 if (isPPC64) 4182 ArgOffset += 8; 4183 else 4184 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 4185 break; 4186 case MVT::v4f32: 4187 case MVT::v4i32: 4188 case MVT::v8i16: 4189 case MVT::v16i8: 4190 if (isVarArg) { 4191 // These go aligned on the stack, or in the corresponding R registers 4192 // when within range. The Darwin PPC ABI doc claims they also go in 4193 // V registers; in fact gcc does this only for arguments that are 4194 // prototyped, not for those that match the ... We do it for all 4195 // arguments, seems to work. 4196 while (ArgOffset % 16 !=0) { 4197 ArgOffset += PtrByteSize; 4198 if (GPR_idx != NumGPRs) 4199 GPR_idx++; 4200 } 4201 // We could elide this store in the case where the object fits 4202 // entirely in R registers. Maybe later. 
4203 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4204 DAG.getConstant(ArgOffset, PtrVT)); 4205 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 4206 MachinePointerInfo(), false, false, 0); 4207 MemOpChains.push_back(Store); 4208 if (VR_idx != NumVRs) { 4209 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 4210 MachinePointerInfo(), 4211 false, false, false, 0); 4212 MemOpChains.push_back(Load.getValue(1)); 4213 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 4214 } 4215 ArgOffset += 16; 4216 for (unsigned i=0; i<16; i+=PtrByteSize) { 4217 if (GPR_idx == NumGPRs) 4218 break; 4219 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 4220 DAG.getConstant(i, PtrVT)); 4221 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 4222 false, false, false, 0); 4223 MemOpChains.push_back(Load.getValue(1)); 4224 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4225 } 4226 break; 4227 } 4228 4229 // Non-varargs Altivec params generally go in registers, but have 4230 // stack space allocated at the end. 4231 if (VR_idx != NumVRs) { 4232 // Doesn't have GPR space allocated. 4233 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 4234 } else if (nAltivecParamsAtEnd==0) { 4235 // We are emitting Altivec params in order. 4236 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4237 isPPC64, isTailCall, true, MemOpChains, 4238 TailCallArguments, dl); 4239 ArgOffset += 16; 4240 } 4241 break; 4242 } 4243 } 4244 // If all Altivec parameters fit in registers, as they usually do, 4245 // they get stack space following the non-Altivec parameters. We 4246 // don't track this here because nobody below needs it. 4247 // If there are more Altivec parameters than fit in registers emit 4248 // the stores here. 4249 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 4250 unsigned j = 0; 4251 // Offset is aligned; skip 1st 12 params which go in V registers. 4252 ArgOffset = ((ArgOffset+15)/16)*16; 4253 ArgOffset += 12*16; 4254 for (unsigned i = 0; i != NumOps; ++i) { 4255 SDValue Arg = OutVals[i]; 4256 EVT ArgType = Outs[i].VT; 4257 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 4258 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 4259 if (++j > NumVRs) { 4260 SDValue PtrOff; 4261 // We are emitting Altivec params in order. 4262 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4263 isPPC64, isTailCall, true, MemOpChains, 4264 TailCallArguments, dl); 4265 ArgOffset += 16; 4266 } 4267 } 4268 } 4269 } 4270 4271 if (!MemOpChains.empty()) 4272 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 4273 &MemOpChains[0], MemOpChains.size()); 4274 4275 // On Darwin, R12 must contain the address of an indirect callee. This does 4276 // not mean the MTCTR instruction must use R12; it's easier to model this as 4277 // an extra parameter, so do that. 4278 if (!isTailCall && 4279 !dyn_cast<GlobalAddressSDNode>(Callee) && 4280 !dyn_cast<ExternalSymbolSDNode>(Callee) && 4281 !isBLACompatibleAddress(Callee, DAG)) 4282 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 : 4283 PPC::R12), Callee)); 4284 4285 // Build a sequence of copy-to-reg nodes chained together with token chain 4286 // and flag operands which copy the outgoing args into the appropriate regs. 
4287 SDValue InFlag; 4288 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 4289 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 4290 RegsToPass[i].second, InFlag); 4291 InFlag = Chain.getValue(1); 4292 } 4293 4294 if (isTailCall) 4295 PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp, 4296 FPOp, true, TailCallArguments); 4297 4298 return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG, 4299 RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes, 4300 Ins, InVals); 4301 } 4302 4303 bool 4304 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 4305 MachineFunction &MF, bool isVarArg, 4306 const SmallVectorImpl<ISD::OutputArg> &Outs, 4307 LLVMContext &Context) const { 4308 SmallVector<CCValAssign, 16> RVLocs; 4309 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 4310 RVLocs, Context); 4311 return CCInfo.CheckReturn(Outs, RetCC_PPC); 4312 } 4313 4314 SDValue 4315 PPCTargetLowering::LowerReturn(SDValue Chain, 4316 CallingConv::ID CallConv, bool isVarArg, 4317 const SmallVectorImpl<ISD::OutputArg> &Outs, 4318 const SmallVectorImpl<SDValue> &OutVals, 4319 DebugLoc dl, SelectionDAG &DAG) const { 4320 4321 SmallVector<CCValAssign, 16> RVLocs; 4322 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 4323 getTargetMachine(), RVLocs, *DAG.getContext()); 4324 CCInfo.AnalyzeReturn(Outs, RetCC_PPC); 4325 4326 // If this is the first return lowered for this function, add the regs to the 4327 // liveout set for the function. 4328 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 4329 for (unsigned i = 0; i != RVLocs.size(); ++i) 4330 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 4331 } 4332 4333 SDValue Flag; 4334 4335 // Copy the result values into the output registers. 4336 for (unsigned i = 0; i != RVLocs.size(); ++i) { 4337 CCValAssign &VA = RVLocs[i]; 4338 assert(VA.isRegLoc() && "Can only return in registers!"); 4339 4340 SDValue Arg = OutVals[i]; 4341 4342 switch (VA.getLocInfo()) { 4343 default: llvm_unreachable("Unknown loc info!"); 4344 case CCValAssign::Full: break; 4345 case CCValAssign::AExt: 4346 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 4347 break; 4348 case CCValAssign::ZExt: 4349 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 4350 break; 4351 case CCValAssign::SExt: 4352 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 4353 break; 4354 } 4355 4356 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 4357 Flag = Chain.getValue(1); 4358 } 4359 4360 if (Flag.getNode()) 4361 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, Chain, Flag); 4362 else 4363 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, Chain); 4364 } 4365 4366 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG, 4367 const PPCSubtarget &Subtarget) const { 4368 // When we pop the dynamic allocation we need to restore the SP link. 4369 DebugLoc dl = Op.getDebugLoc(); 4370 4371 // Get the corect type for pointers. 4372 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 4373 4374 // Construct the stack pointer operand. 4375 bool isPPC64 = Subtarget.isPPC64(); 4376 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1; 4377 SDValue StackPtr = DAG.getRegister(SP, PtrVT); 4378 4379 // Get the operands for the STACKRESTORE. 4380 SDValue Chain = Op.getOperand(0); 4381 SDValue SaveSP = Op.getOperand(1); 4382 4383 // Load the old link SP. 
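  // (Note: the word at 0(r1) is the stack back chain.  The code below reloads
  //  the saved back chain from the old stack pointer, moves SaveSP into r1,
  //  and then stores the back chain at offset 0 of the new stack pointer so
  //  the frame chain stays intact after the dynamically allocated area is
  //  released.)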
4384   SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr,
4385                                    MachinePointerInfo(),
4386                                    false, false, false, 0);
4387 
4388   // Restore the stack pointer.
4389   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
4390 
4391   // Store the old link SP.
4392   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo(),
4393                       false, false, 0);
4394 }
4395 
4396 
4397 
4398 SDValue
4399 PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
4400   MachineFunction &MF = DAG.getMachineFunction();
4401   bool isPPC64 = PPCSubTarget.isPPC64();
4402   bool isDarwinABI = PPCSubTarget.isDarwinABI();
4403   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
4404 
4405   // Get the current return address save index.  Its users include the tail
4406   // call lowering above (EmitTailCallLoadFPAndRetAddr).
4407   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
4408   int RASI = FI->getReturnAddrSaveIndex();
4409 
4410   // If the return address save index hasn't been defined yet.
4411   if (!RASI) {
4412     // Find out the fixed offset of the return address save area.
4413     int LROffset = PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI);
4414     // Allocate the frame index for the return address save area.
4415     RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, true);
4416     // Save the result.
4417     FI->setReturnAddrSaveIndex(RASI);
4418   }
4419   return DAG.getFrameIndex(RASI, PtrVT);
4420 }
4421 
4422 SDValue
4423 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
4424   MachineFunction &MF = DAG.getMachineFunction();
4425   bool isPPC64 = PPCSubTarget.isPPC64();
4426   bool isDarwinABI = PPCSubTarget.isDarwinABI();
4427   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
4428 
4429   // Get the current frame pointer save index.  The users of this index will
4430   // be primarily DYNALLOC instructions.
4431   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
4432   int FPSI = FI->getFramePointerSaveIndex();
4433 
4434   // If the frame pointer save index hasn't been defined yet.
4435   if (!FPSI) {
4436     // Find out the fixed offset of the frame pointer save area.
4437     int FPOffset = PPCFrameLowering::getFramePointerSaveOffset(isPPC64,
4438                                                                isDarwinABI);
4439 
4440     // Allocate the frame index for the frame pointer save area.
4441     FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
4442     // Save the result.
4443     FI->setFramePointerSaveIndex(FPSI);
4444   }
4445   return DAG.getFrameIndex(FPSI, PtrVT);
4446 }
4447 
4448 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
4449                                          SelectionDAG &DAG,
4450                                          const PPCSubtarget &Subtarget) const {
4451   // Get the inputs.
4452   SDValue Chain = Op.getOperand(0);
4453   SDValue Size  = Op.getOperand(1);
4454   DebugLoc dl = Op.getDebugLoc();
4455 
4456   // Get the correct type for pointers.
4457   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
4458   // Negate the size.
4459   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
4460                                 DAG.getConstant(0, PtrVT), Size);
4461   // Construct a node for the frame pointer save index.
4462   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
4463   // Build a DYNALLOC node.
4464   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
4465   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
4466   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops, 3);
4467 }
4468 
4469 /// LowerSELECT_CC - Lower floating-point select_cc's into the fsel instruction
4470 /// when possible.
4471 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
4472   // Not FP? Not a fsel.
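  // (Note on fsel, for reading the code below:
  //      fsel fD, fA, fC, fB   ==>   fD = (fA >= 0.0) ? fC : fB
  //  with a NaN in fA selecting fB.  The lowering therefore reduces each
  //  ordered comparison to a sign test of LHS, LHS - RHS, or RHS - LHS and
  //  swaps the true/false operands where necessary.)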
4473 if (!Op.getOperand(0).getValueType().isFloatingPoint() || 4474 !Op.getOperand(2).getValueType().isFloatingPoint()) 4475 return Op; 4476 4477 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 4478 4479 // Cannot handle SETEQ/SETNE. 4480 if (CC == ISD::SETEQ || CC == ISD::SETNE) return Op; 4481 4482 EVT ResVT = Op.getValueType(); 4483 EVT CmpVT = Op.getOperand(0).getValueType(); 4484 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 4485 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3); 4486 DebugLoc dl = Op.getDebugLoc(); 4487 4488 // If the RHS of the comparison is a 0.0, we don't need to do the 4489 // subtraction at all. 4490 if (isFloatingPointZero(RHS)) 4491 switch (CC) { 4492 default: break; // SETUO etc aren't handled by fsel. 4493 case ISD::SETULT: 4494 case ISD::SETLT: 4495 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 4496 case ISD::SETOGE: 4497 case ISD::SETGE: 4498 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 4499 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 4500 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 4501 case ISD::SETUGT: 4502 case ISD::SETGT: 4503 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 4504 case ISD::SETOLE: 4505 case ISD::SETLE: 4506 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 4507 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 4508 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 4509 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV); 4510 } 4511 4512 SDValue Cmp; 4513 switch (CC) { 4514 default: break; // SETUO etc aren't handled by fsel. 4515 case ISD::SETULT: 4516 case ISD::SETLT: 4517 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 4518 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4519 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4520 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 4521 case ISD::SETOGE: 4522 case ISD::SETGE: 4523 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 4524 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4525 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4526 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 4527 case ISD::SETUGT: 4528 case ISD::SETGT: 4529 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 4530 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4531 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4532 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 4533 case ISD::SETOLE: 4534 case ISD::SETLE: 4535 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 4536 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4537 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4538 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 4539 } 4540 return Op; 4541 } 4542 4543 // FIXME: Split this code up when LegalizeDAGTypes lands. 4544 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 4545 DebugLoc dl) const { 4546 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 4547 SDValue Src = Op.getOperand(0); 4548 if (Src.getValueType() == MVT::f32) 4549 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 4550 4551 SDValue Tmp; 4552 switch (Op.getValueType().getSimpleVT().SimpleTy) { 4553 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 4554 case MVT::i32: 4555 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? 
PPCISD::FCTIWZ : 4556 PPCISD::FCTIDZ, 4557 dl, MVT::f64, Src); 4558 break; 4559 case MVT::i64: 4560 Tmp = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Src); 4561 break; 4562 } 4563 4564 // Convert the FP value to an int value through memory. 4565 SDValue FIPtr = DAG.CreateStackTemporary(MVT::f64); 4566 4567 // Emit a store to the stack slot. 4568 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, 4569 MachinePointerInfo(), false, false, 0); 4570 4571 // Result is a load from the stack slot. If loading 4 bytes, make sure to 4572 // add in a bias. 4573 if (Op.getValueType() == MVT::i32) 4574 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 4575 DAG.getConstant(4, FIPtr.getValueType())); 4576 return DAG.getLoad(Op.getValueType(), dl, Chain, FIPtr, MachinePointerInfo(), 4577 false, false, false, 0); 4578 } 4579 4580 SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op, 4581 SelectionDAG &DAG) const { 4582 DebugLoc dl = Op.getDebugLoc(); 4583 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 4584 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 4585 return SDValue(); 4586 4587 if (Op.getOperand(0).getValueType() == MVT::i64) { 4588 SDValue SINT = Op.getOperand(0); 4589 // When converting to single-precision, we actually need to convert 4590 // to double-precision first and then round to single-precision. 4591 // To avoid double-rounding effects during that operation, we have 4592 // to prepare the input operand. Bits that might be truncated when 4593 // converting to double-precision are replaced by a bit that won't 4594 // be lost at this stage, but is below the single-precision rounding 4595 // position. 4596 // 4597 // However, if -enable-unsafe-fp-math is in effect, accept double 4598 // rounding to avoid the extra overhead. 4599 if (Op.getValueType() == MVT::f32 && 4600 !DAG.getTarget().Options.UnsafeFPMath) { 4601 4602 // Twiddle input to make sure the low 11 bits are zero. (If this 4603 // is the case, we are guaranteed the value will fit into the 53 bit 4604 // mantissa of an IEEE double-precision value without rounding.) 4605 // If any of those low 11 bits were not zero originally, make sure 4606 // bit 12 (value 2048) is set instead, so that the final rounding 4607 // to single-precision gets the correct result. 4608 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 4609 SINT, DAG.getConstant(2047, MVT::i64)); 4610 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 4611 Round, DAG.getConstant(2047, MVT::i64)); 4612 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 4613 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 4614 Round, DAG.getConstant(-2048, MVT::i64)); 4615 4616 // However, we cannot use that value unconditionally: if the magnitude 4617 // of the input value is small, the bit-twiddling we did above might 4618 // end up visibly changing the output. Fortunately, in that case, we 4619 // don't need to twiddle bits since the original input will convert 4620 // exactly to double-precision floating-point already. Therefore, 4621 // construct a conditional to use the original value if the top 11 4622 // bits are all sign-bit copies, and use the rounded value computed 4623 // above otherwise. 
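// For example, an i64 input whose value ends in ...0x1234 is rounded to
// ...0x1800 (low 11 bits cleared, bit 11 forced on), while ...0x1000 already
// has all low 11 bits clear and is left unchanged.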
4624 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 4625 SINT, DAG.getConstant(53, MVT::i32)); 4626 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 4627 Cond, DAG.getConstant(1, MVT::i64)); 4628 Cond = DAG.getSetCC(dl, MVT::i32, 4629 Cond, DAG.getConstant(1, MVT::i64), ISD::SETUGT); 4630 4631 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 4632 } 4633 SDValue Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 4634 SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Bits); 4635 if (Op.getValueType() == MVT::f32) 4636 FP = DAG.getNode(ISD::FP_ROUND, dl, 4637 MVT::f32, FP, DAG.getIntPtrConstant(0)); 4638 return FP; 4639 } 4640 4641 assert(Op.getOperand(0).getValueType() == MVT::i32 && 4642 "Unhandled SINT_TO_FP type in custom expander!"); 4643 // Since we only generate this in 64-bit mode, we can take advantage of 4644 // 64-bit registers. In particular, sign extend the input value into the 4645 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 4646 // then lfd it and fcfid it. 4647 MachineFunction &MF = DAG.getMachineFunction(); 4648 MachineFrameInfo *FrameInfo = MF.getFrameInfo(); 4649 int FrameIdx = FrameInfo->CreateStackObject(8, 8, false); 4650 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 4651 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 4652 4653 SDValue Ext64 = DAG.getNode(PPCISD::EXTSW_32, dl, MVT::i32, 4654 Op.getOperand(0)); 4655 4656 // STD the extended value into the stack slot. 4657 MachineMemOperand *MMO = 4658 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx), 4659 MachineMemOperand::MOStore, 8, 8); 4660 SDValue Ops[] = { DAG.getEntryNode(), Ext64, FIdx }; 4661 SDValue Store = 4662 DAG.getMemIntrinsicNode(PPCISD::STD_32, dl, DAG.getVTList(MVT::Other), 4663 Ops, 4, MVT::i64, MMO); 4664 // Load the value as a double. 4665 SDValue Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx, MachinePointerInfo(), 4666 false, false, false, 0); 4667 4668 // FCFID it and return it. 
4669 SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Ld); 4670 if (Op.getValueType() == MVT::f32) 4671 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, DAG.getIntPtrConstant(0)); 4672 return FP; 4673 } 4674 4675 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 4676 SelectionDAG &DAG) const { 4677 DebugLoc dl = Op.getDebugLoc(); 4678 /* 4679 The rounding mode is in bits 30:31 of FPSCR, and has the following 4680 settings: 4681 00 Round to nearest 4682 01 Round to 0 4683 10 Round to +inf 4684 11 Round to -inf 4685 4686 FLT_ROUNDS, on the other hand, expects the following: 4687 -1 Undefined 4688 0 Round to 0 4689 1 Round to nearest 4690 2 Round to +inf 4691 3 Round to -inf 4692 4693 To perform the conversion, we do: 4694 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1)) 4695 */ 4696 4697 MachineFunction &MF = DAG.getMachineFunction(); 4698 EVT VT = Op.getValueType(); 4699 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 4700 std::vector<EVT> NodeTys; 4701 SDValue MFFSreg, InFlag; 4702 4703 // Save FP Control Word to register 4704 NodeTys.push_back(MVT::f64); // return register 4705 NodeTys.push_back(MVT::Glue); // unused in this context 4706 SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0); 4707 4708 // Save FP register to stack slot 4709 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false); 4710 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT); 4711 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, 4712 StackSlot, MachinePointerInfo(), false, false, 0); 4713 4714 // Load FP Control Word from low 32 bits of stack slot. 4715 SDValue Four = DAG.getConstant(4, PtrVT); 4716 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four); 4717 SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(), 4718 false, false, false, 0); 4719 4720 // Transform as necessary 4721 SDValue CWD1 = 4722 DAG.getNode(ISD::AND, dl, MVT::i32, 4723 CWD, DAG.getConstant(3, MVT::i32)); 4724 SDValue CWD2 = 4725 DAG.getNode(ISD::SRL, dl, MVT::i32, 4726 DAG.getNode(ISD::AND, dl, MVT::i32, 4727 DAG.getNode(ISD::XOR, dl, MVT::i32, 4728 CWD, DAG.getConstant(3, MVT::i32)), 4729 DAG.getConstant(3, MVT::i32)), 4730 DAG.getConstant(1, MVT::i32)); 4731 4732 SDValue RetVal = 4733 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2); 4734 4735 return DAG.getNode((VT.getSizeInBits() < 16 ? 4736 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal); 4737 } 4738 4739 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const { 4740 EVT VT = Op.getValueType(); 4741 unsigned BitWidth = VT.getSizeInBits(); 4742 DebugLoc dl = Op.getDebugLoc(); 4743 assert(Op.getNumOperands() == 3 && 4744 VT == Op.getOperand(1).getValueType() && 4745 "Unexpected SHL!"); 4746 4747 // Expand into a bunch of logical ops. Note that these ops 4748 // depend on the PPC behavior for oversized shift amounts.
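// Concretely, slw/srw (and sld/srd) yield 0 for any shift amount of BitWidth
// or more, so the (BitWidth - Amt) and (Amt - BitWidth) terms below each
// survive only for amounts on their side of BitWidth, and OR'ing the pieces
// together produces the correct double-width shift for every in-range amount.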
4749 SDValue Lo = Op.getOperand(0); 4750 SDValue Hi = Op.getOperand(1); 4751 SDValue Amt = Op.getOperand(2); 4752 EVT AmtVT = Amt.getValueType(); 4753 4754 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 4755 DAG.getConstant(BitWidth, AmtVT), Amt); 4756 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 4757 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 4758 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 4759 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 4760 DAG.getConstant(-BitWidth, AmtVT)); 4761 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 4762 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 4763 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 4764 SDValue OutOps[] = { OutLo, OutHi }; 4765 return DAG.getMergeValues(OutOps, 2, dl); 4766 } 4767 4768 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 4769 EVT VT = Op.getValueType(); 4770 DebugLoc dl = Op.getDebugLoc(); 4771 unsigned BitWidth = VT.getSizeInBits(); 4772 assert(Op.getNumOperands() == 3 && 4773 VT == Op.getOperand(1).getValueType() && 4774 "Unexpected SRL!"); 4775 4776 // Expand into a bunch of logical ops. Note that these ops 4777 // depend on the PPC behavior for oversized shift amounts. 4778 SDValue Lo = Op.getOperand(0); 4779 SDValue Hi = Op.getOperand(1); 4780 SDValue Amt = Op.getOperand(2); 4781 EVT AmtVT = Amt.getValueType(); 4782 4783 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 4784 DAG.getConstant(BitWidth, AmtVT), Amt); 4785 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 4786 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 4787 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 4788 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 4789 DAG.getConstant(-BitWidth, AmtVT)); 4790 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 4791 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 4792 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 4793 SDValue OutOps[] = { OutLo, OutHi }; 4794 return DAG.getMergeValues(OutOps, 2, dl); 4795 } 4796 4797 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 4798 DebugLoc dl = Op.getDebugLoc(); 4799 EVT VT = Op.getValueType(); 4800 unsigned BitWidth = VT.getSizeInBits(); 4801 assert(Op.getNumOperands() == 3 && 4802 VT == Op.getOperand(1).getValueType() && 4803 "Unexpected SRA!"); 4804 4805 // Expand into a bunch of logical ops, followed by a select_cc. 4806 SDValue Lo = Op.getOperand(0); 4807 SDValue Hi = Op.getOperand(1); 4808 SDValue Amt = Op.getOperand(2); 4809 EVT AmtVT = Amt.getValueType(); 4810 4811 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 4812 DAG.getConstant(BitWidth, AmtVT), Amt); 4813 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 4814 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 4815 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 4816 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 4817 DAG.getConstant(-BitWidth, AmtVT)); 4818 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 4819 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 4820 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, AmtVT), 4821 Tmp4, Tmp6, ISD::SETLE); 4822 SDValue OutOps[] = { OutLo, OutHi }; 4823 return DAG.getMergeValues(OutOps, 2, dl); 4824 } 4825 4826 //===----------------------------------------------------------------------===// 4827 // Vector related lowering. 
4828 // 4829 4830 /// BuildSplatI - Build a canonical splati of Val with an element size of 4831 /// SplatSize. Cast the result to VT. 4832 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 4833 SelectionDAG &DAG, DebugLoc dl) { 4834 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 4835 4836 static const EVT VTys[] = { // canonical VT to use for each size. 4837 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 4838 }; 4839 4840 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 4841 4842 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 4843 if (Val == -1) 4844 SplatSize = 1; 4845 4846 EVT CanonicalVT = VTys[SplatSize-1]; 4847 4848 // Build a canonical splat for this value. 4849 SDValue Elt = DAG.getConstant(Val, MVT::i32); 4850 SmallVector<SDValue, 8> Ops; 4851 Ops.assign(CanonicalVT.getVectorNumElements(), Elt); 4852 SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, 4853 &Ops[0], Ops.size()); 4854 return DAG.getNode(ISD::BITCAST, dl, ReqVT, Res); 4855 } 4856 4857 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 4858 /// specified intrinsic ID. 4859 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 4860 SelectionDAG &DAG, DebugLoc dl, 4861 EVT DestVT = MVT::Other) { 4862 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 4863 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 4864 DAG.getConstant(IID, MVT::i32), LHS, RHS); 4865 } 4866 4867 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 4868 /// specified intrinsic ID. 4869 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 4870 SDValue Op2, SelectionDAG &DAG, 4871 DebugLoc dl, EVT DestVT = MVT::Other) { 4872 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 4873 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 4874 DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2); 4875 } 4876 4877 4878 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 4879 /// amount. The result has the specified value type. 4880 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, 4881 EVT VT, SelectionDAG &DAG, DebugLoc dl) { 4882 // Force LHS/RHS to be the right type. 4883 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 4884 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 4885 4886 int Ops[16]; 4887 for (unsigned i = 0; i != 16; ++i) 4888 Ops[i] = i + Amt; 4889 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 4890 return DAG.getNode(ISD::BITCAST, dl, VT, T); 4891 } 4892 4893 // If this is a case we can't handle, return null and let the default 4894 // expansion code take care of it. If we CAN select this case, and if it 4895 // selects to a single instruction, return Op. Otherwise, if we can codegen 4896 // this case more efficiently than a constant pool load, lower it to the 4897 // sequence of ops that should be used. 4898 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 4899 SelectionDAG &DAG) const { 4900 DebugLoc dl = Op.getDebugLoc(); 4901 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 4902 assert(BVN != 0 && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 4903 4904 // Check if this is a splat of a constant value. 4905 APInt APSplatBits, APSplatUndef; 4906 unsigned SplatBitSize; 4907 bool HasAnyUndefs; 4908 if (! 
BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize, 4909 HasAnyUndefs, 0, true) || SplatBitSize > 32) 4910 return SDValue(); 4911 4912 unsigned SplatBits = APSplatBits.getZExtValue(); 4913 unsigned SplatUndef = APSplatUndef.getZExtValue(); 4914 unsigned SplatSize = SplatBitSize / 8; 4915 4916 // First, handle single instruction cases. 4917 4918 // All zeros? 4919 if (SplatBits == 0) { 4920 // Canonicalize all zero vectors to be v4i32. 4921 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 4922 SDValue Z = DAG.getConstant(0, MVT::i32); 4923 Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z); 4924 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z); 4925 } 4926 return Op; 4927 } 4928 4929 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 4930 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >> 4931 (32-SplatBitSize)); 4932 if (SextVal >= -16 && SextVal <= 15) 4933 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl); 4934 4935 4936 // Two instruction sequences. 4937 4938 // If this value is in the range [-32,30] and is even, use: 4939 // tmp = VSPLTI[bhw], result = add tmp, tmp 4940 if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) { 4941 SDValue Res = BuildSplatI(SextVal >> 1, SplatSize, MVT::Other, DAG, dl); 4942 Res = DAG.getNode(ISD::ADD, dl, Res.getValueType(), Res, Res); 4943 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 4944 } 4945 4946 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is 4947 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important 4948 // for fneg/fabs. 4949 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) { 4950 // Make -1 and vspltisw -1: 4951 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl); 4952 4953 // Make the VSLW intrinsic, computing 0x8000_0000. 4954 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV, 4955 OnesV, DAG, dl); 4956 4957 // xor by OnesV to invert it. 4958 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV); 4959 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 4960 } 4961 4962 // Check to see if this is a wide variety of vsplti*, binop self cases. 4963 static const signed char SplatCsts[] = { 4964 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7, 4965 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16 4966 }; 4967 4968 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) { 4969 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for 4970 // cases which are ambiguous (e.g. formation of 0x8000_0000). 'vsplti -1' 4971 int i = SplatCsts[idx]; 4972 4973 // Figure out what shift amount will be used by altivec if shifted by i in 4974 // this splat size. 4975 unsigned TypeShiftAmt = i & (SplatBitSize-1); 4976 4977 // vsplti + shl self. 4978 if (SextVal == (int)((unsigned)i << TypeShiftAmt)) { 4979 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 4980 static const unsigned IIDs[] = { // Intrinsic to use for each size. 4981 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0, 4982 Intrinsic::ppc_altivec_vslw 4983 }; 4984 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 4985 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 4986 } 4987 4988 // vsplti + srl self. 4989 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 4990 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 4991 static const unsigned IIDs[] = { // Intrinsic to use for each size. 
4992 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0, 4993 Intrinsic::ppc_altivec_vsrw 4994 }; 4995 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 4996 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 4997 } 4998 4999 // vsplti + sra self. 5000 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 5001 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 5002 static const unsigned IIDs[] = { // Intrinsic to use for each size. 5003 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0, 5004 Intrinsic::ppc_altivec_vsraw 5005 }; 5006 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 5007 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 5008 } 5009 5010 // vsplti + rol self. 5011 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) | 5012 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) { 5013 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 5014 static const unsigned IIDs[] = { // Intrinsic to use for each size. 5015 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 5016 Intrinsic::ppc_altivec_vrlw 5017 }; 5018 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 5019 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 5020 } 5021 5022 // t = vsplti c, result = vsldoi t, t, 1 5023 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) { 5024 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 5025 return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG, dl); 5026 } 5027 // t = vsplti c, result = vsldoi t, t, 2 5028 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) { 5029 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 5030 return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG, dl); 5031 } 5032 // t = vsplti c, result = vsldoi t, t, 3 5033 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) { 5034 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 5035 return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl); 5036 } 5037 } 5038 5039 // Three instruction sequences. 5040 5041 // Odd, in range [17,31]: (vsplti C)-(vsplti -16). 5042 if (SextVal >= 0 && SextVal <= 31) { 5043 SDValue LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG, dl); 5044 SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl); 5045 LHS = DAG.getNode(ISD::SUB, dl, LHS.getValueType(), LHS, RHS); 5046 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), LHS); 5047 } 5048 // Odd, in range [-31,-17]: (vsplti C)+(vsplti -16). 5049 if (SextVal >= -31 && SextVal <= 0) { 5050 SDValue LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG, dl); 5051 SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl); 5052 LHS = DAG.getNode(ISD::ADD, dl, LHS.getValueType(), LHS, RHS); 5053 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), LHS); 5054 } 5055 5056 return SDValue(); 5057 } 5058 5059 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 5060 /// the specified operations to build the shuffle. 
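/// Each PFEntry packs a 2-bit cost in bits [31:30] (consulted by
/// LowerVECTOR_SHUFFLE), a 4-bit opcode from the OP_* enum below in bits
/// [29:26], and two 13-bit operand entries (LHSID in [25:13], RHSID in
/// [12:0]) that are expanded recursively.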
5061 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 5062 SDValue RHS, SelectionDAG &DAG, 5063 DebugLoc dl) { 5064 unsigned OpNum = (PFEntry >> 26) & 0x0F; 5065 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 5066 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 5067 5068 enum { 5069 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 5070 OP_VMRGHW, 5071 OP_VMRGLW, 5072 OP_VSPLTISW0, 5073 OP_VSPLTISW1, 5074 OP_VSPLTISW2, 5075 OP_VSPLTISW3, 5076 OP_VSLDOI4, 5077 OP_VSLDOI8, 5078 OP_VSLDOI12 5079 }; 5080 5081 if (OpNum == OP_COPY) { 5082 if (LHSID == (1*9+2)*9+3) return LHS; 5083 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 5084 return RHS; 5085 } 5086 5087 SDValue OpLHS, OpRHS; 5088 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 5089 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 5090 5091 int ShufIdxs[16]; 5092 switch (OpNum) { 5093 default: llvm_unreachable("Unknown i32 permute!"); 5094 case OP_VMRGHW: 5095 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 5096 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 5097 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 5098 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 5099 break; 5100 case OP_VMRGLW: 5101 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 5102 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 5103 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 5104 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 5105 break; 5106 case OP_VSPLTISW0: 5107 for (unsigned i = 0; i != 16; ++i) 5108 ShufIdxs[i] = (i&3)+0; 5109 break; 5110 case OP_VSPLTISW1: 5111 for (unsigned i = 0; i != 16; ++i) 5112 ShufIdxs[i] = (i&3)+4; 5113 break; 5114 case OP_VSPLTISW2: 5115 for (unsigned i = 0; i != 16; ++i) 5116 ShufIdxs[i] = (i&3)+8; 5117 break; 5118 case OP_VSPLTISW3: 5119 for (unsigned i = 0; i != 16; ++i) 5120 ShufIdxs[i] = (i&3)+12; 5121 break; 5122 case OP_VSLDOI4: 5123 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl); 5124 case OP_VSLDOI8: 5125 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl); 5126 case OP_VSLDOI12: 5127 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl); 5128 } 5129 EVT VT = OpLHS.getValueType(); 5130 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS); 5131 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS); 5132 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs); 5133 return DAG.getNode(ISD::BITCAST, dl, VT, T); 5134 } 5135 5136 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 5137 /// is a shuffle we can handle in a single instruction, return it. Otherwise, 5138 /// return the code it can be lowered into. Worst case, it can always be 5139 /// lowered into a vperm. 5140 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 5141 SelectionDAG &DAG) const { 5142 DebugLoc dl = Op.getDebugLoc(); 5143 SDValue V1 = Op.getOperand(0); 5144 SDValue V2 = Op.getOperand(1); 5145 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5146 EVT VT = Op.getValueType(); 5147 5148 // Cases that are handled by instructions that take permute immediates 5149 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 5150 // selected by the instruction selector. 
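// For example, a v16i8 mask that replicates element 3 into every lane
// satisfies isSplatShuffleMask(SVOp, 1) and is later matched to a single
// vspltb, so returning the unmodified node here is the cheapest option.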
5151 if (V2.getOpcode() == ISD::UNDEF) { 5152 if (PPC::isSplatShuffleMask(SVOp, 1) || 5153 PPC::isSplatShuffleMask(SVOp, 2) || 5154 PPC::isSplatShuffleMask(SVOp, 4) || 5155 PPC::isVPKUWUMShuffleMask(SVOp, true) || 5156 PPC::isVPKUHUMShuffleMask(SVOp, true) || 5157 PPC::isVSLDOIShuffleMask(SVOp, true) != -1 || 5158 PPC::isVMRGLShuffleMask(SVOp, 1, true) || 5159 PPC::isVMRGLShuffleMask(SVOp, 2, true) || 5160 PPC::isVMRGLShuffleMask(SVOp, 4, true) || 5161 PPC::isVMRGHShuffleMask(SVOp, 1, true) || 5162 PPC::isVMRGHShuffleMask(SVOp, 2, true) || 5163 PPC::isVMRGHShuffleMask(SVOp, 4, true)) { 5164 return Op; 5165 } 5166 } 5167 5168 // Altivec has a variety of "shuffle immediates" that take two vector inputs 5169 // and produce a fixed permutation. If any of these match, do not lower to 5170 // VPERM. 5171 if (PPC::isVPKUWUMShuffleMask(SVOp, false) || 5172 PPC::isVPKUHUMShuffleMask(SVOp, false) || 5173 PPC::isVSLDOIShuffleMask(SVOp, false) != -1 || 5174 PPC::isVMRGLShuffleMask(SVOp, 1, false) || 5175 PPC::isVMRGLShuffleMask(SVOp, 2, false) || 5176 PPC::isVMRGLShuffleMask(SVOp, 4, false) || 5177 PPC::isVMRGHShuffleMask(SVOp, 1, false) || 5178 PPC::isVMRGHShuffleMask(SVOp, 2, false) || 5179 PPC::isVMRGHShuffleMask(SVOp, 4, false)) 5180 return Op; 5181 5182 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 5183 // perfect shuffle table to emit an optimal matching sequence. 5184 ArrayRef<int> PermMask = SVOp->getMask(); 5185 5186 unsigned PFIndexes[4]; 5187 bool isFourElementShuffle = true; 5188 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 5189 unsigned EltNo = 8; // Start out undef. 5190 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 5191 if (PermMask[i*4+j] < 0) 5192 continue; // Undef, ignore it. 5193 5194 unsigned ByteSource = PermMask[i*4+j]; 5195 if ((ByteSource & 3) != j) { 5196 isFourElementShuffle = false; 5197 break; 5198 } 5199 5200 if (EltNo == 8) { 5201 EltNo = ByteSource/4; 5202 } else if (EltNo != ByteSource/4) { 5203 isFourElementShuffle = false; 5204 break; 5205 } 5206 } 5207 PFIndexes[i] = EltNo; 5208 } 5209 5210 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 5211 // perfect shuffle vector to determine if it is cost effective to do this as 5212 // discrete instructions, or whether we should use a vperm. 5213 if (isFourElementShuffle) { 5214 // Compute the index in the perfect shuffle table. 5215 unsigned PFTableIndex = 5216 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 5217 5218 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 5219 unsigned Cost = (PFEntry >> 30); 5220 5221 // Determining when to avoid vperm is tricky. Many things affect the cost 5222 // of vperm, particularly how many times the perm mask needs to be computed. 5223 // For example, if the perm mask can be hoisted out of a loop or is already 5224 // used (perhaps because there are multiple permutes with the same shuffle 5225 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 5226 // the loop requires an extra register. 5227 // 5228 // As a compromise, we only emit discrete instructions if the shuffle can be 5229 // generated in 3 or fewer operations. When we have loop information 5230 // available, if this block is within a loop, we should avoid using vperm 5231 // for 3-operation perms and use a constant pool load instead. 
5232 if (Cost < 3) 5233 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 5234 } 5235 5236 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 5237 // vector that will get spilled to the constant pool. 5238 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 5239 5240 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 5241 // that it is in input element units, not in bytes. Convert now. 5242 EVT EltVT = V1.getValueType().getVectorElementType(); 5243 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 5244 5245 SmallVector<SDValue, 16> ResultMask; 5246 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 5247 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i]; 5248 5249 for (unsigned j = 0; j != BytesPerElement; ++j) 5250 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j, 5251 MVT::i32)); 5252 } 5253 5254 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, 5255 &ResultMask[0], ResultMask.size()); 5256 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), V1, V2, VPermMask); 5257 } 5258 5259 /// getAltivecCompareInfo - Given an intrinsic, return false if it is not an 5260 /// altivec comparison. If it is, return true and fill in Opc/isDot with 5261 /// information about the intrinsic. 5262 static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, 5263 bool &isDot) { 5264 unsigned IntrinsicID = 5265 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 5266 CompareOpc = -1; 5267 isDot = false; 5268 switch (IntrinsicID) { 5269 default: return false; 5270 // Comparison predicates. 5271 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; 5272 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; 5273 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; 5274 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; 5275 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; 5276 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; 5277 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; 5278 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; 5279 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; 5280 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; 5281 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; 5282 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; 5283 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; 5284 5285 // Normal Comparisons. 
5286 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 5287 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 5288 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 5289 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 5290 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 5291 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 5292 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 5293 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 5294 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 5295 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 5296 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 5297 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 5298 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 5299 } 5300 return true; 5301 } 5302 5303 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 5304 /// lower, do it, otherwise return null. 5305 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 5306 SelectionDAG &DAG) const { 5307 // If this is a lowered altivec predicate compare, CompareOpc is set to the 5308 // opcode number of the comparison. 5309 DebugLoc dl = Op.getDebugLoc(); 5310 int CompareOpc; 5311 bool isDot; 5312 if (!getAltivecCompareInfo(Op, CompareOpc, isDot)) 5313 return SDValue(); // Don't custom lower most intrinsics. 5314 5315 // If this is a non-dot comparison, make the VCMP node and we are done. 5316 if (!isDot) { 5317 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 5318 Op.getOperand(1), Op.getOperand(2), 5319 DAG.getConstant(CompareOpc, MVT::i32)); 5320 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 5321 } 5322 5323 // Create the PPCISD altivec 'dot' comparison node. 5324 SDValue Ops[] = { 5325 Op.getOperand(2), // LHS 5326 Op.getOperand(3), // RHS 5327 DAG.getConstant(CompareOpc, MVT::i32) 5328 }; 5329 std::vector<EVT> VTs; 5330 VTs.push_back(Op.getOperand(2).getValueType()); 5331 VTs.push_back(MVT::Glue); 5332 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3); 5333 5334 // Now that we have the comparison, emit a copy from the CR to a GPR. 5335 // This is flagged to the above dot comparison. 5336 SDValue Flags = DAG.getNode(PPCISD::MFCR, dl, MVT::i32, 5337 DAG.getRegister(PPC::CR6, MVT::i32), 5338 CompNode.getValue(1)); 5339 5340 // Unpack the result based on how the target uses it. 5341 unsigned BitNo; // Bit # of CR6. 5342 bool InvertBit; // Invert result? 5343 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 5344 default: // Can't happen, don't crash on invalid number though. 5345 case 0: // Return the value of the EQ bit of CR6. 5346 BitNo = 0; InvertBit = false; 5347 break; 5348 case 1: // Return the inverted value of the EQ bit of CR6. 5349 BitNo = 0; InvertBit = true; 5350 break; 5351 case 2: // Return the value of the LT bit of CR6. 5352 BitNo = 2; InvertBit = false; 5353 break; 5354 case 3: // Return the inverted value of the LT bit of CR6. 5355 BitNo = 2; InvertBit = true; 5356 break; 5357 } 5358 5359 // Shift the bit into the low position. 5360 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags, 5361 DAG.getConstant(8-(3-BitNo), MVT::i32)); 5362 // Isolate the bit. 
5363 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags, 5364 DAG.getConstant(1, MVT::i32)); 5365 5366 // If we are supposed to, toggle the bit. 5367 if (InvertBit) 5368 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags, 5369 DAG.getConstant(1, MVT::i32)); 5370 return Flags; 5371 } 5372 5373 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, 5374 SelectionDAG &DAG) const { 5375 DebugLoc dl = Op.getDebugLoc(); 5376 // Create a stack slot that is 16-byte aligned. 5377 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 5378 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 5379 EVT PtrVT = getPointerTy(); 5380 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 5381 5382 // Store the input value into Value#0 of the stack slot. 5383 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, 5384 Op.getOperand(0), FIdx, MachinePointerInfo(), 5385 false, false, 0); 5386 // Load it out. 5387 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo(), 5388 false, false, false, 0); 5389 } 5390 5391 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 5392 DebugLoc dl = Op.getDebugLoc(); 5393 if (Op.getValueType() == MVT::v4i32) { 5394 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 5395 5396 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl); 5397 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt. 5398 5399 SDValue RHSSwap = // = vrlw RHS, 16 5400 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 5401 5402 // Shrinkify inputs to v8i16. 5403 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS); 5404 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS); 5405 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap); 5406 5407 // Low parts multiplied together, generating 32-bit results (we ignore the 5408 // top parts). 5409 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 5410 LHS, RHS, DAG, dl, MVT::v4i32); 5411 5412 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 5413 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 5414 // Shift the high parts up 16 bits. 5415 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 5416 Neg16, DAG, dl); 5417 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 5418 } else if (Op.getValueType() == MVT::v8i16) { 5419 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 5420 5421 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl); 5422 5423 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 5424 LHS, RHS, Zero, DAG, dl); 5425 } else if (Op.getValueType() == MVT::v16i8) { 5426 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 5427 5428 // Multiply the even 8-bit parts, producing 16-bit sums. 5429 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 5430 LHS, RHS, DAG, dl, MVT::v8i16); 5431 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts); 5432 5433 // Multiply the odd 8-bit parts, producing 16-bit sums. 5434 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 5435 LHS, RHS, DAG, dl, MVT::v8i16); 5436 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts); 5437 5438 // Merge the results together. 
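// Each 16-bit product carries the 8-bit result we want in its low-order byte,
// which in AltiVec's big-endian element order is the odd byte of the
// halfword; the shuffle below interleaves bytes 1,3,5,... of EvenParts with
// bytes 1,3,5,... of OddParts (shuffle indices 17,19,...) to assemble the
// v16i8 product.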
5439 int Ops[16]; 5440 for (unsigned i = 0; i != 8; ++i) { 5441 Ops[i*2 ] = 2*i+1; 5442 Ops[i*2+1] = 2*i+1+16; 5443 } 5444 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 5445 } else { 5446 llvm_unreachable("Unknown mul to lower!"); 5447 } 5448 } 5449 5450 /// LowerOperation - Provide custom lowering hooks for some operations. 5451 /// 5452 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 5453 switch (Op.getOpcode()) { 5454 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 5455 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 5456 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 5457 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 5458 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 5459 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 5460 case ISD::SETCC: return LowerSETCC(Op, DAG); 5461 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 5462 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 5463 case ISD::VASTART: 5464 return LowerVASTART(Op, DAG, PPCSubTarget); 5465 5466 case ISD::VAARG: 5467 return LowerVAARG(Op, DAG, PPCSubTarget); 5468 5469 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget); 5470 case ISD::DYNAMIC_STACKALLOC: 5471 return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget); 5472 5473 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 5474 case ISD::FP_TO_UINT: 5475 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, 5476 Op.getDebugLoc()); 5477 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 5478 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 5479 5480 // Lower 64-bit shifts. 5481 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 5482 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 5483 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 5484 5485 // Vector-related lowering. 5486 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 5487 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 5488 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 5489 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 5490 case ISD::MUL: return LowerMUL(Op, DAG); 5491 5492 // Frame & Return address. 
5493 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 5494 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 5495 } 5496 } 5497 5498 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 5499 SmallVectorImpl<SDValue>&Results, 5500 SelectionDAG &DAG) const { 5501 const TargetMachine &TM = getTargetMachine(); 5502 DebugLoc dl = N->getDebugLoc(); 5503 switch (N->getOpcode()) { 5504 default: 5505 llvm_unreachable("Do not know how to custom type legalize this operation!"); 5506 case ISD::VAARG: { 5507 if (!TM.getSubtarget<PPCSubtarget>().isSVR4ABI() 5508 || TM.getSubtarget<PPCSubtarget>().isPPC64()) 5509 return; 5510 5511 EVT VT = N->getValueType(0); 5512 5513 if (VT == MVT::i64) { 5514 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG, PPCSubTarget); 5515 5516 Results.push_back(NewNode); 5517 Results.push_back(NewNode.getValue(1)); 5518 } 5519 return; 5520 } 5521 case ISD::FP_ROUND_INREG: { 5522 assert(N->getValueType(0) == MVT::ppcf128); 5523 assert(N->getOperand(0).getValueType() == MVT::ppcf128); 5524 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 5525 MVT::f64, N->getOperand(0), 5526 DAG.getIntPtrConstant(0)); 5527 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 5528 MVT::f64, N->getOperand(0), 5529 DAG.getIntPtrConstant(1)); 5530 5531 // This sequence changes FPSCR to do round-to-zero, adds the two halves 5532 // of the long double, and puts FPSCR back the way it was. We do not 5533 // actually model FPSCR. 5534 std::vector<EVT> NodeTys; 5535 SDValue Ops[4], Result, MFFSreg, InFlag, FPreg; 5536 5537 NodeTys.push_back(MVT::f64); // Return register 5538 NodeTys.push_back(MVT::Glue); // Returns a flag for later insns 5539 Result = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0); 5540 MFFSreg = Result.getValue(0); 5541 InFlag = Result.getValue(1); 5542 5543 NodeTys.clear(); 5544 NodeTys.push_back(MVT::Glue); // Returns a flag 5545 Ops[0] = DAG.getConstant(31, MVT::i32); 5546 Ops[1] = InFlag; 5547 Result = DAG.getNode(PPCISD::MTFSB1, dl, NodeTys, Ops, 2); 5548 InFlag = Result.getValue(0); 5549 5550 NodeTys.clear(); 5551 NodeTys.push_back(MVT::Glue); // Returns a flag 5552 Ops[0] = DAG.getConstant(30, MVT::i32); 5553 Ops[1] = InFlag; 5554 Result = DAG.getNode(PPCISD::MTFSB0, dl, NodeTys, Ops, 2); 5555 InFlag = Result.getValue(0); 5556 5557 NodeTys.clear(); 5558 NodeTys.push_back(MVT::f64); // result of add 5559 NodeTys.push_back(MVT::Glue); // Returns a flag 5560 Ops[0] = Lo; 5561 Ops[1] = Hi; 5562 Ops[2] = InFlag; 5563 Result = DAG.getNode(PPCISD::FADDRTZ, dl, NodeTys, Ops, 3); 5564 FPreg = Result.getValue(0); 5565 InFlag = Result.getValue(1); 5566 5567 NodeTys.clear(); 5568 NodeTys.push_back(MVT::f64); 5569 Ops[0] = DAG.getConstant(1, MVT::i32); 5570 Ops[1] = MFFSreg; 5571 Ops[2] = FPreg; 5572 Ops[3] = InFlag; 5573 Result = DAG.getNode(PPCISD::MTFSF, dl, NodeTys, Ops, 4); 5574 FPreg = Result.getValue(0); 5575 5576 // We know the low half is about to be thrown away, so just use something 5577 // convenient. 
5578 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128, 5579 FPreg, FPreg)); 5580 return; 5581 } 5582 case ISD::FP_TO_SINT: 5583 Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl)); 5584 return; 5585 } 5586 } 5587 5588 5589 //===----------------------------------------------------------------------===// 5590 // Other Lowering Code 5591 //===----------------------------------------------------------------------===// 5592 5593 MachineBasicBlock * 5594 PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 5595 bool is64bit, unsigned BinOpcode) const { 5596 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 5597 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5598 5599 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5600 MachineFunction *F = BB->getParent(); 5601 MachineFunction::iterator It = BB; 5602 ++It; 5603 5604 unsigned dest = MI->getOperand(0).getReg(); 5605 unsigned ptrA = MI->getOperand(1).getReg(); 5606 unsigned ptrB = MI->getOperand(2).getReg(); 5607 unsigned incr = MI->getOperand(3).getReg(); 5608 DebugLoc dl = MI->getDebugLoc(); 5609 5610 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 5611 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 5612 F->insert(It, loopMBB); 5613 F->insert(It, exitMBB); 5614 exitMBB->splice(exitMBB->begin(), BB, 5615 llvm::next(MachineBasicBlock::iterator(MI)), 5616 BB->end()); 5617 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5618 5619 MachineRegisterInfo &RegInfo = F->getRegInfo(); 5620 unsigned TmpReg = (!BinOpcode) ? incr : 5621 RegInfo.createVirtualRegister( 5622 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass : 5623 (const TargetRegisterClass *) &PPC::GPRCRegClass); 5624 5625 // thisMBB: 5626 // ... 5627 // fallthrough --> loopMBB 5628 BB->addSuccessor(loopMBB); 5629 5630 // loopMBB: 5631 // l[wd]arx dest, ptr 5632 // add r0, dest, incr 5633 // st[wd]cx. r0, ptr 5634 // bne- loopMBB 5635 // fallthrough --> exitMBB 5636 BB = loopMBB; 5637 BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest) 5638 .addReg(ptrA).addReg(ptrB); 5639 if (BinOpcode) 5640 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 5641 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 5642 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 5643 BuildMI(BB, dl, TII->get(PPC::BCC)) 5644 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 5645 BB->addSuccessor(loopMBB); 5646 BB->addSuccessor(exitMBB); 5647 5648 // exitMBB: 5649 // ... 5650 BB = exitMBB; 5651 return BB; 5652 } 5653 5654 MachineBasicBlock * 5655 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI, 5656 MachineBasicBlock *BB, 5657 bool is8bit, // operation 5658 unsigned BinOpcode) const { 5659 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 5660 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5661 // In 64 bit mode we have to use 64 bits for addresses, even though the 5662 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 5663 // registers without caring whether they're 32 or 64, but here we're 5664 // doing actual arithmetic on the addresses. 5665 bool is64bit = PPCSubTarget.isPPC64(); 5666 unsigned ZeroReg = is64bit ? 
PPC::X0 : PPC::R0; 5667 5668 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5669 MachineFunction *F = BB->getParent(); 5670 MachineFunction::iterator It = BB; 5671 ++It; 5672 5673 unsigned dest = MI->getOperand(0).getReg(); 5674 unsigned ptrA = MI->getOperand(1).getReg(); 5675 unsigned ptrB = MI->getOperand(2).getReg(); 5676 unsigned incr = MI->getOperand(3).getReg(); 5677 DebugLoc dl = MI->getDebugLoc(); 5678 5679 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 5680 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 5681 F->insert(It, loopMBB); 5682 F->insert(It, exitMBB); 5683 exitMBB->splice(exitMBB->begin(), BB, 5684 llvm::next(MachineBasicBlock::iterator(MI)), 5685 BB->end()); 5686 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5687 5688 MachineRegisterInfo &RegInfo = F->getRegInfo(); 5689 const TargetRegisterClass *RC = 5690 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass : 5691 (const TargetRegisterClass *) &PPC::GPRCRegClass; 5692 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 5693 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 5694 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 5695 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC); 5696 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 5697 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 5698 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 5699 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 5700 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC); 5701 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 5702 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 5703 unsigned Ptr1Reg; 5704 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC); 5705 5706 // thisMBB: 5707 // ... 5708 // fallthrough --> loopMBB 5709 BB->addSuccessor(loopMBB); 5710 5711 // The 4-byte load must be aligned, while a char or short may be 5712 // anywhere in the word. Hence all this nasty bookkeeping code. 5713 // add ptr1, ptrA, ptrB [copy if ptrA==0] 5714 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 5715 // xori shift, shift1, 24 [16] 5716 // rlwinm ptr, ptr1, 0, 0, 29 5717 // slw incr2, incr, shift 5718 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 5719 // slw mask, mask2, shift 5720 // loopMBB: 5721 // lwarx tmpDest, ptr 5722 // add tmp, tmpDest, incr2 5723 // andc tmp2, tmpDest, mask 5724 // and tmp3, tmp, mask 5725 // or tmp4, tmp3, tmp2 5726 // stwcx. tmp4, ptr 5727 // bne- loopMBB 5728 // fallthrough --> exitMBB 5729 // srw dest, tmpDest, shift 5730 if (ptrA != ZeroReg) { 5731 Ptr1Reg = RegInfo.createVirtualRegister(RC); 5732 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 5733 .addReg(ptrA).addReg(ptrB); 5734 } else { 5735 Ptr1Reg = ptrB; 5736 } 5737 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 5738 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 5739 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 5740 .addReg(Shift1Reg).addImm(is8bit ? 
24 : 16); 5741 if (is64bit) 5742 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 5743 .addReg(Ptr1Reg).addImm(0).addImm(61); 5744 else 5745 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 5746 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 5747 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg) 5748 .addReg(incr).addReg(ShiftReg); 5749 if (is8bit) 5750 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 5751 else { 5752 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 5753 BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535); 5754 } 5755 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 5756 .addReg(Mask2Reg).addReg(ShiftReg); 5757 5758 BB = loopMBB; 5759 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 5760 .addReg(ZeroReg).addReg(PtrReg); 5761 if (BinOpcode) 5762 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 5763 .addReg(Incr2Reg).addReg(TmpDestReg); 5764 BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg) 5765 .addReg(TmpDestReg).addReg(MaskReg); 5766 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg) 5767 .addReg(TmpReg).addReg(MaskReg); 5768 BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg) 5769 .addReg(Tmp3Reg).addReg(Tmp2Reg); 5770 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 5771 .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg); 5772 BuildMI(BB, dl, TII->get(PPC::BCC)) 5773 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 5774 BB->addSuccessor(loopMBB); 5775 BB->addSuccessor(exitMBB); 5776 5777 // exitMBB: 5778 // ... 5779 BB = exitMBB; 5780 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg) 5781 .addReg(ShiftReg); 5782 return BB; 5783 } 5784 5785 MachineBasicBlock * 5786 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 5787 MachineBasicBlock *BB) const { 5788 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5789 5790 // To "insert" these instructions we actually have to insert their 5791 // control-flow patterns. 5792 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5793 MachineFunction::iterator It = BB; 5794 ++It; 5795 5796 MachineFunction *F = BB->getParent(); 5797 5798 if (PPCSubTarget.hasISEL() && (MI->getOpcode() == PPC::SELECT_CC_I4 || 5799 MI->getOpcode() == PPC::SELECT_CC_I8)) { 5800 unsigned OpCode = MI->getOpcode() == PPC::SELECT_CC_I8 ? 5801 PPC::ISEL8 : PPC::ISEL; 5802 unsigned SelectPred = MI->getOperand(4).getImm(); 5803 DebugLoc dl = MI->getDebugLoc(); 5804 5805 // The SelectPred is ((BI << 5) | BO) for a BCC 5806 unsigned BO = SelectPred & 0xF; 5807 assert((BO == 12 || BO == 4) && "invalid predicate BO field for isel"); 5808 5809 unsigned TrueOpNo, FalseOpNo; 5810 if (BO == 12) { 5811 TrueOpNo = 2; 5812 FalseOpNo = 3; 5813 } else { 5814 TrueOpNo = 3; 5815 FalseOpNo = 2; 5816 SelectPred = PPC::InvertPredicate((PPC::Predicate)SelectPred); 5817 } 5818 5819 BuildMI(*BB, MI, dl, TII->get(OpCode), MI->getOperand(0).getReg()) 5820 .addReg(MI->getOperand(TrueOpNo).getReg()) 5821 .addReg(MI->getOperand(FalseOpNo).getReg()) 5822 .addImm(SelectPred).addReg(MI->getOperand(1).getReg()); 5823 } else if (MI->getOpcode() == PPC::SELECT_CC_I4 || 5824 MI->getOpcode() == PPC::SELECT_CC_I8 || 5825 MI->getOpcode() == PPC::SELECT_CC_F4 || 5826 MI->getOpcode() == PPC::SELECT_CC_F8 || 5827 MI->getOpcode() == PPC::SELECT_CC_VRRC) { 5828 5829 5830 // The incoming instruction knows the destination vreg to set, the 5831 // condition code register to branch on, the true/false values to 5832 // select between, and a branch opcode to use. 
5833 5834 // thisMBB: 5835 // ... 5836 // TrueVal = ... 5837 // cmpTY ccX, r1, r2 5838 // bCC copy1MBB 5839 // fallthrough --> copy0MBB 5840 MachineBasicBlock *thisMBB = BB; 5841 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 5842 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 5843 unsigned SelectPred = MI->getOperand(4).getImm(); 5844 DebugLoc dl = MI->getDebugLoc(); 5845 F->insert(It, copy0MBB); 5846 F->insert(It, sinkMBB); 5847 5848 // Transfer the remainder of BB and its successor edges to sinkMBB. 5849 sinkMBB->splice(sinkMBB->begin(), BB, 5850 llvm::next(MachineBasicBlock::iterator(MI)), 5851 BB->end()); 5852 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 5853 5854 // Next, add the true and fallthrough blocks as its successors. 5855 BB->addSuccessor(copy0MBB); 5856 BB->addSuccessor(sinkMBB); 5857 5858 BuildMI(BB, dl, TII->get(PPC::BCC)) 5859 .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 5860 5861 // copy0MBB: 5862 // %FalseValue = ... 5863 // # fallthrough to sinkMBB 5864 BB = copy0MBB; 5865 5866 // Update machine-CFG edges 5867 BB->addSuccessor(sinkMBB); 5868 5869 // sinkMBB: 5870 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 5871 // ... 5872 BB = sinkMBB; 5873 BuildMI(*BB, BB->begin(), dl, 5874 TII->get(PPC::PHI), MI->getOperand(0).getReg()) 5875 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB) 5876 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 5877 } 5878 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 5879 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 5880 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 5881 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 5882 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 5883 BB = EmitAtomicBinary(MI, BB, false, PPC::ADD4); 5884 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 5885 BB = EmitAtomicBinary(MI, BB, true, PPC::ADD8); 5886 5887 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 5888 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 5889 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 5890 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 5891 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 5892 BB = EmitAtomicBinary(MI, BB, false, PPC::AND); 5893 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 5894 BB = EmitAtomicBinary(MI, BB, true, PPC::AND8); 5895 5896 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 5897 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 5898 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 5899 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 5900 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 5901 BB = EmitAtomicBinary(MI, BB, false, PPC::OR); 5902 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 5903 BB = EmitAtomicBinary(MI, BB, true, PPC::OR8); 5904 5905 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 5906 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 5907 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 5908 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 5909 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 5910 BB = EmitAtomicBinary(MI, BB, false, PPC::XOR); 5911 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 5912 BB = EmitAtomicBinary(MI, BB, true, PPC::XOR8); 5913 5914 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 5915 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ANDC); 5916 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 5917 BB = 
EmitPartwordAtomicBinary(MI, BB, false, PPC::ANDC); 5918 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 5919 BB = EmitAtomicBinary(MI, BB, false, PPC::ANDC); 5920 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 5921 BB = EmitAtomicBinary(MI, BB, true, PPC::ANDC8); 5922 5923 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 5924 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 5925 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 5926 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 5927 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32) 5928 BB = EmitAtomicBinary(MI, BB, false, PPC::SUBF); 5929 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 5930 BB = EmitAtomicBinary(MI, BB, true, PPC::SUBF8); 5931 5932 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8) 5933 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 5934 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16) 5935 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 5936 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32) 5937 BB = EmitAtomicBinary(MI, BB, false, 0); 5938 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64) 5939 BB = EmitAtomicBinary(MI, BB, true, 0); 5940 5941 else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 5942 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64) { 5943 bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64; 5944 5945 unsigned dest = MI->getOperand(0).getReg(); 5946 unsigned ptrA = MI->getOperand(1).getReg(); 5947 unsigned ptrB = MI->getOperand(2).getReg(); 5948 unsigned oldval = MI->getOperand(3).getReg(); 5949 unsigned newval = MI->getOperand(4).getReg(); 5950 DebugLoc dl = MI->getDebugLoc(); 5951 5952 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 5953 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 5954 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 5955 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 5956 F->insert(It, loop1MBB); 5957 F->insert(It, loop2MBB); 5958 F->insert(It, midMBB); 5959 F->insert(It, exitMBB); 5960 exitMBB->splice(exitMBB->begin(), BB, 5961 llvm::next(MachineBasicBlock::iterator(MI)), 5962 BB->end()); 5963 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5964 5965 // thisMBB: 5966 // ... 5967 // fallthrough --> loopMBB 5968 BB->addSuccessor(loop1MBB); 5969 5970 // loop1MBB: 5971 // l[wd]arx dest, ptr 5972 // cmp[wd] dest, oldval 5973 // bne- midMBB 5974 // loop2MBB: 5975 // st[wd]cx. newval, ptr 5976 // bne- loopMBB 5977 // b exitBB 5978 // midMBB: 5979 // st[wd]cx. dest, ptr 5980 // exitBB: 5981 BB = loop1MBB; 5982 BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest) 5983 .addReg(ptrA).addReg(ptrB); 5984 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0) 5985 .addReg(oldval).addReg(dest); 5986 BuildMI(BB, dl, TII->get(PPC::BCC)) 5987 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 5988 BB->addSuccessor(loop2MBB); 5989 BB->addSuccessor(midMBB); 5990 5991 BB = loop2MBB; 5992 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 5993 .addReg(newval).addReg(ptrA).addReg(ptrB); 5994 BuildMI(BB, dl, TII->get(PPC::BCC)) 5995 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 5996 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 5997 BB->addSuccessor(loop1MBB); 5998 BB->addSuccessor(exitMBB); 5999 6000 BB = midMBB; 6001 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 6002 .addReg(dest).addReg(ptrA).addReg(ptrB); 6003 BB->addSuccessor(exitMBB); 6004 6005 // exitMBB: 6006 // ... 
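    // For the word/doubleword case the value returned by l[wd]arx is already
    // the full-width old value, so dest can be used directly here; only the
    // partword variant below has to shift the loaded field back down before
    // returning it.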
6007 BB = exitMBB; 6008 } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 6009 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 6010 // We must use 64-bit registers for addresses when targeting 64-bit, 6011 // since we're actually doing arithmetic on them. Other registers 6012 // can be 32-bit. 6013 bool is64bit = PPCSubTarget.isPPC64(); 6014 bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 6015 6016 unsigned dest = MI->getOperand(0).getReg(); 6017 unsigned ptrA = MI->getOperand(1).getReg(); 6018 unsigned ptrB = MI->getOperand(2).getReg(); 6019 unsigned oldval = MI->getOperand(3).getReg(); 6020 unsigned newval = MI->getOperand(4).getReg(); 6021 DebugLoc dl = MI->getDebugLoc(); 6022 6023 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 6024 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 6025 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 6026 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 6027 F->insert(It, loop1MBB); 6028 F->insert(It, loop2MBB); 6029 F->insert(It, midMBB); 6030 F->insert(It, exitMBB); 6031 exitMBB->splice(exitMBB->begin(), BB, 6032 llvm::next(MachineBasicBlock::iterator(MI)), 6033 BB->end()); 6034 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 6035 6036 MachineRegisterInfo &RegInfo = F->getRegInfo(); 6037 const TargetRegisterClass *RC = 6038 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass : 6039 (const TargetRegisterClass *) &PPC::GPRCRegClass; 6040 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 6041 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 6042 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 6043 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC); 6044 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC); 6045 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC); 6046 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC); 6047 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 6048 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 6049 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 6050 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 6051 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 6052 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 6053 unsigned Ptr1Reg; 6054 unsigned TmpReg = RegInfo.createVirtualRegister(RC); 6055 unsigned ZeroReg = is64bit ? PPC::X0 : PPC::R0; 6056 // thisMBB: 6057 // ... 6058 // fallthrough --> loopMBB 6059 BB->addSuccessor(loop1MBB); 6060 6061 // The 4-byte load must be aligned, while a char or short may be 6062 // anywhere in the word. Hence all this nasty bookkeeping code. 6063 // add ptr1, ptrA, ptrB [copy if ptrA==0] 6064 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 6065 // xori shift, shift1, 24 [16] 6066 // rlwinm ptr, ptr1, 0, 0, 29 6067 // slw newval2, newval, shift 6068 // slw oldval2, oldval,shift 6069 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 6070 // slw mask, mask2, shift 6071 // and newval3, newval2, mask 6072 // and oldval3, oldval2, mask 6073 // loop1MBB: 6074 // lwarx tmpDest, ptr 6075 // and tmp, tmpDest, mask 6076 // cmpw tmp, oldval3 6077 // bne- midMBB 6078 // loop2MBB: 6079 // andc tmp2, tmpDest, mask 6080 // or tmp4, tmp2, newval3 6081 // stwcx. tmp4, ptr 6082 // bne- loop1MBB 6083 // b exitBB 6084 // midMBB: 6085 // stwcx. tmpDest, ptr 6086 // exitBB: 6087 // srw dest, tmpDest, shift 6088 if (ptrA != ZeroReg) { 6089 Ptr1Reg = RegInfo.createVirtualRegister(RC); 6090 BuildMI(BB, dl, TII->get(is64bit ? 
PPC::ADD8 : PPC::ADD4), Ptr1Reg) 6091 .addReg(ptrA).addReg(ptrB); 6092 } else { 6093 Ptr1Reg = ptrB; 6094 } 6095 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 6096 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 6097 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 6098 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 6099 if (is64bit) 6100 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 6101 .addReg(Ptr1Reg).addImm(0).addImm(61); 6102 else 6103 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 6104 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 6105 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 6106 .addReg(newval).addReg(ShiftReg); 6107 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 6108 .addReg(oldval).addReg(ShiftReg); 6109 if (is8bit) 6110 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 6111 else { 6112 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 6113 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 6114 .addReg(Mask3Reg).addImm(65535); 6115 } 6116 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 6117 .addReg(Mask2Reg).addReg(ShiftReg); 6118 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 6119 .addReg(NewVal2Reg).addReg(MaskReg); 6120 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 6121 .addReg(OldVal2Reg).addReg(MaskReg); 6122 6123 BB = loop1MBB; 6124 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 6125 .addReg(ZeroReg).addReg(PtrReg); 6126 BuildMI(BB, dl, TII->get(PPC::AND),TmpReg) 6127 .addReg(TmpDestReg).addReg(MaskReg); 6128 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 6129 .addReg(TmpReg).addReg(OldVal3Reg); 6130 BuildMI(BB, dl, TII->get(PPC::BCC)) 6131 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 6132 BB->addSuccessor(loop2MBB); 6133 BB->addSuccessor(midMBB); 6134 6135 BB = loop2MBB; 6136 BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg) 6137 .addReg(TmpDestReg).addReg(MaskReg); 6138 BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg) 6139 .addReg(Tmp2Reg).addReg(NewVal3Reg); 6140 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg) 6141 .addReg(ZeroReg).addReg(PtrReg); 6142 BuildMI(BB, dl, TII->get(PPC::BCC)) 6143 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 6144 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 6145 BB->addSuccessor(loop1MBB); 6146 BB->addSuccessor(exitMBB); 6147 6148 BB = midMBB; 6149 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg) 6150 .addReg(ZeroReg).addReg(PtrReg); 6151 BB->addSuccessor(exitMBB); 6152 6153 // exitMBB: 6154 // ... 6155 BB = exitMBB; 6156 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg) 6157 .addReg(ShiftReg); 6158 } else { 6159 llvm_unreachable("Unexpected instr type to insert"); 6160 } 6161 6162 MI->eraseFromParent(); // The pseudo instruction is gone now. 6163 return BB; 6164 } 6165 6166 //===----------------------------------------------------------------------===// 6167 // Target Optimization Hooks 6168 //===----------------------------------------------------------------------===// 6169 6170 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, 6171 DAGCombinerInfo &DCI) const { 6172 const TargetMachine &TM = getTargetMachine(); 6173 SelectionDAG &DAG = DCI.DAG; 6174 DebugLoc dl = N->getDebugLoc(); 6175 switch (N->getOpcode()) { 6176 default: break; 6177 case PPCISD::SHL: 6178 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 6179 if (C->isNullValue()) // 0 << V -> 0. 
6180 return N->getOperand(0); 6181 } 6182 break; 6183 case PPCISD::SRL: 6184 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 6185 if (C->isNullValue()) // 0 >>u V -> 0. 6186 return N->getOperand(0); 6187 } 6188 break; 6189 case PPCISD::SRA: 6190 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 6191 if (C->isNullValue() || // 0 >>s V -> 0. 6192 C->isAllOnesValue()) // -1 >>s V -> -1. 6193 return N->getOperand(0); 6194 } 6195 break; 6196 6197 case ISD::SINT_TO_FP: 6198 if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) { 6199 if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) { 6200 // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores. 6201 // We allow the src/dst to be either f32/f64, but the intermediate 6202 // type must be i64. 6203 if (N->getOperand(0).getValueType() == MVT::i64 && 6204 N->getOperand(0).getOperand(0).getValueType() != MVT::ppcf128) { 6205 SDValue Val = N->getOperand(0).getOperand(0); 6206 if (Val.getValueType() == MVT::f32) { 6207 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 6208 DCI.AddToWorklist(Val.getNode()); 6209 } 6210 6211 Val = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Val); 6212 DCI.AddToWorklist(Val.getNode()); 6213 Val = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Val); 6214 DCI.AddToWorklist(Val.getNode()); 6215 if (N->getValueType(0) == MVT::f32) { 6216 Val = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Val, 6217 DAG.getIntPtrConstant(0)); 6218 DCI.AddToWorklist(Val.getNode()); 6219 } 6220 return Val; 6221 } else if (N->getOperand(0).getValueType() == MVT::i32) { 6222 // If the intermediate type is i32, we can avoid the load/store here 6223 // too. 6224 } 6225 } 6226 } 6227 break; 6228 case ISD::STORE: 6229 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)). 6230 if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() && 6231 !cast<StoreSDNode>(N)->isTruncatingStore() && 6232 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT && 6233 N->getOperand(1).getValueType() == MVT::i32 && 6234 N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) { 6235 SDValue Val = N->getOperand(1).getOperand(0); 6236 if (Val.getValueType() == MVT::f32) { 6237 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 6238 DCI.AddToWorklist(Val.getNode()); 6239 } 6240 Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val); 6241 DCI.AddToWorklist(Val.getNode()); 6242 6243 Val = DAG.getNode(PPCISD::STFIWX, dl, MVT::Other, N->getOperand(0), Val, 6244 N->getOperand(2), N->getOperand(3)); 6245 DCI.AddToWorklist(Val.getNode()); 6246 return Val; 6247 } 6248 6249 // Turn STORE (BSWAP) -> sthbrx/stwbrx. 6250 if (cast<StoreSDNode>(N)->isUnindexed() && 6251 N->getOperand(1).getOpcode() == ISD::BSWAP && 6252 N->getOperand(1).getNode()->hasOneUse() && 6253 (N->getOperand(1).getValueType() == MVT::i32 || 6254 N->getOperand(1).getValueType() == MVT::i16)) { 6255 SDValue BSwapOp = N->getOperand(1).getOperand(0); 6256 // Do an any-extend to 32-bits if this is a half-word input. 6257 if (BSwapOp.getValueType() == MVT::i16) 6258 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp); 6259 6260 SDValue Ops[] = { 6261 N->getOperand(0), BSwapOp, N->getOperand(2), 6262 DAG.getValueType(N->getOperand(1).getValueType()) 6263 }; 6264 return 6265 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other), 6266 Ops, array_lengthof(Ops), 6267 cast<StoreSDNode>(N)->getMemoryVT(), 6268 cast<StoreSDNode>(N)->getMemOperand()); 6269 } 6270 break; 6271 case ISD::BSWAP: 6272 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 
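    // Roughly: (i32 (bswap (load p))) becomes a single byte-reversed load
    // (PPCISD::LBRX), and for i16 the 32-bit result is truncated back down.
    // Both the original load and the bswap are then combined away below, with
    // the load's chain result redirected to the new node.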
6273 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && 6274 N->getOperand(0).hasOneUse() && 6275 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16)) { 6276 SDValue Load = N->getOperand(0); 6277 LoadSDNode *LD = cast<LoadSDNode>(Load); 6278 // Create the byte-swapping load. 6279 SDValue Ops[] = { 6280 LD->getChain(), // Chain 6281 LD->getBasePtr(), // Ptr 6282 DAG.getValueType(N->getValueType(0)) // VT 6283 }; 6284 SDValue BSLoad = 6285 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl, 6286 DAG.getVTList(MVT::i32, MVT::Other), Ops, 3, 6287 LD->getMemoryVT(), LD->getMemOperand()); 6288 6289 // If this is an i16 load, insert the truncate. 6290 SDValue ResVal = BSLoad; 6291 if (N->getValueType(0) == MVT::i16) 6292 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad); 6293 6294 // First, combine the bswap away. This makes the value produced by the 6295 // load dead. 6296 DCI.CombineTo(N, ResVal); 6297 6298 // Next, combine the load away, we give it a bogus result value but a real 6299 // chain result. The result value is dead because the bswap is dead. 6300 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); 6301 6302 // Return N so it doesn't get rechecked! 6303 return SDValue(N, 0); 6304 } 6305 6306 break; 6307 case PPCISD::VCMP: { 6308 // If a VCMPo node already exists with exactly the same operands as this 6309 // node, use its result instead of this node (VCMPo computes both a CR6 and 6310 // a normal output). 6311 // 6312 if (!N->getOperand(0).hasOneUse() && 6313 !N->getOperand(1).hasOneUse() && 6314 !N->getOperand(2).hasOneUse()) { 6315 6316 // Scan all of the users of the LHS, looking for VCMPo's that match. 6317 SDNode *VCMPoNode = 0; 6318 6319 SDNode *LHSN = N->getOperand(0).getNode(); 6320 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 6321 UI != E; ++UI) 6322 if (UI->getOpcode() == PPCISD::VCMPo && 6323 UI->getOperand(1) == N->getOperand(1) && 6324 UI->getOperand(2) == N->getOperand(2) && 6325 UI->getOperand(0) == N->getOperand(0)) { 6326 VCMPoNode = *UI; 6327 break; 6328 } 6329 6330 // If there is no VCMPo node, or if the flag value has a single use, don't 6331 // transform this. 6332 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 6333 break; 6334 6335 // Look at the (necessarily single) use of the flag value. If it has a 6336 // chain, this transformation is more complex. Note that multiple things 6337 // could use the value result, which we should ignore. 6338 SDNode *FlagUser = 0; 6339 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 6340 FlagUser == 0; ++UI) { 6341 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 6342 SDNode *User = *UI; 6343 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 6344 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) { 6345 FlagUser = User; 6346 break; 6347 } 6348 } 6349 } 6350 6351 // If the user is a MFCR instruction, we know this is safe. Otherwise we 6352 // give up for right now. 6353 if (FlagUser->getOpcode() == PPCISD::MFCR) 6354 return SDValue(VCMPoNode, 0); 6355 } 6356 break; 6357 } 6358 case ISD::BR_CC: { 6359 // If this is a branch on an altivec predicate comparison, lower this so 6360 // that we don't have to do a MFCR: instead, branch directly on CR6. This 6361 // lowering is done pre-legalize, because the legalizer lowers the predicate 6362 // compare down to code that is difficult to reassemble. 
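    // Concretely, a br_cc whose LHS is the i32 result of one of the Altivec
    // predicate intrinsics (e.g. vcmpequw_p) compared against 0 or 1 is
    // rewritten into the record-form vector compare (VCMPo), which sets CR6,
    // followed by a COND_BRANCH on the appropriate CR6 bit.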
6363 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 6364 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 6365 int CompareOpc; 6366 bool isDot; 6367 6368 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 6369 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 6370 getAltivecCompareInfo(LHS, CompareOpc, isDot)) { 6371 assert(isDot && "Can't compare against a vector result!"); 6372 6373 // If this is a comparison against something other than 0/1, then we know 6374 // that the condition is never/always true. 6375 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 6376 if (Val != 0 && Val != 1) { 6377 if (CC == ISD::SETEQ) // Cond never true, remove branch. 6378 return N->getOperand(0); 6379 // Always !=, turn it into an unconditional branch. 6380 return DAG.getNode(ISD::BR, dl, MVT::Other, 6381 N->getOperand(0), N->getOperand(4)); 6382 } 6383 6384 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 6385 6386 // Create the PPCISD altivec 'dot' comparison node. 6387 std::vector<EVT> VTs; 6388 SDValue Ops[] = { 6389 LHS.getOperand(2), // LHS of compare 6390 LHS.getOperand(3), // RHS of compare 6391 DAG.getConstant(CompareOpc, MVT::i32) 6392 }; 6393 VTs.push_back(LHS.getOperand(2).getValueType()); 6394 VTs.push_back(MVT::Glue); 6395 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3); 6396 6397 // Unpack the result based on how the target uses it. 6398 PPC::Predicate CompOpc; 6399 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 6400 default: // Can't happen, don't crash on invalid number though. 6401 case 0: // Branch on the value of the EQ bit of CR6. 6402 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 6403 break; 6404 case 1: // Branch on the inverted value of the EQ bit of CR6. 6405 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 6406 break; 6407 case 2: // Branch on the value of the LT bit of CR6. 6408 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 6409 break; 6410 case 3: // Branch on the inverted value of the LT bit of CR6. 6411 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT; 6412 break; 6413 } 6414 6415 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 6416 DAG.getConstant(CompOpc, MVT::i32), 6417 DAG.getRegister(PPC::CR6, MVT::i32), 6418 N->getOperand(4), CompNode.getValue(1)); 6419 } 6420 break; 6421 } 6422 } 6423 6424 return SDValue(); 6425 } 6426 6427 //===----------------------------------------------------------------------===// 6428 // Inline Assembly Support 6429 //===----------------------------------------------------------------------===// 6430 6431 void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 6432 APInt &KnownZero, 6433 APInt &KnownOne, 6434 const SelectionDAG &DAG, 6435 unsigned Depth) const { 6436 KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0); 6437 switch (Op.getOpcode()) { 6438 default: break; 6439 case PPCISD::LBRX: { 6440 // lhbrx is known to have the top bits cleared out. 
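    // The byte-reversed i16 load zero-extends its result to 32 bits, so the
    // upper half is known to be zero (hence the 0xFFFF0000 mask below).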
6441 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16) 6442 KnownZero = 0xFFFF0000; 6443 break; 6444 } 6445 case ISD::INTRINSIC_WO_CHAIN: { 6446 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) { 6447 default: break; 6448 case Intrinsic::ppc_altivec_vcmpbfp_p: 6449 case Intrinsic::ppc_altivec_vcmpeqfp_p: 6450 case Intrinsic::ppc_altivec_vcmpequb_p: 6451 case Intrinsic::ppc_altivec_vcmpequh_p: 6452 case Intrinsic::ppc_altivec_vcmpequw_p: 6453 case Intrinsic::ppc_altivec_vcmpgefp_p: 6454 case Intrinsic::ppc_altivec_vcmpgtfp_p: 6455 case Intrinsic::ppc_altivec_vcmpgtsb_p: 6456 case Intrinsic::ppc_altivec_vcmpgtsh_p: 6457 case Intrinsic::ppc_altivec_vcmpgtsw_p: 6458 case Intrinsic::ppc_altivec_vcmpgtub_p: 6459 case Intrinsic::ppc_altivec_vcmpgtuh_p: 6460 case Intrinsic::ppc_altivec_vcmpgtuw_p: 6461 KnownZero = ~1U; // All bits but the low one are known to be zero. 6462 break; 6463 } 6464 } 6465 } 6466 } 6467 6468 6469 /// getConstraintType - Given a constraint, return the type of 6470 /// constraint it is for this target. 6471 PPCTargetLowering::ConstraintType 6472 PPCTargetLowering::getConstraintType(const std::string &Constraint) const { 6473 if (Constraint.size() == 1) { 6474 switch (Constraint[0]) { 6475 default: break; 6476 case 'b': 6477 case 'r': 6478 case 'f': 6479 case 'v': 6480 case 'y': 6481 return C_RegisterClass; 6482 case 'Z': 6483 // FIXME: While Z does indicate a memory constraint, it specifically 6484 // indicates an r+r address (used in conjunction with the 'y' modifier 6485 // in the replacement string). Currently, we're forcing the base 6486 // register to be r0 in the asm printer (which is interpreted as zero) 6487 // and forming the complete address in the second register. This is 6488 // suboptimal. 6489 return C_Memory; 6490 } 6491 } 6492 return TargetLowering::getConstraintType(Constraint); 6493 } 6494 6495 /// Examine constraint type and operand type and determine a weight value. 6496 /// This object must already have been set up with the operand type 6497 /// and the current alternative constraint selected. 6498 TargetLowering::ConstraintWeight 6499 PPCTargetLowering::getSingleConstraintMatchWeight( 6500 AsmOperandInfo &info, const char *constraint) const { 6501 ConstraintWeight weight = CW_Invalid; 6502 Value *CallOperandVal = info.CallOperandVal; 6503 // If we don't have a value, we can't do a match, 6504 // but allow it at the lowest weight. 6505 if (CallOperandVal == NULL) 6506 return CW_Default; 6507 Type *type = CallOperandVal->getType(); 6508 // Look at the constraint type. 
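  // The single-letter constraints mirror the GCC rs6000 ones handled below
  // and in getRegForInlineAsmConstraint: 'b'/'r' for GPRs, 'f'/'d' for
  // scalar floating-point registers, 'v' for Altivec registers, 'y' for CR
  // registers, and 'Z' for a memory operand.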
6509 switch (*constraint) { 6510 default: 6511 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 6512 break; 6513 case 'b': 6514 if (type->isIntegerTy()) 6515 weight = CW_Register; 6516 break; 6517 case 'f': 6518 if (type->isFloatTy()) 6519 weight = CW_Register; 6520 break; 6521 case 'd': 6522 if (type->isDoubleTy()) 6523 weight = CW_Register; 6524 break; 6525 case 'v': 6526 if (type->isVectorTy()) 6527 weight = CW_Register; 6528 break; 6529 case 'y': 6530 weight = CW_Register; 6531 break; 6532 case 'Z': 6533 weight = CW_Memory; 6534 break; 6535 } 6536 return weight; 6537 } 6538 6539 std::pair<unsigned, const TargetRegisterClass*> 6540 PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 6541 EVT VT) const { 6542 if (Constraint.size() == 1) { 6543 // GCC RS6000 Constraint Letters 6544 switch (Constraint[0]) { 6545 case 'b': // R1-R31 6546 case 'r': // R0-R31 6547 if (VT == MVT::i64 && PPCSubTarget.isPPC64()) 6548 return std::make_pair(0U, &PPC::G8RCRegClass); 6549 return std::make_pair(0U, &PPC::GPRCRegClass); 6550 case 'f': 6551 if (VT == MVT::f32 || VT == MVT::i32) 6552 return std::make_pair(0U, &PPC::F4RCRegClass); 6553 if (VT == MVT::f64 || VT == MVT::i64) 6554 return std::make_pair(0U, &PPC::F8RCRegClass); 6555 break; 6556 case 'v': 6557 return std::make_pair(0U, &PPC::VRRCRegClass); 6558 case 'y': // crrc 6559 return std::make_pair(0U, &PPC::CRRCRegClass); 6560 } 6561 } 6562 6563 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 6564 } 6565 6566 6567 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 6568 /// vector. If it is invalid, don't add anything to Ops. 6569 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 6570 std::string &Constraint, 6571 std::vector<SDValue>&Ops, 6572 SelectionDAG &DAG) const { 6573 SDValue Result(0,0); 6574 6575 // Only support length 1 constraints. 6576 if (Constraint.length() > 1) return; 6577 6578 char Letter = Constraint[0]; 6579 switch (Letter) { 6580 default: break; 6581 case 'I': 6582 case 'J': 6583 case 'K': 6584 case 'L': 6585 case 'M': 6586 case 'N': 6587 case 'O': 6588 case 'P': { 6589 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op); 6590 if (!CST) return; // Must be an immediate to match. 6591 unsigned Value = CST->getZExtValue(); 6592 switch (Letter) { 6593 default: llvm_unreachable("Unknown constraint letter!"); 6594 case 'I': // "I" is a signed 16-bit constant. 6595 if ((short)Value == (int)Value) 6596 Result = DAG.getTargetConstant(Value, Op.getValueType()); 6597 break; 6598 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 6599 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. 6600 if ((short)Value == 0) 6601 Result = DAG.getTargetConstant(Value, Op.getValueType()); 6602 break; 6603 case 'K': // "K" is a constant with only the low-order 16 bits nonzero. 6604 if ((Value >> 16) == 0) 6605 Result = DAG.getTargetConstant(Value, Op.getValueType()); 6606 break; 6607 case 'M': // "M" is a constant that is greater than 31. 6608 if (Value > 31) 6609 Result = DAG.getTargetConstant(Value, Op.getValueType()); 6610 break; 6611 case 'N': // "N" is a positive constant that is an exact power of two. 6612 if ((int)Value > 0 && isPowerOf2_32(Value)) 6613 Result = DAG.getTargetConstant(Value, Op.getValueType()); 6614 break; 6615 case 'O': // "O" is the constant zero. 
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      if ((short)-Value == (int)-Value)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              Type *Ty) const {
  // FIXME: PPC does not allow r+i addressing modes for vectors!

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r.
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
bool PPCTargetLowering::isLegalAddressImmediate(int64_t V, Type *Ty) const {
  // PPC allows a sign-extended 16-bit immediate field.
  return (V > -(1 << 16) && V < (1 << 16)-1);
}

bool PPCTargetLowering::isLegalAddressImmediate(GlobalValue* GV) const {
  return false;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setReturnAddressIsTaken(true);

  DebugLoc dl = Op.getDebugLoc();
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = PPCSubTarget.isPPC64();
  bool isDarwinABI = PPCSubTarget.isDarwinABI();

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
      DAG.getConstant(PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI),
                      isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, getPointerTy(),
                                   FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

  // Just load the return address off the stack.
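  // Depth 0: setLRStoreRequired() above guarantees the return address is
  // spilled, so it can be loaded directly from its frame index.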
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
                     RetAddrFI, MachinePointerInfo(), false, false, false, 0);
}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setFrameAddressIsTaken(true);
  bool is31 = (getTargetMachine().Options.DisableFramePointerElim(MF) ||
               MFI->hasVarSizedObjects()) &&
              MFI->getStackSize() &&
              !MF.getFunction()->getFnAttributes().
                hasAttribute(Attributes::Naked);
  unsigned FrameReg = isPPC64 ? (is31 ? PPC::X31 : PPC::X1) :
                                (is31 ? PPC::R31 : PPC::R1);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
                                         PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo(), false, false,
                            false, 0);
  return FrameAddr;
}

bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
  return false;
}

/// getOptimalMemOpType - Returns the target-specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove lowering.
/// If DstAlign is zero, the destination can satisfy any alignment constraint.
/// Similarly, if SrcAlign is zero, there is no need to check it against an
/// alignment requirement, probably because the source does not need to be
/// loaded. If 'IsZeroVal' is true, it is safe to return a non-scalar-integer
/// type, e.g. an empty string source, a constant, or a value loaded from
/// memory. 'MemcpyStrSrc' indicates whether the memcpy source is constant,
/// so it does not need to be loaded.
/// Returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
                                           unsigned DstAlign, unsigned SrcAlign,
                                           bool IsZeroVal,
                                           bool MemcpyStrSrc,
                                           MachineFunction &MF) const {
  if (this->PPCSubTarget.isPPC64()) {
    return MVT::i64;
  } else {
    return MVT::i32;
  }
}

/// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than
/// a pair of mul and add instructions. fmuladd intrinsics will be expanded to
/// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd
/// is expanded to mul + add.
bool PPCTargetLowering::isFMAFasterThanMulAndAdd(EVT VT) const {
  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
  case MVT::v4f32:
    return true;
  default:
    break;
  }

  return false;
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref)
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}
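// Note: returning Sched::ILP biases the SelectionDAG scheduler toward
// exposing instruction-level parallelism; when DisableILPPref is set we
// simply defer to the base-class preference.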