1 //===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file implements the PPCISelLowering class. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "PPCISelLowering.h" 15 #include "MCTargetDesc/PPCPredicates.h" 16 #include "PPCMachineFunctionInfo.h" 17 #include "PPCPerfectShuffle.h" 18 #include "PPCTargetMachine.h" 19 #include "PPCTargetObjectFile.h" 20 #include "llvm/ADT/STLExtras.h" 21 #include "llvm/CodeGen/CallingConvLower.h" 22 #include "llvm/CodeGen/MachineFrameInfo.h" 23 #include "llvm/CodeGen/MachineFunction.h" 24 #include "llvm/CodeGen/MachineInstrBuilder.h" 25 #include "llvm/CodeGen/MachineRegisterInfo.h" 26 #include "llvm/CodeGen/SelectionDAG.h" 27 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" 28 #include "llvm/IR/CallingConv.h" 29 #include "llvm/IR/Constants.h" 30 #include "llvm/IR/DerivedTypes.h" 31 #include "llvm/IR/Function.h" 32 #include "llvm/IR/Intrinsics.h" 33 #include "llvm/Support/CommandLine.h" 34 #include "llvm/Support/ErrorHandling.h" 35 #include "llvm/Support/MathExtras.h" 36 #include "llvm/Support/raw_ostream.h" 37 #include "llvm/Target/TargetOptions.h" 38 using namespace llvm; 39 40 static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc", 41 cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden); 42 43 static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref", 44 cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden); 45 46 static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned", 47 cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden); 48 49 static TargetLoweringObjectFile *CreateTLOF(const PPCTargetMachine &TM) { 50 if (TM.getSubtargetImpl()->isDarwin()) 51 return new TargetLoweringObjectFileMachO(); 52 53 if (TM.getSubtargetImpl()->isSVR4ABI()) 54 return new PPC64LinuxTargetObjectFile(); 55 56 return new TargetLoweringObjectFileELF(); 57 } 58 59 PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) 60 : TargetLowering(TM, CreateTLOF(TM)), PPCSubTarget(*TM.getSubtargetImpl()) { 61 const PPCSubtarget *Subtarget = &TM.getSubtarget<PPCSubtarget>(); 62 63 setPow2DivIsCheap(); 64 65 // Use _setjmp/_longjmp instead of setjmp/longjmp. 66 setUseUnderscoreSetJmp(true); 67 setUseUnderscoreLongJmp(true); 68 69 // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all 70 // arguments are at least 4/8 bytes aligned. 71 bool isPPC64 = Subtarget->isPPC64(); 72 setMinStackArgumentAlignment(isPPC64 ? 8:4); 73 74 // Set up the register classes. 75 addRegisterClass(MVT::i32, &PPC::GPRCRegClass); 76 addRegisterClass(MVT::f32, &PPC::F4RCRegClass); 77 addRegisterClass(MVT::f64, &PPC::F8RCRegClass); 78 79 // PowerPC has an i16 but no i8 (or i1) SEXTLOAD 80 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote); 81 setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand); 82 83 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 84 85 // PowerPC has pre-inc load and store's. 86 setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal); 87 setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal); 88 setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal); 89 setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal); 90 setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal); 91 setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal); 92 setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal); 93 setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal); 94 setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal); 95 setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal); 96 97 // This is used in the ppcf128->int sequence. Note it has different semantics 98 // from FP_ROUND: that rounds to nearest, this rounds to zero. 99 setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom); 100 101 // We do not currently implement these libm ops for PowerPC. 102 setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand); 103 setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand); 104 setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand); 105 setOperationAction(ISD::FRINT, MVT::ppcf128, Expand); 106 setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand); 107 setOperationAction(ISD::FREM, MVT::ppcf128, Expand); 108 109 // PowerPC has no SREM/UREM instructions 110 setOperationAction(ISD::SREM, MVT::i32, Expand); 111 setOperationAction(ISD::UREM, MVT::i32, Expand); 112 setOperationAction(ISD::SREM, MVT::i64, Expand); 113 setOperationAction(ISD::UREM, MVT::i64, Expand); 114 115 // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM. 116 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); 117 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); 118 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); 119 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); 120 setOperationAction(ISD::UDIVREM, MVT::i32, Expand); 121 setOperationAction(ISD::SDIVREM, MVT::i32, Expand); 122 setOperationAction(ISD::UDIVREM, MVT::i64, Expand); 123 setOperationAction(ISD::SDIVREM, MVT::i64, Expand); 124 125 // We don't support sin/cos/sqrt/fmod/pow 126 setOperationAction(ISD::FSIN , MVT::f64, Expand); 127 setOperationAction(ISD::FCOS , MVT::f64, Expand); 128 setOperationAction(ISD::FSINCOS, MVT::f64, Expand); 129 setOperationAction(ISD::FREM , MVT::f64, Expand); 130 setOperationAction(ISD::FPOW , MVT::f64, Expand); 131 setOperationAction(ISD::FMA , MVT::f64, Legal); 132 setOperationAction(ISD::FSIN , MVT::f32, Expand); 133 setOperationAction(ISD::FCOS , MVT::f32, Expand); 134 setOperationAction(ISD::FSINCOS, MVT::f32, Expand); 135 setOperationAction(ISD::FREM , MVT::f32, Expand); 136 setOperationAction(ISD::FPOW , MVT::f32, Expand); 137 setOperationAction(ISD::FMA , MVT::f32, Legal); 138 139 setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom); 140 141 // If we're enabling GP optimizations, use hardware square root 142 if (!Subtarget->hasFSQRT() && 143 !(TM.Options.UnsafeFPMath && 144 Subtarget->hasFRSQRTE() && Subtarget->hasFRE())) 145 setOperationAction(ISD::FSQRT, MVT::f64, Expand); 146 147 if (!Subtarget->hasFSQRT() && 148 !(TM.Options.UnsafeFPMath && 149 Subtarget->hasFRSQRTES() && Subtarget->hasFRES())) 150 setOperationAction(ISD::FSQRT, MVT::f32, Expand); 151 152 if (Subtarget->hasFCPSGN()) { 153 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal); 154 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal); 155 } else { 156 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 157 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 158 } 159 160 if (Subtarget->hasFPRND()) { 161 setOperationAction(ISD::FFLOOR, MVT::f64, Legal); 162 setOperationAction(ISD::FCEIL, MVT::f64, Legal); 163 setOperationAction(ISD::FTRUNC, MVT::f64, Legal); 164 setOperationAction(ISD::FROUND, MVT::f64, Legal); 165 166 setOperationAction(ISD::FFLOOR, MVT::f32, Legal); 167 setOperationAction(ISD::FCEIL, MVT::f32, Legal); 168 setOperationAction(ISD::FTRUNC, MVT::f32, Legal); 169 setOperationAction(ISD::FROUND, MVT::f32, Legal); 170 } 171 172 // PowerPC does not have BSWAP, CTPOP or CTTZ 173 setOperationAction(ISD::BSWAP, MVT::i32 , Expand); 174 setOperationAction(ISD::CTTZ , MVT::i32 , Expand); 175 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); 176 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand); 177 setOperationAction(ISD::BSWAP, MVT::i64 , Expand); 178 setOperationAction(ISD::CTTZ , MVT::i64 , Expand); 179 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand); 180 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand); 181 182 if (Subtarget->hasPOPCNTD()) { 183 setOperationAction(ISD::CTPOP, MVT::i32 , Legal); 184 setOperationAction(ISD::CTPOP, MVT::i64 , Legal); 185 } else { 186 setOperationAction(ISD::CTPOP, MVT::i32 , Expand); 187 setOperationAction(ISD::CTPOP, MVT::i64 , Expand); 188 } 189 190 // PowerPC does not have ROTR 191 setOperationAction(ISD::ROTR, MVT::i32 , Expand); 192 setOperationAction(ISD::ROTR, MVT::i64 , Expand); 193 194 // PowerPC does not have Select 195 setOperationAction(ISD::SELECT, MVT::i32, Expand); 196 setOperationAction(ISD::SELECT, MVT::i64, Expand); 197 setOperationAction(ISD::SELECT, MVT::f32, Expand); 198 setOperationAction(ISD::SELECT, MVT::f64, Expand); 199 200 // PowerPC wants to turn select_cc of FP into fsel when possible. 201 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); 202 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); 203 204 // PowerPC wants to optimize integer setcc a bit 205 setOperationAction(ISD::SETCC, MVT::i32, Custom); 206 207 // PowerPC does not have BRCOND which requires SetCC 208 setOperationAction(ISD::BRCOND, MVT::Other, Expand); 209 210 setOperationAction(ISD::BR_JT, MVT::Other, Expand); 211 212 // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores. 213 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); 214 215 // PowerPC does not have [U|S]INT_TO_FP 216 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand); 217 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand); 218 219 setOperationAction(ISD::BITCAST, MVT::f32, Expand); 220 setOperationAction(ISD::BITCAST, MVT::i32, Expand); 221 setOperationAction(ISD::BITCAST, MVT::i64, Expand); 222 setOperationAction(ISD::BITCAST, MVT::f64, Expand); 223 224 // We cannot sextinreg(i1). Expand to shifts. 225 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); 226 227 // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support 228 // SjLj exception handling but a light-weight setjmp/longjmp replacement to 229 // support continuation, user-level threading, and etc.. As a result, no 230 // other SjLj exception interfaces are implemented and please don't build 231 // your own exception handling based on them. 232 // LLVM/Clang supports zero-cost DWARF exception handling. 233 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); 234 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); 235 236 // We want to legalize GlobalAddress and ConstantPool nodes into the 237 // appropriate instructions to materialize the address. 238 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 239 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); 240 setOperationAction(ISD::BlockAddress, MVT::i32, Custom); 241 setOperationAction(ISD::ConstantPool, MVT::i32, Custom); 242 setOperationAction(ISD::JumpTable, MVT::i32, Custom); 243 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); 244 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom); 245 setOperationAction(ISD::BlockAddress, MVT::i64, Custom); 246 setOperationAction(ISD::ConstantPool, MVT::i64, Custom); 247 setOperationAction(ISD::JumpTable, MVT::i64, Custom); 248 249 // TRAP is legal. 250 setOperationAction(ISD::TRAP, MVT::Other, Legal); 251 252 // TRAMPOLINE is custom lowered. 253 setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom); 254 setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom); 255 256 // VASTART needs to be custom lowered to use the VarArgsFrameIndex 257 setOperationAction(ISD::VASTART , MVT::Other, Custom); 258 259 if (Subtarget->isSVR4ABI()) { 260 if (isPPC64) { 261 // VAARG always uses double-word chunks, so promote anything smaller. 262 setOperationAction(ISD::VAARG, MVT::i1, Promote); 263 AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64); 264 setOperationAction(ISD::VAARG, MVT::i8, Promote); 265 AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64); 266 setOperationAction(ISD::VAARG, MVT::i16, Promote); 267 AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64); 268 setOperationAction(ISD::VAARG, MVT::i32, Promote); 269 AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64); 270 setOperationAction(ISD::VAARG, MVT::Other, Expand); 271 } else { 272 // VAARG is custom lowered with the 32-bit SVR4 ABI. 273 setOperationAction(ISD::VAARG, MVT::Other, Custom); 274 setOperationAction(ISD::VAARG, MVT::i64, Custom); 275 } 276 } else 277 setOperationAction(ISD::VAARG, MVT::Other, Expand); 278 279 if (Subtarget->isSVR4ABI() && !isPPC64) 280 // VACOPY is custom lowered with the 32-bit SVR4 ABI. 281 setOperationAction(ISD::VACOPY , MVT::Other, Custom); 282 else 283 setOperationAction(ISD::VACOPY , MVT::Other, Expand); 284 285 // Use the default implementation. 286 setOperationAction(ISD::VAEND , MVT::Other, Expand); 287 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand); 288 setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom); 289 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom); 290 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom); 291 292 // We want to custom lower some of our intrinsics. 293 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 294 295 // To handle counter-based loop conditions. 296 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom); 297 298 // Comparisons that require checking two conditions. 299 setCondCodeAction(ISD::SETULT, MVT::f32, Expand); 300 setCondCodeAction(ISD::SETULT, MVT::f64, Expand); 301 setCondCodeAction(ISD::SETUGT, MVT::f32, Expand); 302 setCondCodeAction(ISD::SETUGT, MVT::f64, Expand); 303 setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand); 304 setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand); 305 setCondCodeAction(ISD::SETOGE, MVT::f32, Expand); 306 setCondCodeAction(ISD::SETOGE, MVT::f64, Expand); 307 setCondCodeAction(ISD::SETOLE, MVT::f32, Expand); 308 setCondCodeAction(ISD::SETOLE, MVT::f64, Expand); 309 setCondCodeAction(ISD::SETONE, MVT::f32, Expand); 310 setCondCodeAction(ISD::SETONE, MVT::f64, Expand); 311 312 if (Subtarget->has64BitSupport()) { 313 // They also have instructions for converting between i64 and fp. 314 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); 315 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand); 316 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); 317 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand); 318 // This is just the low 32 bits of a (signed) fp->i64 conversion. 319 // We cannot do this with Promote because i64 is not a legal type. 320 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); 321 322 if (PPCSubTarget.hasLFIWAX() || Subtarget->isPPC64()) 323 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); 324 } else { 325 // PowerPC does not have FP_TO_UINT on 32-bit implementations. 326 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand); 327 } 328 329 // With the instructions enabled under FPCVT, we can do everything. 330 if (PPCSubTarget.hasFPCVT()) { 331 if (Subtarget->has64BitSupport()) { 332 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); 333 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom); 334 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); 335 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom); 336 } 337 338 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); 339 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); 340 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); 341 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); 342 } 343 344 if (Subtarget->use64BitRegs()) { 345 // 64-bit PowerPC implementations can support i64 types directly 346 addRegisterClass(MVT::i64, &PPC::G8RCRegClass); 347 // BUILD_PAIR can't be handled natively, and should be expanded to shl/or 348 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand); 349 // 64-bit PowerPC wants to expand i128 shifts itself. 350 setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom); 351 setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom); 352 setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom); 353 } else { 354 // 32-bit PowerPC wants to expand i64 shifts itself. 355 setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); 356 setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); 357 setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); 358 } 359 360 if (Subtarget->hasAltivec()) { 361 // First set operation action for all vector types to expand. Then we 362 // will selectively turn on ones that can be effectively codegen'd. 363 for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 364 i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) { 365 MVT::SimpleValueType VT = (MVT::SimpleValueType)i; 366 367 // add/sub are legal for all supported vector VT's. 368 setOperationAction(ISD::ADD , VT, Legal); 369 setOperationAction(ISD::SUB , VT, Legal); 370 371 // We promote all shuffles to v16i8. 372 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote); 373 AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8); 374 375 // We promote all non-typed operations to v4i32. 376 setOperationAction(ISD::AND , VT, Promote); 377 AddPromotedToType (ISD::AND , VT, MVT::v4i32); 378 setOperationAction(ISD::OR , VT, Promote); 379 AddPromotedToType (ISD::OR , VT, MVT::v4i32); 380 setOperationAction(ISD::XOR , VT, Promote); 381 AddPromotedToType (ISD::XOR , VT, MVT::v4i32); 382 setOperationAction(ISD::LOAD , VT, Promote); 383 AddPromotedToType (ISD::LOAD , VT, MVT::v4i32); 384 setOperationAction(ISD::SELECT, VT, Promote); 385 AddPromotedToType (ISD::SELECT, VT, MVT::v4i32); 386 setOperationAction(ISD::STORE, VT, Promote); 387 AddPromotedToType (ISD::STORE, VT, MVT::v4i32); 388 389 // No other operations are legal. 390 setOperationAction(ISD::MUL , VT, Expand); 391 setOperationAction(ISD::SDIV, VT, Expand); 392 setOperationAction(ISD::SREM, VT, Expand); 393 setOperationAction(ISD::UDIV, VT, Expand); 394 setOperationAction(ISD::UREM, VT, Expand); 395 setOperationAction(ISD::FDIV, VT, Expand); 396 setOperationAction(ISD::FREM, VT, Expand); 397 setOperationAction(ISD::FNEG, VT, Expand); 398 setOperationAction(ISD::FSQRT, VT, Expand); 399 setOperationAction(ISD::FLOG, VT, Expand); 400 setOperationAction(ISD::FLOG10, VT, Expand); 401 setOperationAction(ISD::FLOG2, VT, Expand); 402 setOperationAction(ISD::FEXP, VT, Expand); 403 setOperationAction(ISD::FEXP2, VT, Expand); 404 setOperationAction(ISD::FSIN, VT, Expand); 405 setOperationAction(ISD::FCOS, VT, Expand); 406 setOperationAction(ISD::FABS, VT, Expand); 407 setOperationAction(ISD::FPOWI, VT, Expand); 408 setOperationAction(ISD::FFLOOR, VT, Expand); 409 setOperationAction(ISD::FCEIL, VT, Expand); 410 setOperationAction(ISD::FTRUNC, VT, Expand); 411 setOperationAction(ISD::FRINT, VT, Expand); 412 setOperationAction(ISD::FNEARBYINT, VT, Expand); 413 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand); 414 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand); 415 setOperationAction(ISD::BUILD_VECTOR, VT, Expand); 416 setOperationAction(ISD::UMUL_LOHI, VT, Expand); 417 setOperationAction(ISD::SMUL_LOHI, VT, Expand); 418 setOperationAction(ISD::UDIVREM, VT, Expand); 419 setOperationAction(ISD::SDIVREM, VT, Expand); 420 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand); 421 setOperationAction(ISD::FPOW, VT, Expand); 422 setOperationAction(ISD::CTPOP, VT, Expand); 423 setOperationAction(ISD::CTLZ, VT, Expand); 424 setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand); 425 setOperationAction(ISD::CTTZ, VT, Expand); 426 setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand); 427 setOperationAction(ISD::VSELECT, VT, Expand); 428 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand); 429 430 for (unsigned j = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 431 j <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++j) { 432 MVT::SimpleValueType InnerVT = (MVT::SimpleValueType)j; 433 setTruncStoreAction(VT, InnerVT, Expand); 434 } 435 setLoadExtAction(ISD::SEXTLOAD, VT, Expand); 436 setLoadExtAction(ISD::ZEXTLOAD, VT, Expand); 437 setLoadExtAction(ISD::EXTLOAD, VT, Expand); 438 } 439 440 // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle 441 // with merges, splats, etc. 442 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom); 443 444 setOperationAction(ISD::AND , MVT::v4i32, Legal); 445 setOperationAction(ISD::OR , MVT::v4i32, Legal); 446 setOperationAction(ISD::XOR , MVT::v4i32, Legal); 447 setOperationAction(ISD::LOAD , MVT::v4i32, Legal); 448 setOperationAction(ISD::SELECT, MVT::v4i32, Expand); 449 setOperationAction(ISD::STORE , MVT::v4i32, Legal); 450 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal); 451 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal); 452 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal); 453 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal); 454 setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal); 455 setOperationAction(ISD::FCEIL, MVT::v4f32, Legal); 456 setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal); 457 setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal); 458 459 addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass); 460 addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass); 461 addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass); 462 addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass); 463 464 setOperationAction(ISD::MUL, MVT::v4f32, Legal); 465 setOperationAction(ISD::FMA, MVT::v4f32, Legal); 466 467 if (TM.Options.UnsafeFPMath) { 468 setOperationAction(ISD::FDIV, MVT::v4f32, Legal); 469 setOperationAction(ISD::FSQRT, MVT::v4f32, Legal); 470 } 471 472 setOperationAction(ISD::MUL, MVT::v4i32, Custom); 473 setOperationAction(ISD::MUL, MVT::v8i16, Custom); 474 setOperationAction(ISD::MUL, MVT::v16i8, Custom); 475 476 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom); 477 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom); 478 479 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom); 480 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom); 481 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom); 482 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); 483 484 // Altivec does not contain unordered floating-point compare instructions 485 setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand); 486 setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand); 487 setCondCodeAction(ISD::SETUGT, MVT::v4f32, Expand); 488 setCondCodeAction(ISD::SETUGE, MVT::v4f32, Expand); 489 setCondCodeAction(ISD::SETULT, MVT::v4f32, Expand); 490 setCondCodeAction(ISD::SETULE, MVT::v4f32, Expand); 491 492 setCondCodeAction(ISD::SETO, MVT::v4f32, Expand); 493 setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand); 494 } 495 496 if (Subtarget->has64BitSupport()) { 497 setOperationAction(ISD::PREFETCH, MVT::Other, Legal); 498 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal); 499 } 500 501 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Expand); 502 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand); 503 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand); 504 setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand); 505 506 setBooleanContents(ZeroOrOneBooleanContent); 507 // Altivec instructions set fields to all zeros or all ones. 508 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); 509 510 if (isPPC64) { 511 setStackPointerRegisterToSaveRestore(PPC::X1); 512 setExceptionPointerRegister(PPC::X3); 513 setExceptionSelectorRegister(PPC::X4); 514 } else { 515 setStackPointerRegisterToSaveRestore(PPC::R1); 516 setExceptionPointerRegister(PPC::R3); 517 setExceptionSelectorRegister(PPC::R4); 518 } 519 520 // We have target-specific dag combine patterns for the following nodes: 521 setTargetDAGCombine(ISD::SINT_TO_FP); 522 setTargetDAGCombine(ISD::LOAD); 523 setTargetDAGCombine(ISD::STORE); 524 setTargetDAGCombine(ISD::BR_CC); 525 setTargetDAGCombine(ISD::BSWAP); 526 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN); 527 528 // Use reciprocal estimates. 529 if (TM.Options.UnsafeFPMath) { 530 setTargetDAGCombine(ISD::FDIV); 531 setTargetDAGCombine(ISD::FSQRT); 532 } 533 534 // Darwin long double math library functions have $LDBL128 appended. 535 if (Subtarget->isDarwin()) { 536 setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128"); 537 setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128"); 538 setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128"); 539 setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128"); 540 setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128"); 541 setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128"); 542 setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128"); 543 setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128"); 544 setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128"); 545 setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128"); 546 } 547 548 setMinFunctionAlignment(2); 549 if (PPCSubTarget.isDarwin()) 550 setPrefFunctionAlignment(4); 551 552 if (isPPC64 && Subtarget->isJITCodeModel()) 553 // Temporary workaround for the inability of PPC64 JIT to handle jump 554 // tables. 555 setSupportJumpTables(false); 556 557 setInsertFencesForAtomic(true); 558 559 setSchedulingPreference(Sched::Hybrid); 560 561 computeRegisterProperties(); 562 563 // The Freescale cores does better with aggressive inlining of memcpy and 564 // friends. Gcc uses same threshold of 128 bytes (= 32 word stores). 565 if (Subtarget->getDarwinDirective() == PPC::DIR_E500mc || 566 Subtarget->getDarwinDirective() == PPC::DIR_E5500) { 567 MaxStoresPerMemset = 32; 568 MaxStoresPerMemsetOptSize = 16; 569 MaxStoresPerMemcpy = 32; 570 MaxStoresPerMemcpyOptSize = 8; 571 MaxStoresPerMemmove = 32; 572 MaxStoresPerMemmoveOptSize = 8; 573 574 setPrefFunctionAlignment(4); 575 } 576 } 577 578 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate 579 /// function arguments in the caller parameter area. 580 unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty) const { 581 const TargetMachine &TM = getTargetMachine(); 582 // Darwin passes everything on 4 byte boundary. 583 if (TM.getSubtarget<PPCSubtarget>().isDarwin()) 584 return 4; 585 586 // 16byte and wider vectors are passed on 16byte boundary. 587 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) 588 if (VTy->getBitWidth() >= 128) 589 return 16; 590 591 // The rest is 8 on PPC64 and 4 on PPC32 boundary. 592 if (PPCSubTarget.isPPC64()) 593 return 8; 594 595 return 4; 596 } 597 598 const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const { 599 switch (Opcode) { 600 default: return 0; 601 case PPCISD::FSEL: return "PPCISD::FSEL"; 602 case PPCISD::FCFID: return "PPCISD::FCFID"; 603 case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ"; 604 case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ"; 605 case PPCISD::FRE: return "PPCISD::FRE"; 606 case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE"; 607 case PPCISD::STFIWX: return "PPCISD::STFIWX"; 608 case PPCISD::VMADDFP: return "PPCISD::VMADDFP"; 609 case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP"; 610 case PPCISD::VPERM: return "PPCISD::VPERM"; 611 case PPCISD::Hi: return "PPCISD::Hi"; 612 case PPCISD::Lo: return "PPCISD::Lo"; 613 case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY"; 614 case PPCISD::TOC_RESTORE: return "PPCISD::TOC_RESTORE"; 615 case PPCISD::LOAD: return "PPCISD::LOAD"; 616 case PPCISD::LOAD_TOC: return "PPCISD::LOAD_TOC"; 617 case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC"; 618 case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg"; 619 case PPCISD::SRL: return "PPCISD::SRL"; 620 case PPCISD::SRA: return "PPCISD::SRA"; 621 case PPCISD::SHL: return "PPCISD::SHL"; 622 case PPCISD::CALL: return "PPCISD::CALL"; 623 case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP"; 624 case PPCISD::MTCTR: return "PPCISD::MTCTR"; 625 case PPCISD::BCTRL: return "PPCISD::BCTRL"; 626 case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG"; 627 case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP"; 628 case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP"; 629 case PPCISD::MFOCRF: return "PPCISD::MFOCRF"; 630 case PPCISD::VCMP: return "PPCISD::VCMP"; 631 case PPCISD::VCMPo: return "PPCISD::VCMPo"; 632 case PPCISD::LBRX: return "PPCISD::LBRX"; 633 case PPCISD::STBRX: return "PPCISD::STBRX"; 634 case PPCISD::LARX: return "PPCISD::LARX"; 635 case PPCISD::STCX: return "PPCISD::STCX"; 636 case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH"; 637 case PPCISD::BDNZ: return "PPCISD::BDNZ"; 638 case PPCISD::BDZ: return "PPCISD::BDZ"; 639 case PPCISD::MFFS: return "PPCISD::MFFS"; 640 case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ"; 641 case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN"; 642 case PPCISD::CR6SET: return "PPCISD::CR6SET"; 643 case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET"; 644 case PPCISD::ADDIS_TOC_HA: return "PPCISD::ADDIS_TOC_HA"; 645 case PPCISD::LD_TOC_L: return "PPCISD::LD_TOC_L"; 646 case PPCISD::ADDI_TOC_L: return "PPCISD::ADDI_TOC_L"; 647 case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA"; 648 case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L"; 649 case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS"; 650 case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA"; 651 case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L"; 652 case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR"; 653 case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA"; 654 case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L"; 655 case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR"; 656 case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA"; 657 case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L"; 658 case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT"; 659 case PPCISD::SC: return "PPCISD::SC"; 660 } 661 } 662 663 EVT PPCTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const { 664 if (!VT.isVector()) 665 return MVT::i32; 666 return VT.changeVectorElementTypeToInteger(); 667 } 668 669 //===----------------------------------------------------------------------===// 670 // Node matching predicates, for use by the tblgen matching code. 671 //===----------------------------------------------------------------------===// 672 673 /// isFloatingPointZero - Return true if this is 0.0 or -0.0. 674 static bool isFloatingPointZero(SDValue Op) { 675 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 676 return CFP->getValueAPF().isZero(); 677 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 678 // Maybe this has already been legalized into the constant pool? 679 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1))) 680 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 681 return CFP->getValueAPF().isZero(); 682 } 683 return false; 684 } 685 686 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return 687 /// true if Op is undef or if it matches the specified value. 688 static bool isConstantOrUndef(int Op, int Val) { 689 return Op < 0 || Op == Val; 690 } 691 692 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a 693 /// VPKUHUM instruction. 694 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) { 695 if (!isUnary) { 696 for (unsigned i = 0; i != 16; ++i) 697 if (!isConstantOrUndef(N->getMaskElt(i), i*2+1)) 698 return false; 699 } else { 700 for (unsigned i = 0; i != 8; ++i) 701 if (!isConstantOrUndef(N->getMaskElt(i), i*2+1) || 702 !isConstantOrUndef(N->getMaskElt(i+8), i*2+1)) 703 return false; 704 } 705 return true; 706 } 707 708 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a 709 /// VPKUWUM instruction. 710 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) { 711 if (!isUnary) { 712 for (unsigned i = 0; i != 16; i += 2) 713 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) || 714 !isConstantOrUndef(N->getMaskElt(i+1), i*2+3)) 715 return false; 716 } else { 717 for (unsigned i = 0; i != 8; i += 2) 718 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) || 719 !isConstantOrUndef(N->getMaskElt(i+1), i*2+3) || 720 !isConstantOrUndef(N->getMaskElt(i+8), i*2+2) || 721 !isConstantOrUndef(N->getMaskElt(i+9), i*2+3)) 722 return false; 723 } 724 return true; 725 } 726 727 /// isVMerge - Common function, used to match vmrg* shuffles. 728 /// 729 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize, 730 unsigned LHSStart, unsigned RHSStart) { 731 assert(N->getValueType(0) == MVT::v16i8 && 732 "PPC only supports shuffles by bytes!"); 733 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) && 734 "Unsupported merge size!"); 735 736 for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units 737 for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit 738 if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j), 739 LHSStart+j+i*UnitSize) || 740 !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j), 741 RHSStart+j+i*UnitSize)) 742 return false; 743 } 744 return true; 745 } 746 747 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for 748 /// a VRGL* instruction with the specified unit size (1,2 or 4 bytes). 749 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, 750 bool isUnary) { 751 if (!isUnary) 752 return isVMerge(N, UnitSize, 8, 24); 753 return isVMerge(N, UnitSize, 8, 8); 754 } 755 756 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for 757 /// a VRGH* instruction with the specified unit size (1,2 or 4 bytes). 758 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, 759 bool isUnary) { 760 if (!isUnary) 761 return isVMerge(N, UnitSize, 0, 16); 762 return isVMerge(N, UnitSize, 0, 0); 763 } 764 765 766 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift 767 /// amount, otherwise return -1. 768 int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) { 769 assert(N->getValueType(0) == MVT::v16i8 && 770 "PPC only supports shuffles by bytes!"); 771 772 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 773 774 // Find the first non-undef value in the shuffle mask. 775 unsigned i; 776 for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i) 777 /*search*/; 778 779 if (i == 16) return -1; // all undef. 780 781 // Otherwise, check to see if the rest of the elements are consecutively 782 // numbered from this value. 783 unsigned ShiftAmt = SVOp->getMaskElt(i); 784 if (ShiftAmt < i) return -1; 785 ShiftAmt -= i; 786 787 if (!isUnary) { 788 // Check the rest of the elements to see if they are consecutive. 789 for (++i; i != 16; ++i) 790 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 791 return -1; 792 } else { 793 // Check the rest of the elements to see if they are consecutive. 794 for (++i; i != 16; ++i) 795 if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15)) 796 return -1; 797 } 798 return ShiftAmt; 799 } 800 801 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand 802 /// specifies a splat of a single element that is suitable for input to 803 /// VSPLTB/VSPLTH/VSPLTW. 804 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) { 805 assert(N->getValueType(0) == MVT::v16i8 && 806 (EltSize == 1 || EltSize == 2 || EltSize == 4)); 807 808 // This is a splat operation if each element of the permute is the same, and 809 // if the value doesn't reference the second vector. 810 unsigned ElementBase = N->getMaskElt(0); 811 812 // FIXME: Handle UNDEF elements too! 813 if (ElementBase >= 16) 814 return false; 815 816 // Check that the indices are consecutive, in the case of a multi-byte element 817 // splatted with a v16i8 mask. 818 for (unsigned i = 1; i != EltSize; ++i) 819 if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase)) 820 return false; 821 822 for (unsigned i = EltSize, e = 16; i != e; i += EltSize) { 823 if (N->getMaskElt(i) < 0) continue; 824 for (unsigned j = 0; j != EltSize; ++j) 825 if (N->getMaskElt(i+j) != N->getMaskElt(j)) 826 return false; 827 } 828 return true; 829 } 830 831 /// isAllNegativeZeroVector - Returns true if all elements of build_vector 832 /// are -0.0. 833 bool PPC::isAllNegativeZeroVector(SDNode *N) { 834 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N); 835 836 APInt APVal, APUndef; 837 unsigned BitSize; 838 bool HasAnyUndefs; 839 840 if (BV->isConstantSplat(APVal, APUndef, BitSize, HasAnyUndefs, 32, true)) 841 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 842 return CFP->getValueAPF().isNegZero(); 843 844 return false; 845 } 846 847 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the 848 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask. 849 unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) { 850 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 851 assert(isSplatShuffleMask(SVOp, EltSize)); 852 return SVOp->getMaskElt(0) / EltSize; 853 } 854 855 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed 856 /// by using a vspltis[bhw] instruction of the specified element size, return 857 /// the constant being splatted. The ByteSize field indicates the number of 858 /// bytes of each element [124] -> [bhw]. 859 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { 860 SDValue OpVal(0, 0); 861 862 // If ByteSize of the splat is bigger than the element size of the 863 // build_vector, then we have a case where we are checking for a splat where 864 // multiple elements of the buildvector are folded together into a single 865 // logical element of the splat (e.g. "vsplish 1" to splat {0,1}*8). 866 unsigned EltSize = 16/N->getNumOperands(); 867 if (EltSize < ByteSize) { 868 unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval. 869 SDValue UniquedVals[4]; 870 assert(Multiple > 1 && Multiple <= 4 && "How can this happen?"); 871 872 // See if all of the elements in the buildvector agree across. 873 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 874 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; 875 // If the element isn't a constant, bail fully out. 876 if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue(); 877 878 879 if (UniquedVals[i&(Multiple-1)].getNode() == 0) 880 UniquedVals[i&(Multiple-1)] = N->getOperand(i); 881 else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i)) 882 return SDValue(); // no match. 883 } 884 885 // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains 886 // either constant or undef values that are identical for each chunk. See 887 // if these chunks can form into a larger vspltis*. 888 889 // Check to see if all of the leading entries are either 0 or -1. If 890 // neither, then this won't fit into the immediate field. 891 bool LeadingZero = true; 892 bool LeadingOnes = true; 893 for (unsigned i = 0; i != Multiple-1; ++i) { 894 if (UniquedVals[i].getNode() == 0) continue; // Must have been undefs. 895 896 LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue(); 897 LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue(); 898 } 899 // Finally, check the least significant entry. 900 if (LeadingZero) { 901 if (UniquedVals[Multiple-1].getNode() == 0) 902 return DAG.getTargetConstant(0, MVT::i32); // 0,0,0,undef 903 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue(); 904 if (Val < 16) 905 return DAG.getTargetConstant(Val, MVT::i32); // 0,0,0,4 -> vspltisw(4) 906 } 907 if (LeadingOnes) { 908 if (UniquedVals[Multiple-1].getNode() == 0) 909 return DAG.getTargetConstant(~0U, MVT::i32); // -1,-1,-1,undef 910 int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue(); 911 if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2) 912 return DAG.getTargetConstant(Val, MVT::i32); 913 } 914 915 return SDValue(); 916 } 917 918 // Check to see if this buildvec has a single non-undef value in its elements. 919 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 920 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; 921 if (OpVal.getNode() == 0) 922 OpVal = N->getOperand(i); 923 else if (OpVal != N->getOperand(i)) 924 return SDValue(); 925 } 926 927 if (OpVal.getNode() == 0) return SDValue(); // All UNDEF: use implicit def. 928 929 unsigned ValSizeInBytes = EltSize; 930 uint64_t Value = 0; 931 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { 932 Value = CN->getZExtValue(); 933 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { 934 assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!"); 935 Value = FloatToBits(CN->getValueAPF().convertToFloat()); 936 } 937 938 // If the splat value is larger than the element value, then we can never do 939 // this splat. The only case that we could fit the replicated bits into our 940 // immediate field for would be zero, and we prefer to use vxor for it. 941 if (ValSizeInBytes < ByteSize) return SDValue(); 942 943 // If the element value is larger than the splat value, cut it in half and 944 // check to see if the two halves are equal. Continue doing this until we 945 // get to ByteSize. This allows us to handle 0x01010101 as 0x01. 946 while (ValSizeInBytes > ByteSize) { 947 ValSizeInBytes >>= 1; 948 949 // If the top half equals the bottom half, we're still ok. 950 if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) != 951 (Value & ((1 << (8*ValSizeInBytes))-1))) 952 return SDValue(); 953 } 954 955 // Properly sign extend the value. 956 int MaskVal = SignExtend32(Value, ByteSize * 8); 957 958 // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros. 959 if (MaskVal == 0) return SDValue(); 960 961 // Finally, if this value fits in a 5 bit sext field, return it 962 if (SignExtend32<5>(MaskVal) == MaskVal) 963 return DAG.getTargetConstant(MaskVal, MVT::i32); 964 return SDValue(); 965 } 966 967 //===----------------------------------------------------------------------===// 968 // Addressing Mode Selection 969 //===----------------------------------------------------------------------===// 970 971 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit 972 /// or 64-bit immediate, and if the value can be accurately represented as a 973 /// sign extension from a 16-bit value. If so, this returns true and the 974 /// immediate. 975 static bool isIntS16Immediate(SDNode *N, short &Imm) { 976 if (N->getOpcode() != ISD::Constant) 977 return false; 978 979 Imm = (short)cast<ConstantSDNode>(N)->getZExtValue(); 980 if (N->getValueType(0) == MVT::i32) 981 return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue(); 982 else 983 return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue(); 984 } 985 static bool isIntS16Immediate(SDValue Op, short &Imm) { 986 return isIntS16Immediate(Op.getNode(), Imm); 987 } 988 989 990 /// SelectAddressRegReg - Given the specified addressed, check to see if it 991 /// can be represented as an indexed [r+r] operation. Returns false if it 992 /// can be more efficiently represented with [r+imm]. 993 bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base, 994 SDValue &Index, 995 SelectionDAG &DAG) const { 996 short imm = 0; 997 if (N.getOpcode() == ISD::ADD) { 998 if (isIntS16Immediate(N.getOperand(1), imm)) 999 return false; // r+i 1000 if (N.getOperand(1).getOpcode() == PPCISD::Lo) 1001 return false; // r+i 1002 1003 Base = N.getOperand(0); 1004 Index = N.getOperand(1); 1005 return true; 1006 } else if (N.getOpcode() == ISD::OR) { 1007 if (isIntS16Immediate(N.getOperand(1), imm)) 1008 return false; // r+i can fold it if we can. 1009 1010 // If this is an or of disjoint bitfields, we can codegen this as an add 1011 // (for better address arithmetic) if the LHS and RHS of the OR are provably 1012 // disjoint. 1013 APInt LHSKnownZero, LHSKnownOne; 1014 APInt RHSKnownZero, RHSKnownOne; 1015 DAG.ComputeMaskedBits(N.getOperand(0), 1016 LHSKnownZero, LHSKnownOne); 1017 1018 if (LHSKnownZero.getBoolValue()) { 1019 DAG.ComputeMaskedBits(N.getOperand(1), 1020 RHSKnownZero, RHSKnownOne); 1021 // If all of the bits are known zero on the LHS or RHS, the add won't 1022 // carry. 1023 if (~(LHSKnownZero | RHSKnownZero) == 0) { 1024 Base = N.getOperand(0); 1025 Index = N.getOperand(1); 1026 return true; 1027 } 1028 } 1029 } 1030 1031 return false; 1032 } 1033 1034 // If we happen to be doing an i64 load or store into a stack slot that has 1035 // less than a 4-byte alignment, then the frame-index elimination may need to 1036 // use an indexed load or store instruction (because the offset may not be a 1037 // multiple of 4). The extra register needed to hold the offset comes from the 1038 // register scavenger, and it is possible that the scavenger will need to use 1039 // an emergency spill slot. As a result, we need to make sure that a spill slot 1040 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned 1041 // stack slot. 1042 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) { 1043 // FIXME: This does not handle the LWA case. 1044 if (VT != MVT::i64) 1045 return; 1046 1047 // NOTE: We'll exclude negative FIs here, which come from argument 1048 // lowering, because there are no known test cases triggering this problem 1049 // using packed structures (or similar). We can remove this exclusion if 1050 // we find such a test case. The reason why this is so test-case driven is 1051 // because this entire 'fixup' is only to prevent crashes (from the 1052 // register scavenger) on not-really-valid inputs. For example, if we have: 1053 // %a = alloca i1 1054 // %b = bitcast i1* %a to i64* 1055 // store i64* a, i64 b 1056 // then the store should really be marked as 'align 1', but is not. If it 1057 // were marked as 'align 1' then the indexed form would have been 1058 // instruction-selected initially, and the problem this 'fixup' is preventing 1059 // won't happen regardless. 1060 if (FrameIdx < 0) 1061 return; 1062 1063 MachineFunction &MF = DAG.getMachineFunction(); 1064 MachineFrameInfo *MFI = MF.getFrameInfo(); 1065 1066 unsigned Align = MFI->getObjectAlignment(FrameIdx); 1067 if (Align >= 4) 1068 return; 1069 1070 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 1071 FuncInfo->setHasNonRISpills(); 1072 } 1073 1074 /// Returns true if the address N can be represented by a base register plus 1075 /// a signed 16-bit displacement [r+imm], and if it is not better 1076 /// represented as reg+reg. If Aligned is true, only accept displacements 1077 /// suitable for STD and friends, i.e. multiples of 4. 1078 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp, 1079 SDValue &Base, 1080 SelectionDAG &DAG, 1081 bool Aligned) const { 1082 // FIXME dl should come from parent load or store, not from address 1083 SDLoc dl(N); 1084 // If this can be more profitably realized as r+r, fail. 1085 if (SelectAddressRegReg(N, Disp, Base, DAG)) 1086 return false; 1087 1088 if (N.getOpcode() == ISD::ADD) { 1089 short imm = 0; 1090 if (isIntS16Immediate(N.getOperand(1), imm) && 1091 (!Aligned || (imm & 3) == 0)) { 1092 Disp = DAG.getTargetConstant(imm, N.getValueType()); 1093 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 1094 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1095 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 1096 } else { 1097 Base = N.getOperand(0); 1098 } 1099 return true; // [r+i] 1100 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 1101 // Match LOAD (ADD (X, Lo(G))). 1102 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() 1103 && "Cannot handle constant offsets yet!"); 1104 Disp = N.getOperand(1).getOperand(0); // The global address. 1105 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 1106 Disp.getOpcode() == ISD::TargetGlobalTLSAddress || 1107 Disp.getOpcode() == ISD::TargetConstantPool || 1108 Disp.getOpcode() == ISD::TargetJumpTable); 1109 Base = N.getOperand(0); 1110 return true; // [&g+r] 1111 } 1112 } else if (N.getOpcode() == ISD::OR) { 1113 short imm = 0; 1114 if (isIntS16Immediate(N.getOperand(1), imm) && 1115 (!Aligned || (imm & 3) == 0)) { 1116 // If this is an or of disjoint bitfields, we can codegen this as an add 1117 // (for better address arithmetic) if the LHS and RHS of the OR are 1118 // provably disjoint. 1119 APInt LHSKnownZero, LHSKnownOne; 1120 DAG.ComputeMaskedBits(N.getOperand(0), LHSKnownZero, LHSKnownOne); 1121 1122 if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 1123 // If all of the bits are known zero on the LHS or RHS, the add won't 1124 // carry. 1125 Base = N.getOperand(0); 1126 Disp = DAG.getTargetConstant(imm, N.getValueType()); 1127 return true; 1128 } 1129 } 1130 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 1131 // Loading from a constant address. 1132 1133 // If this address fits entirely in a 16-bit sext immediate field, codegen 1134 // this as "d, 0" 1135 short Imm; 1136 if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) { 1137 Disp = DAG.getTargetConstant(Imm, CN->getValueType(0)); 1138 Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, 1139 CN->getValueType(0)); 1140 return true; 1141 } 1142 1143 // Handle 32-bit sext immediates with LIS + addr mode. 1144 if ((CN->getValueType(0) == MVT::i32 || 1145 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) && 1146 (!Aligned || (CN->getZExtValue() & 3) == 0)) { 1147 int Addr = (int)CN->getZExtValue(); 1148 1149 // Otherwise, break this down into an LIS + disp. 1150 Disp = DAG.getTargetConstant((short)Addr, MVT::i32); 1151 1152 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32); 1153 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8; 1154 Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0); 1155 return true; 1156 } 1157 } 1158 1159 Disp = DAG.getTargetConstant(0, getPointerTy()); 1160 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) { 1161 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1162 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 1163 } else 1164 Base = N; 1165 return true; // [r+0] 1166 } 1167 1168 /// SelectAddressRegRegOnly - Given the specified addressed, force it to be 1169 /// represented as an indexed [r+r] operation. 1170 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base, 1171 SDValue &Index, 1172 SelectionDAG &DAG) const { 1173 // Check to see if we can easily represent this as an [r+r] address. This 1174 // will fail if it thinks that the address is more profitably represented as 1175 // reg+imm, e.g. where imm = 0. 1176 if (SelectAddressRegReg(N, Base, Index, DAG)) 1177 return true; 1178 1179 // If the operand is an addition, always emit this as [r+r], since this is 1180 // better (for code size, and execution, as the memop does the add for free) 1181 // than emitting an explicit add. 1182 if (N.getOpcode() == ISD::ADD) { 1183 Base = N.getOperand(0); 1184 Index = N.getOperand(1); 1185 return true; 1186 } 1187 1188 // Otherwise, do it the hard way, using R0 as the base register. 1189 Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, 1190 N.getValueType()); 1191 Index = N; 1192 return true; 1193 } 1194 1195 /// getPreIndexedAddressParts - returns true by value, base pointer and 1196 /// offset pointer and addressing mode by reference if the node's address 1197 /// can be legally represented as pre-indexed load / store address. 1198 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 1199 SDValue &Offset, 1200 ISD::MemIndexedMode &AM, 1201 SelectionDAG &DAG) const { 1202 if (DisablePPCPreinc) return false; 1203 1204 bool isLoad = true; 1205 SDValue Ptr; 1206 EVT VT; 1207 unsigned Alignment; 1208 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1209 Ptr = LD->getBasePtr(); 1210 VT = LD->getMemoryVT(); 1211 Alignment = LD->getAlignment(); 1212 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 1213 Ptr = ST->getBasePtr(); 1214 VT = ST->getMemoryVT(); 1215 Alignment = ST->getAlignment(); 1216 isLoad = false; 1217 } else 1218 return false; 1219 1220 // PowerPC doesn't have preinc load/store instructions for vectors. 1221 if (VT.isVector()) 1222 return false; 1223 1224 if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) { 1225 1226 // Common code will reject creating a pre-inc form if the base pointer 1227 // is a frame index, or if N is a store and the base pointer is either 1228 // the same as or a predecessor of the value being stored. Check for 1229 // those situations here, and try with swapped Base/Offset instead. 1230 bool Swap = false; 1231 1232 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base)) 1233 Swap = true; 1234 else if (!isLoad) { 1235 SDValue Val = cast<StoreSDNode>(N)->getValue(); 1236 if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode())) 1237 Swap = true; 1238 } 1239 1240 if (Swap) 1241 std::swap(Base, Offset); 1242 1243 AM = ISD::PRE_INC; 1244 return true; 1245 } 1246 1247 // LDU/STU can only handle immediates that are a multiple of 4. 1248 if (VT != MVT::i64) { 1249 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, false)) 1250 return false; 1251 } else { 1252 // LDU/STU need an address with at least 4-byte alignment. 1253 if (Alignment < 4) 1254 return false; 1255 1256 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, true)) 1257 return false; 1258 } 1259 1260 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1261 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 1262 // sext i32 to i64 when addr mode is r+i. 1263 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 && 1264 LD->getExtensionType() == ISD::SEXTLOAD && 1265 isa<ConstantSDNode>(Offset)) 1266 return false; 1267 } 1268 1269 AM = ISD::PRE_INC; 1270 return true; 1271 } 1272 1273 //===----------------------------------------------------------------------===// 1274 // LowerOperation implementation 1275 //===----------------------------------------------------------------------===// 1276 1277 /// GetLabelAccessInfo - Return true if we should reference labels using a 1278 /// PICBase, set the HiOpFlags and LoOpFlags to the target MO flags. 1279 static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags, 1280 unsigned &LoOpFlags, const GlobalValue *GV = 0) { 1281 HiOpFlags = PPCII::MO_HA; 1282 LoOpFlags = PPCII::MO_LO; 1283 1284 // Don't use the pic base if not in PIC relocation model. Or if we are on a 1285 // non-darwin platform. We don't support PIC on other platforms yet. 1286 bool isPIC = TM.getRelocationModel() == Reloc::PIC_ && 1287 TM.getSubtarget<PPCSubtarget>().isDarwin(); 1288 if (isPIC) { 1289 HiOpFlags |= PPCII::MO_PIC_FLAG; 1290 LoOpFlags |= PPCII::MO_PIC_FLAG; 1291 } 1292 1293 // If this is a reference to a global value that requires a non-lazy-ptr, make 1294 // sure that instruction lowering adds it. 1295 if (GV && TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV, TM)) { 1296 HiOpFlags |= PPCII::MO_NLP_FLAG; 1297 LoOpFlags |= PPCII::MO_NLP_FLAG; 1298 1299 if (GV->hasHiddenVisibility()) { 1300 HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 1301 LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 1302 } 1303 } 1304 1305 return isPIC; 1306 } 1307 1308 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, 1309 SelectionDAG &DAG) { 1310 EVT PtrVT = HiPart.getValueType(); 1311 SDValue Zero = DAG.getConstant(0, PtrVT); 1312 SDLoc DL(HiPart); 1313 1314 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); 1315 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); 1316 1317 // With PIC, the first instruction is actually "GR+hi(&G)". 1318 if (isPIC) 1319 Hi = DAG.getNode(ISD::ADD, DL, PtrVT, 1320 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); 1321 1322 // Generate non-pic code that has direct accesses to the constant pool. 1323 // The address of the global is just (hi(&g)+lo(&g)). 1324 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); 1325 } 1326 1327 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 1328 SelectionDAG &DAG) const { 1329 EVT PtrVT = Op.getValueType(); 1330 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 1331 const Constant *C = CP->getConstVal(); 1332 1333 // 64-bit SVR4 ABI code is always position-independent. 1334 // The actual address of the GlobalValue is stored in the TOC. 1335 if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) { 1336 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0); 1337 return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(CP), MVT::i64, GA, 1338 DAG.getRegister(PPC::X2, MVT::i64)); 1339 } 1340 1341 unsigned MOHiFlag, MOLoFlag; 1342 bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag); 1343 SDValue CPIHi = 1344 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag); 1345 SDValue CPILo = 1346 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag); 1347 return LowerLabelRef(CPIHi, CPILo, isPIC, DAG); 1348 } 1349 1350 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 1351 EVT PtrVT = Op.getValueType(); 1352 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 1353 1354 // 64-bit SVR4 ABI code is always position-independent. 1355 // The actual address of the GlobalValue is stored in the TOC. 1356 if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) { 1357 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 1358 return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(JT), MVT::i64, GA, 1359 DAG.getRegister(PPC::X2, MVT::i64)); 1360 } 1361 1362 unsigned MOHiFlag, MOLoFlag; 1363 bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag); 1364 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 1365 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 1366 return LowerLabelRef(JTIHi, JTILo, isPIC, DAG); 1367 } 1368 1369 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 1370 SelectionDAG &DAG) const { 1371 EVT PtrVT = Op.getValueType(); 1372 1373 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 1374 1375 unsigned MOHiFlag, MOLoFlag; 1376 bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag); 1377 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 1378 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 1379 return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG); 1380 } 1381 1382 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 1383 SelectionDAG &DAG) const { 1384 1385 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 1386 SDLoc dl(GA); 1387 const GlobalValue *GV = GA->getGlobal(); 1388 EVT PtrVT = getPointerTy(); 1389 bool is64bit = PPCSubTarget.isPPC64(); 1390 1391 TLSModel::Model Model = getTargetMachine().getTLSModel(GV); 1392 1393 if (Model == TLSModel::LocalExec) { 1394 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 1395 PPCII::MO_TPREL_HA); 1396 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 1397 PPCII::MO_TPREL_LO); 1398 SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2, 1399 is64bit ? MVT::i64 : MVT::i32); 1400 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 1401 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 1402 } 1403 1404 if (!is64bit) 1405 llvm_unreachable("only local-exec is currently supported for ppc32"); 1406 1407 if (Model == TLSModel::InitialExec) { 1408 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 1409 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 1410 PPCII::MO_TLS); 1411 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 1412 SDValue TPOffsetHi = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, 1413 PtrVT, GOTReg, TGA); 1414 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, 1415 PtrVT, TGA, TPOffsetHi); 1416 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 1417 } 1418 1419 if (Model == TLSModel::GeneralDynamic) { 1420 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 1421 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 1422 SDValue GOTEntryHi = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 1423 GOTReg, TGA); 1424 SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSGD_L, dl, PtrVT, 1425 GOTEntryHi, TGA); 1426 1427 // We need a chain node, and don't have one handy. The underlying 1428 // call has no side effects, so using the function entry node 1429 // suffices. 1430 SDValue Chain = DAG.getEntryNode(); 1431 Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, GOTEntry); 1432 SDValue ParmReg = DAG.getRegister(PPC::X3, MVT::i64); 1433 SDValue TLSAddr = DAG.getNode(PPCISD::GET_TLS_ADDR, dl, 1434 PtrVT, ParmReg, TGA); 1435 // The return value from GET_TLS_ADDR really is in X3 already, but 1436 // some hacks are needed here to tie everything together. The extra 1437 // copies dissolve during subsequent transforms. 1438 Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, TLSAddr); 1439 return DAG.getCopyFromReg(Chain, dl, PPC::X3, PtrVT); 1440 } 1441 1442 if (Model == TLSModel::LocalDynamic) { 1443 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 1444 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 1445 SDValue GOTEntryHi = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 1446 GOTReg, TGA); 1447 SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSLD_L, dl, PtrVT, 1448 GOTEntryHi, TGA); 1449 1450 // We need a chain node, and don't have one handy. The underlying 1451 // call has no side effects, so using the function entry node 1452 // suffices. 1453 SDValue Chain = DAG.getEntryNode(); 1454 Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, GOTEntry); 1455 SDValue ParmReg = DAG.getRegister(PPC::X3, MVT::i64); 1456 SDValue TLSAddr = DAG.getNode(PPCISD::GET_TLSLD_ADDR, dl, 1457 PtrVT, ParmReg, TGA); 1458 // The return value from GET_TLSLD_ADDR really is in X3 already, but 1459 // some hacks are needed here to tie everything together. The extra 1460 // copies dissolve during subsequent transforms. 1461 Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, TLSAddr); 1462 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, PtrVT, 1463 Chain, ParmReg, TGA); 1464 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA); 1465 } 1466 1467 llvm_unreachable("Unknown TLS model!"); 1468 } 1469 1470 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 1471 SelectionDAG &DAG) const { 1472 EVT PtrVT = Op.getValueType(); 1473 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 1474 SDLoc DL(GSDN); 1475 const GlobalValue *GV = GSDN->getGlobal(); 1476 1477 // 64-bit SVR4 ABI code is always position-independent. 1478 // The actual address of the GlobalValue is stored in the TOC. 1479 if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) { 1480 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); 1481 return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i64, GA, 1482 DAG.getRegister(PPC::X2, MVT::i64)); 1483 } 1484 1485 unsigned MOHiFlag, MOLoFlag; 1486 bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag, GV); 1487 1488 SDValue GAHi = 1489 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); 1490 SDValue GALo = 1491 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); 1492 1493 SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG); 1494 1495 // If the global reference is actually to a non-lazy-pointer, we have to do an 1496 // extra load to get the address of the global. 1497 if (MOHiFlag & PPCII::MO_NLP_FLAG) 1498 Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(), 1499 false, false, false, 0); 1500 return Ptr; 1501 } 1502 1503 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 1504 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 1505 SDLoc dl(Op); 1506 1507 // If we're comparing for equality to zero, expose the fact that this is 1508 // implented as a ctlz/srl pair on ppc, so that the dag combiner can 1509 // fold the new nodes. 1510 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1511 if (C->isNullValue() && CC == ISD::SETEQ) { 1512 EVT VT = Op.getOperand(0).getValueType(); 1513 SDValue Zext = Op.getOperand(0); 1514 if (VT.bitsLT(MVT::i32)) { 1515 VT = MVT::i32; 1516 Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0)); 1517 } 1518 unsigned Log2b = Log2_32(VT.getSizeInBits()); 1519 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext); 1520 SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz, 1521 DAG.getConstant(Log2b, MVT::i32)); 1522 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc); 1523 } 1524 // Leave comparisons against 0 and -1 alone for now, since they're usually 1525 // optimized. FIXME: revisit this when we can custom lower all setcc 1526 // optimizations. 1527 if (C->isAllOnesValue() || C->isNullValue()) 1528 return SDValue(); 1529 } 1530 1531 // If we have an integer seteq/setne, turn it into a compare against zero 1532 // by xor'ing the rhs with the lhs, which is faster than setting a 1533 // condition register, reading it back out, and masking the correct bit. The 1534 // normal approach here uses sub to do this instead of xor. Using xor exposes 1535 // the result to other bit-twiddling opportunities. 1536 EVT LHSVT = Op.getOperand(0).getValueType(); 1537 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 1538 EVT VT = Op.getValueType(); 1539 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0), 1540 Op.getOperand(1)); 1541 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, LHSVT), CC); 1542 } 1543 return SDValue(); 1544 } 1545 1546 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG, 1547 const PPCSubtarget &Subtarget) const { 1548 SDNode *Node = Op.getNode(); 1549 EVT VT = Node->getValueType(0); 1550 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1551 SDValue InChain = Node->getOperand(0); 1552 SDValue VAListPtr = Node->getOperand(1); 1553 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 1554 SDLoc dl(Node); 1555 1556 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only"); 1557 1558 // gpr_index 1559 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 1560 VAListPtr, MachinePointerInfo(SV), MVT::i8, 1561 false, false, 0); 1562 InChain = GprIndex.getValue(1); 1563 1564 if (VT == MVT::i64) { 1565 // Check if GprIndex is even 1566 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex, 1567 DAG.getConstant(1, MVT::i32)); 1568 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd, 1569 DAG.getConstant(0, MVT::i32), ISD::SETNE); 1570 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex, 1571 DAG.getConstant(1, MVT::i32)); 1572 // Align GprIndex to be even if it isn't 1573 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne, 1574 GprIndex); 1575 } 1576 1577 // fpr index is 1 byte after gpr 1578 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 1579 DAG.getConstant(1, MVT::i32)); 1580 1581 // fpr 1582 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 1583 FprPtr, MachinePointerInfo(SV), MVT::i8, 1584 false, false, 0); 1585 InChain = FprIndex.getValue(1); 1586 1587 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 1588 DAG.getConstant(8, MVT::i32)); 1589 1590 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 1591 DAG.getConstant(4, MVT::i32)); 1592 1593 // areas 1594 SDValue OverflowArea = DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, 1595 MachinePointerInfo(), false, false, 1596 false, 0); 1597 InChain = OverflowArea.getValue(1); 1598 1599 SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, 1600 MachinePointerInfo(), false, false, 1601 false, 0); 1602 InChain = RegSaveArea.getValue(1); 1603 1604 // select overflow_area if index > 8 1605 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex, 1606 DAG.getConstant(8, MVT::i32), ISD::SETLT); 1607 1608 // adjustment constant gpr_index * 4/8 1609 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32, 1610 VT.isInteger() ? GprIndex : FprIndex, 1611 DAG.getConstant(VT.isInteger() ? 4 : 8, 1612 MVT::i32)); 1613 1614 // OurReg = RegSaveArea + RegConstant 1615 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea, 1616 RegConstant); 1617 1618 // Floating types are 32 bytes into RegSaveArea 1619 if (VT.isFloatingPoint()) 1620 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg, 1621 DAG.getConstant(32, MVT::i32)); 1622 1623 // increase {f,g}pr_index by 1 (or 2 if VT is i64) 1624 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32, 1625 VT.isInteger() ? GprIndex : FprIndex, 1626 DAG.getConstant(VT == MVT::i64 ? 2 : 1, 1627 MVT::i32)); 1628 1629 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1, 1630 VT.isInteger() ? VAListPtr : FprPtr, 1631 MachinePointerInfo(SV), 1632 MVT::i8, false, false, 0); 1633 1634 // determine if we should load from reg_save_area or overflow_area 1635 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea); 1636 1637 // increase overflow_area by 4/8 if gpr/fpr > 8 1638 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea, 1639 DAG.getConstant(VT.isInteger() ? 4 : 8, 1640 MVT::i32)); 1641 1642 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea, 1643 OverflowAreaPlusN); 1644 1645 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, 1646 OverflowAreaPtr, 1647 MachinePointerInfo(), 1648 MVT::i32, false, false, 0); 1649 1650 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(), 1651 false, false, false, 0); 1652 } 1653 1654 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG, 1655 const PPCSubtarget &Subtarget) const { 1656 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only"); 1657 1658 // We have to copy the entire va_list struct: 1659 // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte 1660 return DAG.getMemcpy(Op.getOperand(0), Op, 1661 Op.getOperand(1), Op.getOperand(2), 1662 DAG.getConstant(12, MVT::i32), 8, false, true, 1663 MachinePointerInfo(), MachinePointerInfo()); 1664 } 1665 1666 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 1667 SelectionDAG &DAG) const { 1668 return Op.getOperand(0); 1669 } 1670 1671 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 1672 SelectionDAG &DAG) const { 1673 SDValue Chain = Op.getOperand(0); 1674 SDValue Trmp = Op.getOperand(1); // trampoline 1675 SDValue FPtr = Op.getOperand(2); // nested function 1676 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 1677 SDLoc dl(Op); 1678 1679 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1680 bool isPPC64 = (PtrVT == MVT::i64); 1681 Type *IntPtrTy = 1682 DAG.getTargetLoweringInfo().getDataLayout()->getIntPtrType( 1683 *DAG.getContext()); 1684 1685 TargetLowering::ArgListTy Args; 1686 TargetLowering::ArgListEntry Entry; 1687 1688 Entry.Ty = IntPtrTy; 1689 Entry.Node = Trmp; Args.push_back(Entry); 1690 1691 // TrampSize == (isPPC64 ? 48 : 40); 1692 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, 1693 isPPC64 ? MVT::i64 : MVT::i32); 1694 Args.push_back(Entry); 1695 1696 Entry.Node = FPtr; Args.push_back(Entry); 1697 Entry.Node = Nest; Args.push_back(Entry); 1698 1699 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 1700 TargetLowering::CallLoweringInfo CLI(Chain, 1701 Type::getVoidTy(*DAG.getContext()), 1702 false, false, false, false, 0, 1703 CallingConv::C, 1704 /*isTailCall=*/false, 1705 /*doesNotRet=*/false, 1706 /*isReturnValueUsed=*/true, 1707 DAG.getExternalSymbol("__trampoline_setup", PtrVT), 1708 Args, DAG, dl); 1709 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 1710 1711 return CallResult.second; 1712 } 1713 1714 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG, 1715 const PPCSubtarget &Subtarget) const { 1716 MachineFunction &MF = DAG.getMachineFunction(); 1717 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 1718 1719 SDLoc dl(Op); 1720 1721 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) { 1722 // vastart just stores the address of the VarArgsFrameIndex slot into the 1723 // memory location argument. 1724 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1725 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 1726 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1727 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 1728 MachinePointerInfo(SV), 1729 false, false, 0); 1730 } 1731 1732 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 1733 // We suppose the given va_list is already allocated. 1734 // 1735 // typedef struct { 1736 // char gpr; /* index into the array of 8 GPRs 1737 // * stored in the register save area 1738 // * gpr=0 corresponds to r3, 1739 // * gpr=1 to r4, etc. 1740 // */ 1741 // char fpr; /* index into the array of 8 FPRs 1742 // * stored in the register save area 1743 // * fpr=0 corresponds to f1, 1744 // * fpr=1 to f2, etc. 1745 // */ 1746 // char *overflow_arg_area; 1747 // /* location on stack that holds 1748 // * the next overflow argument 1749 // */ 1750 // char *reg_save_area; 1751 // /* where r3:r10 and f1:f8 (if saved) 1752 // * are stored 1753 // */ 1754 // } va_list[1]; 1755 1756 1757 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), MVT::i32); 1758 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), MVT::i32); 1759 1760 1761 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1762 1763 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 1764 PtrVT); 1765 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 1766 PtrVT); 1767 1768 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 1769 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT); 1770 1771 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 1772 SDValue ConstStackOffset = DAG.getConstant(StackOffset, PtrVT); 1773 1774 uint64_t FPROffset = 1; 1775 SDValue ConstFPROffset = DAG.getConstant(FPROffset, PtrVT); 1776 1777 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1778 1779 // Store first byte : number of int regs 1780 SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, 1781 Op.getOperand(1), 1782 MachinePointerInfo(SV), 1783 MVT::i8, false, false, 0); 1784 uint64_t nextOffset = FPROffset; 1785 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 1786 ConstFPROffset); 1787 1788 // Store second byte : number of float regs 1789 SDValue secondStore = 1790 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 1791 MachinePointerInfo(SV, nextOffset), MVT::i8, 1792 false, false, 0); 1793 nextOffset += StackOffset; 1794 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 1795 1796 // Store second word : arguments given on stack 1797 SDValue thirdStore = 1798 DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 1799 MachinePointerInfo(SV, nextOffset), 1800 false, false, 0); 1801 nextOffset += FrameOffset; 1802 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 1803 1804 // Store third word : arguments given in registers 1805 return DAG.getStore(thirdStore, dl, FR, nextPtr, 1806 MachinePointerInfo(SV, nextOffset), 1807 false, false, 0); 1808 1809 } 1810 1811 #include "PPCGenCallingConv.inc" 1812 1813 // Function whose sole purpose is to kill compiler warnings 1814 // stemming from unused functions included from PPCGenCallingConv.inc. 1815 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const { 1816 return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS; 1817 } 1818 1819 bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 1820 CCValAssign::LocInfo &LocInfo, 1821 ISD::ArgFlagsTy &ArgFlags, 1822 CCState &State) { 1823 return true; 1824 } 1825 1826 bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, 1827 MVT &LocVT, 1828 CCValAssign::LocInfo &LocInfo, 1829 ISD::ArgFlagsTy &ArgFlags, 1830 CCState &State) { 1831 static const uint16_t ArgRegs[] = { 1832 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 1833 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 1834 }; 1835 const unsigned NumArgRegs = array_lengthof(ArgRegs); 1836 1837 unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs); 1838 1839 // Skip one register if the first unallocated register has an even register 1840 // number and there are still argument registers available which have not been 1841 // allocated yet. RegNum is actually an index into ArgRegs, which means we 1842 // need to skip a register if RegNum is odd. 1843 if (RegNum != NumArgRegs && RegNum % 2 == 1) { 1844 State.AllocateReg(ArgRegs[RegNum]); 1845 } 1846 1847 // Always return false here, as this function only makes sure that the first 1848 // unallocated register has an odd register number and does not actually 1849 // allocate a register for the current argument. 1850 return false; 1851 } 1852 1853 bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT, 1854 MVT &LocVT, 1855 CCValAssign::LocInfo &LocInfo, 1856 ISD::ArgFlagsTy &ArgFlags, 1857 CCState &State) { 1858 static const uint16_t ArgRegs[] = { 1859 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1860 PPC::F8 1861 }; 1862 1863 const unsigned NumArgRegs = array_lengthof(ArgRegs); 1864 1865 unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs); 1866 1867 // If there is only one Floating-point register left we need to put both f64 1868 // values of a split ppc_fp128 value on the stack. 1869 if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) { 1870 State.AllocateReg(ArgRegs[RegNum]); 1871 } 1872 1873 // Always return false here, as this function only makes sure that the two f64 1874 // values a ppc_fp128 value is split into are both passed in registers or both 1875 // passed on the stack and does not actually allocate a register for the 1876 // current argument. 1877 return false; 1878 } 1879 1880 /// GetFPR - Get the set of FP registers that should be allocated for arguments, 1881 /// on Darwin. 1882 static const uint16_t *GetFPR() { 1883 static const uint16_t FPR[] = { 1884 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1885 PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13 1886 }; 1887 1888 return FPR; 1889 } 1890 1891 /// CalculateStackSlotSize - Calculates the size reserved for this argument on 1892 /// the stack. 1893 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 1894 unsigned PtrByteSize) { 1895 unsigned ArgSize = ArgVT.getSizeInBits()/8; 1896 if (Flags.isByVal()) 1897 ArgSize = Flags.getByValSize(); 1898 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 1899 1900 return ArgSize; 1901 } 1902 1903 SDValue 1904 PPCTargetLowering::LowerFormalArguments(SDValue Chain, 1905 CallingConv::ID CallConv, bool isVarArg, 1906 const SmallVectorImpl<ISD::InputArg> 1907 &Ins, 1908 SDLoc dl, SelectionDAG &DAG, 1909 SmallVectorImpl<SDValue> &InVals) 1910 const { 1911 if (PPCSubTarget.isSVR4ABI()) { 1912 if (PPCSubTarget.isPPC64()) 1913 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, 1914 dl, DAG, InVals); 1915 else 1916 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, 1917 dl, DAG, InVals); 1918 } else { 1919 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, 1920 dl, DAG, InVals); 1921 } 1922 } 1923 1924 SDValue 1925 PPCTargetLowering::LowerFormalArguments_32SVR4( 1926 SDValue Chain, 1927 CallingConv::ID CallConv, bool isVarArg, 1928 const SmallVectorImpl<ISD::InputArg> 1929 &Ins, 1930 SDLoc dl, SelectionDAG &DAG, 1931 SmallVectorImpl<SDValue> &InVals) const { 1932 1933 // 32-bit SVR4 ABI Stack Frame Layout: 1934 // +-----------------------------------+ 1935 // +--> | Back chain | 1936 // | +-----------------------------------+ 1937 // | | Floating-point register save area | 1938 // | +-----------------------------------+ 1939 // | | General register save area | 1940 // | +-----------------------------------+ 1941 // | | CR save word | 1942 // | +-----------------------------------+ 1943 // | | VRSAVE save word | 1944 // | +-----------------------------------+ 1945 // | | Alignment padding | 1946 // | +-----------------------------------+ 1947 // | | Vector register save area | 1948 // | +-----------------------------------+ 1949 // | | Local variable space | 1950 // | +-----------------------------------+ 1951 // | | Parameter list area | 1952 // | +-----------------------------------+ 1953 // | | LR save word | 1954 // | +-----------------------------------+ 1955 // SP--> +--- | Back chain | 1956 // +-----------------------------------+ 1957 // 1958 // Specifications: 1959 // System V Application Binary Interface PowerPC Processor Supplement 1960 // AltiVec Technology Programming Interface Manual 1961 1962 MachineFunction &MF = DAG.getMachineFunction(); 1963 MachineFrameInfo *MFI = MF.getFrameInfo(); 1964 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 1965 1966 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1967 // Potential tail calls could cause overwriting of argument stack slots. 1968 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 1969 (CallConv == CallingConv::Fast)); 1970 unsigned PtrByteSize = 4; 1971 1972 // Assign locations to all of the incoming arguments. 1973 SmallVector<CCValAssign, 16> ArgLocs; 1974 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1975 getTargetMachine(), ArgLocs, *DAG.getContext()); 1976 1977 // Reserve space for the linkage area on the stack. 1978 CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize); 1979 1980 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 1981 1982 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1983 CCValAssign &VA = ArgLocs[i]; 1984 1985 // Arguments stored in registers. 1986 if (VA.isRegLoc()) { 1987 const TargetRegisterClass *RC; 1988 EVT ValVT = VA.getValVT(); 1989 1990 switch (ValVT.getSimpleVT().SimpleTy) { 1991 default: 1992 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 1993 case MVT::i32: 1994 RC = &PPC::GPRCRegClass; 1995 break; 1996 case MVT::f32: 1997 RC = &PPC::F4RCRegClass; 1998 break; 1999 case MVT::f64: 2000 RC = &PPC::F8RCRegClass; 2001 break; 2002 case MVT::v16i8: 2003 case MVT::v8i16: 2004 case MVT::v4i32: 2005 case MVT::v4f32: 2006 RC = &PPC::VRRCRegClass; 2007 break; 2008 } 2009 2010 // Transform the arguments stored in physical registers into virtual ones. 2011 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2012 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, ValVT); 2013 2014 InVals.push_back(ArgValue); 2015 } else { 2016 // Argument stored in memory. 2017 assert(VA.isMemLoc()); 2018 2019 unsigned ArgSize = VA.getLocVT().getSizeInBits() / 8; 2020 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), 2021 isImmutable); 2022 2023 // Create load nodes to retrieve arguments from the stack. 2024 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2025 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2026 MachinePointerInfo(), 2027 false, false, false, 0)); 2028 } 2029 } 2030 2031 // Assign locations to all of the incoming aggregate by value arguments. 2032 // Aggregates passed by value are stored in the local variable space of the 2033 // caller's stack frame, right above the parameter list area. 2034 SmallVector<CCValAssign, 16> ByValArgLocs; 2035 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 2036 getTargetMachine(), ByValArgLocs, *DAG.getContext()); 2037 2038 // Reserve stack space for the allocations in CCInfo. 2039 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 2040 2041 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 2042 2043 // Area that is at least reserved in the caller of this function. 2044 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 2045 2046 // Set the size that is at least reserved in caller of this function. Tail 2047 // call optimized function's reserved stack space needs to be aligned so that 2048 // taking the difference between two stack areas will result in an aligned 2049 // stack. 2050 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 2051 2052 MinReservedArea = 2053 std::max(MinReservedArea, 2054 PPCFrameLowering::getMinCallFrameSize(false, false)); 2055 2056 unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameLowering()-> 2057 getStackAlignment(); 2058 unsigned AlignMask = TargetAlign-1; 2059 MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask; 2060 2061 FI->setMinReservedArea(MinReservedArea); 2062 2063 SmallVector<SDValue, 8> MemOps; 2064 2065 // If the function takes variable number of arguments, make a frame index for 2066 // the start of the first vararg value... for expansion of llvm.va_start. 2067 if (isVarArg) { 2068 static const uint16_t GPArgRegs[] = { 2069 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2070 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2071 }; 2072 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); 2073 2074 static const uint16_t FPArgRegs[] = { 2075 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 2076 PPC::F8 2077 }; 2078 const unsigned NumFPArgRegs = array_lengthof(FPArgRegs); 2079 2080 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs, 2081 NumGPArgRegs)); 2082 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs, 2083 NumFPArgRegs)); 2084 2085 // Make room for NumGPArgRegs and NumFPArgRegs. 2086 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + 2087 NumFPArgRegs * EVT(MVT::f64).getSizeInBits()/8; 2088 2089 FuncInfo->setVarArgsStackOffset( 2090 MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 2091 CCInfo.getNextStackOffset(), true)); 2092 2093 FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false)); 2094 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2095 2096 // The fixed integer arguments of a variadic function are stored to the 2097 // VarArgsFrameIndex on the stack so that they may be loaded by deferencing 2098 // the result of va_next. 2099 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) { 2100 // Get an existing live-in vreg, or add a new one. 2101 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]); 2102 if (!VReg) 2103 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); 2104 2105 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2106 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2107 MachinePointerInfo(), false, false, 0); 2108 MemOps.push_back(Store); 2109 // Increment the address by four for the next argument to store 2110 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); 2111 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2112 } 2113 2114 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 2115 // is set. 2116 // The double arguments are stored to the VarArgsFrameIndex 2117 // on the stack. 2118 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) { 2119 // Get an existing live-in vreg, or add a new one. 2120 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); 2121 if (!VReg) 2122 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 2123 2124 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 2125 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2126 MachinePointerInfo(), false, false, 0); 2127 MemOps.push_back(Store); 2128 // Increment the address by eight for the next argument to store 2129 SDValue PtrOff = DAG.getConstant(EVT(MVT::f64).getSizeInBits()/8, 2130 PtrVT); 2131 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2132 } 2133 } 2134 2135 if (!MemOps.empty()) 2136 Chain = DAG.getNode(ISD::TokenFactor, dl, 2137 MVT::Other, &MemOps[0], MemOps.size()); 2138 2139 return Chain; 2140 } 2141 2142 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 2143 // value to MVT::i64 and then truncate to the correct register size. 2144 SDValue 2145 PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT, 2146 SelectionDAG &DAG, SDValue ArgVal, 2147 SDLoc dl) const { 2148 if (Flags.isSExt()) 2149 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 2150 DAG.getValueType(ObjectVT)); 2151 else if (Flags.isZExt()) 2152 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 2153 DAG.getValueType(ObjectVT)); 2154 2155 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); 2156 } 2157 2158 // Set the size that is at least reserved in caller of this function. Tail 2159 // call optimized functions' reserved stack space needs to be aligned so that 2160 // taking the difference between two stack areas will result in an aligned 2161 // stack. 2162 void 2163 PPCTargetLowering::setMinReservedArea(MachineFunction &MF, SelectionDAG &DAG, 2164 unsigned nAltivecParamsAtEnd, 2165 unsigned MinReservedArea, 2166 bool isPPC64) const { 2167 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 2168 // Add the Altivec parameters at the end, if needed. 2169 if (nAltivecParamsAtEnd) { 2170 MinReservedArea = ((MinReservedArea+15)/16)*16; 2171 MinReservedArea += 16*nAltivecParamsAtEnd; 2172 } 2173 MinReservedArea = 2174 std::max(MinReservedArea, 2175 PPCFrameLowering::getMinCallFrameSize(isPPC64, true)); 2176 unsigned TargetAlign 2177 = DAG.getMachineFunction().getTarget().getFrameLowering()-> 2178 getStackAlignment(); 2179 unsigned AlignMask = TargetAlign-1; 2180 MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask; 2181 FI->setMinReservedArea(MinReservedArea); 2182 } 2183 2184 SDValue 2185 PPCTargetLowering::LowerFormalArguments_64SVR4( 2186 SDValue Chain, 2187 CallingConv::ID CallConv, bool isVarArg, 2188 const SmallVectorImpl<ISD::InputArg> 2189 &Ins, 2190 SDLoc dl, SelectionDAG &DAG, 2191 SmallVectorImpl<SDValue> &InVals) const { 2192 // TODO: add description of PPC stack frame format, or at least some docs. 2193 // 2194 MachineFunction &MF = DAG.getMachineFunction(); 2195 MachineFrameInfo *MFI = MF.getFrameInfo(); 2196 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2197 2198 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2199 // Potential tail calls could cause overwriting of argument stack slots. 2200 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 2201 (CallConv == CallingConv::Fast)); 2202 unsigned PtrByteSize = 8; 2203 2204 unsigned ArgOffset = PPCFrameLowering::getLinkageSize(true, true); 2205 // Area that is at least reserved in caller of this function. 2206 unsigned MinReservedArea = ArgOffset; 2207 2208 static const uint16_t GPR[] = { 2209 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 2210 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 2211 }; 2212 2213 static const uint16_t *FPR = GetFPR(); 2214 2215 static const uint16_t VR[] = { 2216 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 2217 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 2218 }; 2219 2220 const unsigned Num_GPR_Regs = array_lengthof(GPR); 2221 const unsigned Num_FPR_Regs = 13; 2222 const unsigned Num_VR_Regs = array_lengthof(VR); 2223 2224 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 2225 2226 // Add DAG nodes to load the arguments or copy them out of registers. On 2227 // entry to a function on PPC, the arguments start after the linkage area, 2228 // although the first ones are often in registers. 2229 2230 SmallVector<SDValue, 8> MemOps; 2231 unsigned nAltivecParamsAtEnd = 0; 2232 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 2233 unsigned CurArgIdx = 0; 2234 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 2235 SDValue ArgVal; 2236 bool needsLoad = false; 2237 EVT ObjectVT = Ins[ArgNo].VT; 2238 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 2239 unsigned ArgSize = ObjSize; 2240 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 2241 std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx); 2242 CurArgIdx = Ins[ArgNo].OrigArgIndex; 2243 2244 unsigned CurArgOffset = ArgOffset; 2245 2246 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 2247 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 2248 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 2249 if (isVarArg) { 2250 MinReservedArea = ((MinReservedArea+15)/16)*16; 2251 MinReservedArea += CalculateStackSlotSize(ObjectVT, 2252 Flags, 2253 PtrByteSize); 2254 } else 2255 nAltivecParamsAtEnd++; 2256 } else 2257 // Calculate min reserved area. 2258 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 2259 Flags, 2260 PtrByteSize); 2261 2262 // FIXME the codegen can be much improved in some cases. 2263 // We do not have to keep everything in memory. 2264 if (Flags.isByVal()) { 2265 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 2266 ObjSize = Flags.getByValSize(); 2267 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2268 // Empty aggregate parameters do not take up registers. Examples: 2269 // struct { } a; 2270 // union { } b; 2271 // int c[0]; 2272 // etc. However, we have to provide a place-holder in InVals, so 2273 // pretend we have an 8-byte item at the current address for that 2274 // purpose. 2275 if (!ObjSize) { 2276 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 2277 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2278 InVals.push_back(FIN); 2279 continue; 2280 } 2281 // All aggregates smaller than 8 bytes must be passed right-justified. 2282 if (ObjSize < PtrByteSize) 2283 CurArgOffset = CurArgOffset + (PtrByteSize - ObjSize); 2284 // The value of the object is its address. 2285 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true); 2286 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2287 InVals.push_back(FIN); 2288 2289 if (ObjSize < 8) { 2290 if (GPR_idx != Num_GPR_Regs) { 2291 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2292 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2293 SDValue Store; 2294 2295 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 2296 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 2297 (ObjSize == 2 ? MVT::i16 : MVT::i32)); 2298 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 2299 MachinePointerInfo(FuncArg, CurArgOffset), 2300 ObjType, false, false, 0); 2301 } else { 2302 // For sizes that don't fit a truncating store (3, 5, 6, 7), 2303 // store the whole register as-is to the parameter save area 2304 // slot. The address of the parameter was already calculated 2305 // above (InVals.push_back(FIN)) to be the right-justified 2306 // offset within the slot. For this store, we need a new 2307 // frame index that points at the beginning of the slot. 2308 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 2309 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2310 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2311 MachinePointerInfo(FuncArg, ArgOffset), 2312 false, false, 0); 2313 } 2314 2315 MemOps.push_back(Store); 2316 ++GPR_idx; 2317 } 2318 // Whether we copied from a register or not, advance the offset 2319 // into the parameter save area by a full doubleword. 2320 ArgOffset += PtrByteSize; 2321 continue; 2322 } 2323 2324 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 2325 // Store whatever pieces of the object are in registers 2326 // to memory. ArgOffset will be the address of the beginning 2327 // of the object. 2328 if (GPR_idx != Num_GPR_Regs) { 2329 unsigned VReg; 2330 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2331 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 2332 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2333 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2334 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2335 MachinePointerInfo(FuncArg, ArgOffset), 2336 false, false, 0); 2337 MemOps.push_back(Store); 2338 ++GPR_idx; 2339 ArgOffset += PtrByteSize; 2340 } else { 2341 ArgOffset += ArgSize - j; 2342 break; 2343 } 2344 } 2345 continue; 2346 } 2347 2348 switch (ObjectVT.getSimpleVT().SimpleTy) { 2349 default: llvm_unreachable("Unhandled argument type!"); 2350 case MVT::i32: 2351 case MVT::i64: 2352 if (GPR_idx != Num_GPR_Regs) { 2353 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2354 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 2355 2356 if (ObjectVT == MVT::i32) 2357 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 2358 // value to MVT::i64 and then truncate to the correct register size. 2359 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 2360 2361 ++GPR_idx; 2362 } else { 2363 needsLoad = true; 2364 ArgSize = PtrByteSize; 2365 } 2366 ArgOffset += 8; 2367 break; 2368 2369 case MVT::f32: 2370 case MVT::f64: 2371 // Every 8 bytes of argument space consumes one of the GPRs available for 2372 // argument passing. 2373 if (GPR_idx != Num_GPR_Regs) { 2374 ++GPR_idx; 2375 } 2376 if (FPR_idx != Num_FPR_Regs) { 2377 unsigned VReg; 2378 2379 if (ObjectVT == MVT::f32) 2380 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 2381 else 2382 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 2383 2384 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2385 ++FPR_idx; 2386 } else { 2387 needsLoad = true; 2388 ArgSize = PtrByteSize; 2389 } 2390 2391 ArgOffset += 8; 2392 break; 2393 case MVT::v4f32: 2394 case MVT::v4i32: 2395 case MVT::v8i16: 2396 case MVT::v16i8: 2397 // Note that vector arguments in registers don't reserve stack space, 2398 // except in varargs functions. 2399 if (VR_idx != Num_VR_Regs) { 2400 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 2401 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2402 if (isVarArg) { 2403 while ((ArgOffset % 16) != 0) { 2404 ArgOffset += PtrByteSize; 2405 if (GPR_idx != Num_GPR_Regs) 2406 GPR_idx++; 2407 } 2408 ArgOffset += 16; 2409 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 2410 } 2411 ++VR_idx; 2412 } else { 2413 // Vectors are aligned. 2414 ArgOffset = ((ArgOffset+15)/16)*16; 2415 CurArgOffset = ArgOffset; 2416 ArgOffset += 16; 2417 needsLoad = true; 2418 } 2419 break; 2420 } 2421 2422 // We need to load the argument to a virtual register if we determined 2423 // above that we ran out of physical registers of the appropriate type. 2424 if (needsLoad) { 2425 int FI = MFI->CreateFixedObject(ObjSize, 2426 CurArgOffset + (ArgSize - ObjSize), 2427 isImmutable); 2428 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2429 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(), 2430 false, false, false, 0); 2431 } 2432 2433 InVals.push_back(ArgVal); 2434 } 2435 2436 // Set the size that is at least reserved in caller of this function. Tail 2437 // call optimized functions' reserved stack space needs to be aligned so that 2438 // taking the difference between two stack areas will result in an aligned 2439 // stack. 2440 setMinReservedArea(MF, DAG, nAltivecParamsAtEnd, MinReservedArea, true); 2441 2442 // If the function takes variable number of arguments, make a frame index for 2443 // the start of the first vararg value... for expansion of llvm.va_start. 2444 if (isVarArg) { 2445 int Depth = ArgOffset; 2446 2447 FuncInfo->setVarArgsFrameIndex( 2448 MFI->CreateFixedObject(PtrByteSize, Depth, true)); 2449 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2450 2451 // If this function is vararg, store any remaining integer argument regs 2452 // to their spots on the stack so that they may be loaded by deferencing the 2453 // result of va_next. 2454 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { 2455 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2456 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2457 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2458 MachinePointerInfo(), false, false, 0); 2459 MemOps.push_back(Store); 2460 // Increment the address by four for the next argument to store 2461 SDValue PtrOff = DAG.getConstant(PtrByteSize, PtrVT); 2462 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2463 } 2464 } 2465 2466 if (!MemOps.empty()) 2467 Chain = DAG.getNode(ISD::TokenFactor, dl, 2468 MVT::Other, &MemOps[0], MemOps.size()); 2469 2470 return Chain; 2471 } 2472 2473 SDValue 2474 PPCTargetLowering::LowerFormalArguments_Darwin( 2475 SDValue Chain, 2476 CallingConv::ID CallConv, bool isVarArg, 2477 const SmallVectorImpl<ISD::InputArg> 2478 &Ins, 2479 SDLoc dl, SelectionDAG &DAG, 2480 SmallVectorImpl<SDValue> &InVals) const { 2481 // TODO: add description of PPC stack frame format, or at least some docs. 2482 // 2483 MachineFunction &MF = DAG.getMachineFunction(); 2484 MachineFrameInfo *MFI = MF.getFrameInfo(); 2485 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2486 2487 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2488 bool isPPC64 = PtrVT == MVT::i64; 2489 // Potential tail calls could cause overwriting of argument stack slots. 2490 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 2491 (CallConv == CallingConv::Fast)); 2492 unsigned PtrByteSize = isPPC64 ? 8 : 4; 2493 2494 unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true); 2495 // Area that is at least reserved in caller of this function. 2496 unsigned MinReservedArea = ArgOffset; 2497 2498 static const uint16_t GPR_32[] = { // 32-bit registers. 2499 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2500 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2501 }; 2502 static const uint16_t GPR_64[] = { // 64-bit registers. 2503 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 2504 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 2505 }; 2506 2507 static const uint16_t *FPR = GetFPR(); 2508 2509 static const uint16_t VR[] = { 2510 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 2511 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 2512 }; 2513 2514 const unsigned Num_GPR_Regs = array_lengthof(GPR_32); 2515 const unsigned Num_FPR_Regs = 13; 2516 const unsigned Num_VR_Regs = array_lengthof( VR); 2517 2518 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 2519 2520 const uint16_t *GPR = isPPC64 ? GPR_64 : GPR_32; 2521 2522 // In 32-bit non-varargs functions, the stack space for vectors is after the 2523 // stack space for non-vectors. We do not use this space unless we have 2524 // too many vectors to fit in registers, something that only occurs in 2525 // constructed examples:), but we have to walk the arglist to figure 2526 // that out...for the pathological case, compute VecArgOffset as the 2527 // start of the vector parameter area. Computing VecArgOffset is the 2528 // entire point of the following loop. 2529 unsigned VecArgOffset = ArgOffset; 2530 if (!isVarArg && !isPPC64) { 2531 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; 2532 ++ArgNo) { 2533 EVT ObjectVT = Ins[ArgNo].VT; 2534 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 2535 2536 if (Flags.isByVal()) { 2537 // ObjSize is the true size, ArgSize rounded up to multiple of regs. 2538 unsigned ObjSize = Flags.getByValSize(); 2539 unsigned ArgSize = 2540 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2541 VecArgOffset += ArgSize; 2542 continue; 2543 } 2544 2545 switch(ObjectVT.getSimpleVT().SimpleTy) { 2546 default: llvm_unreachable("Unhandled argument type!"); 2547 case MVT::i32: 2548 case MVT::f32: 2549 VecArgOffset += 4; 2550 break; 2551 case MVT::i64: // PPC64 2552 case MVT::f64: 2553 // FIXME: We are guaranteed to be !isPPC64 at this point. 2554 // Does MVT::i64 apply? 2555 VecArgOffset += 8; 2556 break; 2557 case MVT::v4f32: 2558 case MVT::v4i32: 2559 case MVT::v8i16: 2560 case MVT::v16i8: 2561 // Nothing to do, we're only looking at Nonvector args here. 2562 break; 2563 } 2564 } 2565 } 2566 // We've found where the vector parameter area in memory is. Skip the 2567 // first 12 parameters; these don't use that memory. 2568 VecArgOffset = ((VecArgOffset+15)/16)*16; 2569 VecArgOffset += 12*16; 2570 2571 // Add DAG nodes to load the arguments or copy them out of registers. On 2572 // entry to a function on PPC, the arguments start after the linkage area, 2573 // although the first ones are often in registers. 2574 2575 SmallVector<SDValue, 8> MemOps; 2576 unsigned nAltivecParamsAtEnd = 0; 2577 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 2578 unsigned CurArgIdx = 0; 2579 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 2580 SDValue ArgVal; 2581 bool needsLoad = false; 2582 EVT ObjectVT = Ins[ArgNo].VT; 2583 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 2584 unsigned ArgSize = ObjSize; 2585 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 2586 std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx); 2587 CurArgIdx = Ins[ArgNo].OrigArgIndex; 2588 2589 unsigned CurArgOffset = ArgOffset; 2590 2591 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 2592 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 2593 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 2594 if (isVarArg || isPPC64) { 2595 MinReservedArea = ((MinReservedArea+15)/16)*16; 2596 MinReservedArea += CalculateStackSlotSize(ObjectVT, 2597 Flags, 2598 PtrByteSize); 2599 } else nAltivecParamsAtEnd++; 2600 } else 2601 // Calculate min reserved area. 2602 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 2603 Flags, 2604 PtrByteSize); 2605 2606 // FIXME the codegen can be much improved in some cases. 2607 // We do not have to keep everything in memory. 2608 if (Flags.isByVal()) { 2609 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 2610 ObjSize = Flags.getByValSize(); 2611 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2612 // Objects of size 1 and 2 are right justified, everything else is 2613 // left justified. This means the memory address is adjusted forwards. 2614 if (ObjSize==1 || ObjSize==2) { 2615 CurArgOffset = CurArgOffset + (4 - ObjSize); 2616 } 2617 // The value of the object is its address. 2618 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true); 2619 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2620 InVals.push_back(FIN); 2621 if (ObjSize==1 || ObjSize==2) { 2622 if (GPR_idx != Num_GPR_Regs) { 2623 unsigned VReg; 2624 if (isPPC64) 2625 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2626 else 2627 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2628 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2629 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16; 2630 SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 2631 MachinePointerInfo(FuncArg, 2632 CurArgOffset), 2633 ObjType, false, false, 0); 2634 MemOps.push_back(Store); 2635 ++GPR_idx; 2636 } 2637 2638 ArgOffset += PtrByteSize; 2639 2640 continue; 2641 } 2642 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 2643 // Store whatever pieces of the object are in registers 2644 // to memory. ArgOffset will be the address of the beginning 2645 // of the object. 2646 if (GPR_idx != Num_GPR_Regs) { 2647 unsigned VReg; 2648 if (isPPC64) 2649 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2650 else 2651 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2652 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 2653 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2654 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2655 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2656 MachinePointerInfo(FuncArg, ArgOffset), 2657 false, false, 0); 2658 MemOps.push_back(Store); 2659 ++GPR_idx; 2660 ArgOffset += PtrByteSize; 2661 } else { 2662 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 2663 break; 2664 } 2665 } 2666 continue; 2667 } 2668 2669 switch (ObjectVT.getSimpleVT().SimpleTy) { 2670 default: llvm_unreachable("Unhandled argument type!"); 2671 case MVT::i32: 2672 if (!isPPC64) { 2673 if (GPR_idx != Num_GPR_Regs) { 2674 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2675 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 2676 ++GPR_idx; 2677 } else { 2678 needsLoad = true; 2679 ArgSize = PtrByteSize; 2680 } 2681 // All int arguments reserve stack space in the Darwin ABI. 2682 ArgOffset += PtrByteSize; 2683 break; 2684 } 2685 // FALLTHROUGH 2686 case MVT::i64: // PPC64 2687 if (GPR_idx != Num_GPR_Regs) { 2688 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2689 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 2690 2691 if (ObjectVT == MVT::i32) 2692 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 2693 // value to MVT::i64 and then truncate to the correct register size. 2694 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 2695 2696 ++GPR_idx; 2697 } else { 2698 needsLoad = true; 2699 ArgSize = PtrByteSize; 2700 } 2701 // All int arguments reserve stack space in the Darwin ABI. 2702 ArgOffset += 8; 2703 break; 2704 2705 case MVT::f32: 2706 case MVT::f64: 2707 // Every 4 bytes of argument space consumes one of the GPRs available for 2708 // argument passing. 2709 if (GPR_idx != Num_GPR_Regs) { 2710 ++GPR_idx; 2711 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 2712 ++GPR_idx; 2713 } 2714 if (FPR_idx != Num_FPR_Regs) { 2715 unsigned VReg; 2716 2717 if (ObjectVT == MVT::f32) 2718 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 2719 else 2720 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 2721 2722 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2723 ++FPR_idx; 2724 } else { 2725 needsLoad = true; 2726 } 2727 2728 // All FP arguments reserve stack space in the Darwin ABI. 2729 ArgOffset += isPPC64 ? 8 : ObjSize; 2730 break; 2731 case MVT::v4f32: 2732 case MVT::v4i32: 2733 case MVT::v8i16: 2734 case MVT::v16i8: 2735 // Note that vector arguments in registers don't reserve stack space, 2736 // except in varargs functions. 2737 if (VR_idx != Num_VR_Regs) { 2738 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 2739 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2740 if (isVarArg) { 2741 while ((ArgOffset % 16) != 0) { 2742 ArgOffset += PtrByteSize; 2743 if (GPR_idx != Num_GPR_Regs) 2744 GPR_idx++; 2745 } 2746 ArgOffset += 16; 2747 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 2748 } 2749 ++VR_idx; 2750 } else { 2751 if (!isVarArg && !isPPC64) { 2752 // Vectors go after all the nonvectors. 2753 CurArgOffset = VecArgOffset; 2754 VecArgOffset += 16; 2755 } else { 2756 // Vectors are aligned. 2757 ArgOffset = ((ArgOffset+15)/16)*16; 2758 CurArgOffset = ArgOffset; 2759 ArgOffset += 16; 2760 } 2761 needsLoad = true; 2762 } 2763 break; 2764 } 2765 2766 // We need to load the argument to a virtual register if we determined above 2767 // that we ran out of physical registers of the appropriate type. 2768 if (needsLoad) { 2769 int FI = MFI->CreateFixedObject(ObjSize, 2770 CurArgOffset + (ArgSize - ObjSize), 2771 isImmutable); 2772 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2773 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(), 2774 false, false, false, 0); 2775 } 2776 2777 InVals.push_back(ArgVal); 2778 } 2779 2780 // Set the size that is at least reserved in caller of this function. Tail 2781 // call optimized functions' reserved stack space needs to be aligned so that 2782 // taking the difference between two stack areas will result in an aligned 2783 // stack. 2784 setMinReservedArea(MF, DAG, nAltivecParamsAtEnd, MinReservedArea, isPPC64); 2785 2786 // If the function takes variable number of arguments, make a frame index for 2787 // the start of the first vararg value... for expansion of llvm.va_start. 2788 if (isVarArg) { 2789 int Depth = ArgOffset; 2790 2791 FuncInfo->setVarArgsFrameIndex( 2792 MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 2793 Depth, true)); 2794 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2795 2796 // If this function is vararg, store any remaining integer argument regs 2797 // to their spots on the stack so that they may be loaded by deferencing the 2798 // result of va_next. 2799 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { 2800 unsigned VReg; 2801 2802 if (isPPC64) 2803 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2804 else 2805 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2806 2807 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2808 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2809 MachinePointerInfo(), false, false, 0); 2810 MemOps.push_back(Store); 2811 // Increment the address by four for the next argument to store 2812 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); 2813 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2814 } 2815 } 2816 2817 if (!MemOps.empty()) 2818 Chain = DAG.getNode(ISD::TokenFactor, dl, 2819 MVT::Other, &MemOps[0], MemOps.size()); 2820 2821 return Chain; 2822 } 2823 2824 /// CalculateParameterAndLinkageAreaSize - Get the size of the parameter plus 2825 /// linkage area for the Darwin ABI, or the 64-bit SVR4 ABI. 2826 static unsigned 2827 CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG, 2828 bool isPPC64, 2829 bool isVarArg, 2830 unsigned CC, 2831 const SmallVectorImpl<ISD::OutputArg> 2832 &Outs, 2833 const SmallVectorImpl<SDValue> &OutVals, 2834 unsigned &nAltivecParamsAtEnd) { 2835 // Count how many bytes are to be pushed on the stack, including the linkage 2836 // area, and parameter passing area. We start with 24/48 bytes, which is 2837 // prereserved space for [SP][CR][LR][3 x unused]. 2838 unsigned NumBytes = PPCFrameLowering::getLinkageSize(isPPC64, true); 2839 unsigned NumOps = Outs.size(); 2840 unsigned PtrByteSize = isPPC64 ? 8 : 4; 2841 2842 // Add up all the space actually used. 2843 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually 2844 // they all go in registers, but we must reserve stack space for them for 2845 // possible use by the caller. In varargs or 64-bit calls, parameters are 2846 // assigned stack space in order, with padding so Altivec parameters are 2847 // 16-byte aligned. 2848 nAltivecParamsAtEnd = 0; 2849 for (unsigned i = 0; i != NumOps; ++i) { 2850 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2851 EVT ArgVT = Outs[i].VT; 2852 // Varargs Altivec parameters are padded to a 16 byte boundary. 2853 if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 || 2854 ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) { 2855 if (!isVarArg && !isPPC64) { 2856 // Non-varargs Altivec parameters go after all the non-Altivec 2857 // parameters; handle those later so we know how much padding we need. 2858 nAltivecParamsAtEnd++; 2859 continue; 2860 } 2861 // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary. 2862 NumBytes = ((NumBytes+15)/16)*16; 2863 } 2864 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 2865 } 2866 2867 // Allow for Altivec parameters at the end, if needed. 2868 if (nAltivecParamsAtEnd) { 2869 NumBytes = ((NumBytes+15)/16)*16; 2870 NumBytes += 16*nAltivecParamsAtEnd; 2871 } 2872 2873 // The prolog code of the callee may store up to 8 GPR argument registers to 2874 // the stack, allowing va_start to index over them in memory if its varargs. 2875 // Because we cannot tell if this is needed on the caller side, we have to 2876 // conservatively assume that it is needed. As such, make sure we have at 2877 // least enough stack space for the caller to store the 8 GPRs. 2878 NumBytes = std::max(NumBytes, 2879 PPCFrameLowering::getMinCallFrameSize(isPPC64, true)); 2880 2881 // Tail call needs the stack to be aligned. 2882 if (CC == CallingConv::Fast && DAG.getTarget().Options.GuaranteedTailCallOpt){ 2883 unsigned TargetAlign = DAG.getMachineFunction().getTarget(). 2884 getFrameLowering()->getStackAlignment(); 2885 unsigned AlignMask = TargetAlign-1; 2886 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 2887 } 2888 2889 return NumBytes; 2890 } 2891 2892 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be 2893 /// adjusted to accommodate the arguments for the tailcall. 2894 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall, 2895 unsigned ParamSize) { 2896 2897 if (!isTailCall) return 0; 2898 2899 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>(); 2900 unsigned CallerMinReservedArea = FI->getMinReservedArea(); 2901 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize; 2902 // Remember only if the new adjustement is bigger. 2903 if (SPDiff < FI->getTailCallSPDelta()) 2904 FI->setTailCallSPDelta(SPDiff); 2905 2906 return SPDiff; 2907 } 2908 2909 /// IsEligibleForTailCallOptimization - Check whether the call is eligible 2910 /// for tail call optimization. Targets which want to do tail call 2911 /// optimization should implement this function. 2912 bool 2913 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 2914 CallingConv::ID CalleeCC, 2915 bool isVarArg, 2916 const SmallVectorImpl<ISD::InputArg> &Ins, 2917 SelectionDAG& DAG) const { 2918 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 2919 return false; 2920 2921 // Variable argument functions are not supported. 2922 if (isVarArg) 2923 return false; 2924 2925 MachineFunction &MF = DAG.getMachineFunction(); 2926 CallingConv::ID CallerCC = MF.getFunction()->getCallingConv(); 2927 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) { 2928 // Functions containing by val parameters are not supported. 2929 for (unsigned i = 0; i != Ins.size(); i++) { 2930 ISD::ArgFlagsTy Flags = Ins[i].Flags; 2931 if (Flags.isByVal()) return false; 2932 } 2933 2934 // Non PIC/GOT tail calls are supported. 2935 if (getTargetMachine().getRelocationModel() != Reloc::PIC_) 2936 return true; 2937 2938 // At the moment we can only do local tail calls (in same module, hidden 2939 // or protected) if we are generating PIC. 2940 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 2941 return G->getGlobal()->hasHiddenVisibility() 2942 || G->getGlobal()->hasProtectedVisibility(); 2943 } 2944 2945 return false; 2946 } 2947 2948 /// isCallCompatibleAddress - Return the immediate to use if the specified 2949 /// 32-bit value is representable in the immediate field of a BxA instruction. 2950 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) { 2951 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 2952 if (!C) return 0; 2953 2954 int Addr = C->getZExtValue(); 2955 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero. 2956 SignExtend32<26>(Addr) != Addr) 2957 return 0; // Top 6 bits have to be sext of immediate. 2958 2959 return DAG.getConstant((int)C->getZExtValue() >> 2, 2960 DAG.getTargetLoweringInfo().getPointerTy()).getNode(); 2961 } 2962 2963 namespace { 2964 2965 struct TailCallArgumentInfo { 2966 SDValue Arg; 2967 SDValue FrameIdxOp; 2968 int FrameIdx; 2969 2970 TailCallArgumentInfo() : FrameIdx(0) {} 2971 }; 2972 2973 } 2974 2975 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. 2976 static void 2977 StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, 2978 SDValue Chain, 2979 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs, 2980 SmallVectorImpl<SDValue> &MemOpChains, 2981 SDLoc dl) { 2982 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 2983 SDValue Arg = TailCallArgs[i].Arg; 2984 SDValue FIN = TailCallArgs[i].FrameIdxOp; 2985 int FI = TailCallArgs[i].FrameIdx; 2986 // Store relative to framepointer. 2987 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, FIN, 2988 MachinePointerInfo::getFixedStack(FI), 2989 false, false, 0)); 2990 } 2991 } 2992 2993 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 2994 /// the appropriate stack slot for the tail call optimized function call. 2995 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, 2996 MachineFunction &MF, 2997 SDValue Chain, 2998 SDValue OldRetAddr, 2999 SDValue OldFP, 3000 int SPDiff, 3001 bool isPPC64, 3002 bool isDarwinABI, 3003 SDLoc dl) { 3004 if (SPDiff) { 3005 // Calculate the new stack slot for the return address. 3006 int SlotSize = isPPC64 ? 8 : 4; 3007 int NewRetAddrLoc = SPDiff + PPCFrameLowering::getReturnSaveOffset(isPPC64, 3008 isDarwinABI); 3009 int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize, 3010 NewRetAddrLoc, true); 3011 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 3012 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 3013 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx, 3014 MachinePointerInfo::getFixedStack(NewRetAddr), 3015 false, false, 0); 3016 3017 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack 3018 // slot as the FP is never overwritten. 3019 if (isDarwinABI) { 3020 int NewFPLoc = 3021 SPDiff + PPCFrameLowering::getFramePointerSaveOffset(isPPC64, isDarwinABI); 3022 int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc, 3023 true); 3024 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 3025 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx, 3026 MachinePointerInfo::getFixedStack(NewFPIdx), 3027 false, false, 0); 3028 } 3029 } 3030 return Chain; 3031 } 3032 3033 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate 3034 /// the position of the argument. 3035 static void 3036 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 3037 SDValue Arg, int SPDiff, unsigned ArgOffset, 3038 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) { 3039 int Offset = ArgOffset + SPDiff; 3040 uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8; 3041 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 3042 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 3043 SDValue FIN = DAG.getFrameIndex(FI, VT); 3044 TailCallArgumentInfo Info; 3045 Info.Arg = Arg; 3046 Info.FrameIdxOp = FIN; 3047 Info.FrameIdx = FI; 3048 TailCallArguments.push_back(Info); 3049 } 3050 3051 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address 3052 /// stack slot. Returns the chain as result and the loaded frame pointers in 3053 /// LROpOut/FPOpout. Used when tail calling. 3054 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG, 3055 int SPDiff, 3056 SDValue Chain, 3057 SDValue &LROpOut, 3058 SDValue &FPOpOut, 3059 bool isDarwinABI, 3060 SDLoc dl) const { 3061 if (SPDiff) { 3062 // Load the LR and FP stack slot for later adjusting. 3063 EVT VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32; 3064 LROpOut = getReturnAddrFrameIndex(DAG); 3065 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(), 3066 false, false, false, 0); 3067 Chain = SDValue(LROpOut.getNode(), 1); 3068 3069 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack 3070 // slot as the FP is never overwritten. 3071 if (isDarwinABI) { 3072 FPOpOut = getFramePointerFrameIndex(DAG); 3073 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(), 3074 false, false, false, 0); 3075 Chain = SDValue(FPOpOut.getNode(), 1); 3076 } 3077 } 3078 return Chain; 3079 } 3080 3081 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 3082 /// by "Src" to address "Dst" of size "Size". Alignment information is 3083 /// specified by the specific parameter attribute. The copy will be passed as 3084 /// a byval function parameter. 3085 /// Sometimes what we are copying is the end of a larger object, the part that 3086 /// does not fit in registers. 3087 static SDValue 3088 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 3089 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 3090 SDLoc dl) { 3091 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); 3092 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 3093 false, false, MachinePointerInfo(0), 3094 MachinePointerInfo(0)); 3095 } 3096 3097 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 3098 /// tail calls. 3099 static void 3100 LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, 3101 SDValue Arg, SDValue PtrOff, int SPDiff, 3102 unsigned ArgOffset, bool isPPC64, bool isTailCall, 3103 bool isVector, SmallVectorImpl<SDValue> &MemOpChains, 3104 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, 3105 SDLoc dl) { 3106 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3107 if (!isTailCall) { 3108 if (isVector) { 3109 SDValue StackPtr; 3110 if (isPPC64) 3111 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 3112 else 3113 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 3114 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 3115 DAG.getConstant(ArgOffset, PtrVT)); 3116 } 3117 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, 3118 MachinePointerInfo(), false, false, 0)); 3119 // Calculate and remember argument location. 3120 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 3121 TailCallArguments); 3122 } 3123 3124 static 3125 void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 3126 SDLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes, 3127 SDValue LROp, SDValue FPOp, bool isDarwinABI, 3128 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 3129 MachineFunction &MF = DAG.getMachineFunction(); 3130 3131 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 3132 // might overwrite each other in case of tail call optimization. 3133 SmallVector<SDValue, 8> MemOpChains2; 3134 // Do not flag preceding copytoreg stuff together with the following stuff. 3135 InFlag = SDValue(); 3136 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 3137 MemOpChains2, dl); 3138 if (!MemOpChains2.empty()) 3139 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 3140 &MemOpChains2[0], MemOpChains2.size()); 3141 3142 // Store the return address to the appropriate stack slot. 3143 Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff, 3144 isPPC64, isDarwinABI, dl); 3145 3146 // Emit callseq_end just before tailcall node. 3147 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 3148 DAG.getIntPtrConstant(0, true), InFlag, dl); 3149 InFlag = Chain.getValue(1); 3150 } 3151 3152 static 3153 unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, 3154 SDValue &Chain, SDLoc dl, int SPDiff, bool isTailCall, 3155 SmallVectorImpl<std::pair<unsigned, SDValue> > &RegsToPass, 3156 SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys, 3157 const PPCSubtarget &PPCSubTarget) { 3158 3159 bool isPPC64 = PPCSubTarget.isPPC64(); 3160 bool isSVR4ABI = PPCSubTarget.isSVR4ABI(); 3161 3162 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3163 NodeTys.push_back(MVT::Other); // Returns a chain 3164 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. 3165 3166 unsigned CallOpc = PPCISD::CALL; 3167 3168 bool needIndirectCall = true; 3169 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { 3170 // If this is an absolute destination address, use the munged value. 3171 Callee = SDValue(Dest, 0); 3172 needIndirectCall = false; 3173 } 3174 3175 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 3176 // XXX Work around for http://llvm.org/bugs/show_bug.cgi?id=5201 3177 // Use indirect calls for ALL functions calls in JIT mode, since the 3178 // far-call stubs may be outside relocation limits for a BL instruction. 3179 if (!DAG.getTarget().getSubtarget<PPCSubtarget>().isJITCodeModel()) { 3180 unsigned OpFlags = 0; 3181 if (DAG.getTarget().getRelocationModel() != Reloc::Static && 3182 (PPCSubTarget.getTargetTriple().isMacOSX() && 3183 PPCSubTarget.getTargetTriple().isMacOSXVersionLT(10, 5)) && 3184 (G->getGlobal()->isDeclaration() || 3185 G->getGlobal()->isWeakForLinker())) { 3186 // PC-relative references to external symbols should go through $stub, 3187 // unless we're building with the leopard linker or later, which 3188 // automatically synthesizes these stubs. 3189 OpFlags = PPCII::MO_DARWIN_STUB; 3190 } 3191 3192 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, 3193 // every direct call is) turn it into a TargetGlobalAddress / 3194 // TargetExternalSymbol node so that legalize doesn't hack it. 3195 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, 3196 Callee.getValueType(), 3197 0, OpFlags); 3198 needIndirectCall = false; 3199 } 3200 } 3201 3202 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 3203 unsigned char OpFlags = 0; 3204 3205 if (DAG.getTarget().getRelocationModel() != Reloc::Static && 3206 (PPCSubTarget.getTargetTriple().isMacOSX() && 3207 PPCSubTarget.getTargetTriple().isMacOSXVersionLT(10, 5))) { 3208 // PC-relative references to external symbols should go through $stub, 3209 // unless we're building with the leopard linker or later, which 3210 // automatically synthesizes these stubs. 3211 OpFlags = PPCII::MO_DARWIN_STUB; 3212 } 3213 3214 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(), 3215 OpFlags); 3216 needIndirectCall = false; 3217 } 3218 3219 if (needIndirectCall) { 3220 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 3221 // to do the call, we can't use PPCISD::CALL. 3222 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 3223 3224 if (isSVR4ABI && isPPC64) { 3225 // Function pointers in the 64-bit SVR4 ABI do not point to the function 3226 // entry point, but to the function descriptor (the function entry point 3227 // address is part of the function descriptor though). 3228 // The function descriptor is a three doubleword structure with the 3229 // following fields: function entry point, TOC base address and 3230 // environment pointer. 3231 // Thus for a call through a function pointer, the following actions need 3232 // to be performed: 3233 // 1. Save the TOC of the caller in the TOC save area of its stack 3234 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). 3235 // 2. Load the address of the function entry point from the function 3236 // descriptor. 3237 // 3. Load the TOC of the callee from the function descriptor into r2. 3238 // 4. Load the environment pointer from the function descriptor into 3239 // r11. 3240 // 5. Branch to the function entry point address. 3241 // 6. On return of the callee, the TOC of the caller needs to be 3242 // restored (this is done in FinishCall()). 3243 // 3244 // All those operations are flagged together to ensure that no other 3245 // operations can be scheduled in between. E.g. without flagging the 3246 // operations together, a TOC access in the caller could be scheduled 3247 // between the load of the callee TOC and the branch to the callee, which 3248 // results in the TOC access going through the TOC of the callee instead 3249 // of going through the TOC of the caller, which leads to incorrect code. 3250 3251 // Load the address of the function entry point from the function 3252 // descriptor. 3253 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other, MVT::Glue); 3254 SDValue LoadFuncPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, MTCTROps, 3255 InFlag.getNode() ? 3 : 2); 3256 Chain = LoadFuncPtr.getValue(1); 3257 InFlag = LoadFuncPtr.getValue(2); 3258 3259 // Load environment pointer into r11. 3260 // Offset of the environment pointer within the function descriptor. 3261 SDValue PtrOff = DAG.getIntPtrConstant(16); 3262 3263 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 3264 SDValue LoadEnvPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, Chain, AddPtr, 3265 InFlag); 3266 Chain = LoadEnvPtr.getValue(1); 3267 InFlag = LoadEnvPtr.getValue(2); 3268 3269 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 3270 InFlag); 3271 Chain = EnvVal.getValue(0); 3272 InFlag = EnvVal.getValue(1); 3273 3274 // Load TOC of the callee into r2. We are using a target-specific load 3275 // with r2 hard coded, because the result of a target-independent load 3276 // would never go directly into r2, since r2 is a reserved register (which 3277 // prevents the register allocator from allocating it), resulting in an 3278 // additional register being allocated and an unnecessary move instruction 3279 // being generated. 3280 VTs = DAG.getVTList(MVT::Other, MVT::Glue); 3281 SDValue LoadTOCPtr = DAG.getNode(PPCISD::LOAD_TOC, dl, VTs, Chain, 3282 Callee, InFlag); 3283 Chain = LoadTOCPtr.getValue(0); 3284 InFlag = LoadTOCPtr.getValue(1); 3285 3286 MTCTROps[0] = Chain; 3287 MTCTROps[1] = LoadFuncPtr; 3288 MTCTROps[2] = InFlag; 3289 } 3290 3291 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, MTCTROps, 3292 2 + (InFlag.getNode() != 0)); 3293 InFlag = Chain.getValue(1); 3294 3295 NodeTys.clear(); 3296 NodeTys.push_back(MVT::Other); 3297 NodeTys.push_back(MVT::Glue); 3298 Ops.push_back(Chain); 3299 CallOpc = PPCISD::BCTRL; 3300 Callee.setNode(0); 3301 // Add use of X11 (holding environment pointer) 3302 if (isSVR4ABI && isPPC64) 3303 Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); 3304 // Add CTR register as callee so a bctr can be emitted later. 3305 if (isTailCall) 3306 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 3307 } 3308 3309 // If this is a direct call, pass the chain and the callee. 3310 if (Callee.getNode()) { 3311 Ops.push_back(Chain); 3312 Ops.push_back(Callee); 3313 } 3314 // If this is a tail call add stack pointer delta. 3315 if (isTailCall) 3316 Ops.push_back(DAG.getConstant(SPDiff, MVT::i32)); 3317 3318 // Add argument registers to the end of the list so that they are known live 3319 // into the call. 3320 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 3321 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 3322 RegsToPass[i].second.getValueType())); 3323 3324 return CallOpc; 3325 } 3326 3327 static 3328 bool isLocalCall(const SDValue &Callee) 3329 { 3330 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 3331 return !G->getGlobal()->isDeclaration() && 3332 !G->getGlobal()->isWeakForLinker(); 3333 return false; 3334 } 3335 3336 SDValue 3337 PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 3338 CallingConv::ID CallConv, bool isVarArg, 3339 const SmallVectorImpl<ISD::InputArg> &Ins, 3340 SDLoc dl, SelectionDAG &DAG, 3341 SmallVectorImpl<SDValue> &InVals) const { 3342 3343 SmallVector<CCValAssign, 16> RVLocs; 3344 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), 3345 getTargetMachine(), RVLocs, *DAG.getContext()); 3346 CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC); 3347 3348 // Copy all of the result registers out of their specified physreg. 3349 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 3350 CCValAssign &VA = RVLocs[i]; 3351 assert(VA.isRegLoc() && "Can only return in registers!"); 3352 3353 SDValue Val = DAG.getCopyFromReg(Chain, dl, 3354 VA.getLocReg(), VA.getLocVT(), InFlag); 3355 Chain = Val.getValue(1); 3356 InFlag = Val.getValue(2); 3357 3358 switch (VA.getLocInfo()) { 3359 default: llvm_unreachable("Unknown loc info!"); 3360 case CCValAssign::Full: break; 3361 case CCValAssign::AExt: 3362 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 3363 break; 3364 case CCValAssign::ZExt: 3365 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val, 3366 DAG.getValueType(VA.getValVT())); 3367 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 3368 break; 3369 case CCValAssign::SExt: 3370 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val, 3371 DAG.getValueType(VA.getValVT())); 3372 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 3373 break; 3374 } 3375 3376 InVals.push_back(Val); 3377 } 3378 3379 return Chain; 3380 } 3381 3382 SDValue 3383 PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl, 3384 bool isTailCall, bool isVarArg, 3385 SelectionDAG &DAG, 3386 SmallVector<std::pair<unsigned, SDValue>, 8> 3387 &RegsToPass, 3388 SDValue InFlag, SDValue Chain, 3389 SDValue &Callee, 3390 int SPDiff, unsigned NumBytes, 3391 const SmallVectorImpl<ISD::InputArg> &Ins, 3392 SmallVectorImpl<SDValue> &InVals) const { 3393 std::vector<EVT> NodeTys; 3394 SmallVector<SDValue, 8> Ops; 3395 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, dl, SPDiff, 3396 isTailCall, RegsToPass, Ops, NodeTys, 3397 PPCSubTarget); 3398 3399 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls 3400 if (isVarArg && PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64()) 3401 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); 3402 3403 // When performing tail call optimization the callee pops its arguments off 3404 // the stack. Account for this here so these bytes can be pushed back on in 3405 // PPCFrameLowering::eliminateCallFramePseudoInstr. 3406 int BytesCalleePops = 3407 (CallConv == CallingConv::Fast && 3408 getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0; 3409 3410 // Add a register mask operand representing the call-preserved registers. 3411 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); 3412 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv); 3413 assert(Mask && "Missing call preserved mask for calling convention"); 3414 Ops.push_back(DAG.getRegisterMask(Mask)); 3415 3416 if (InFlag.getNode()) 3417 Ops.push_back(InFlag); 3418 3419 // Emit tail call. 3420 if (isTailCall) { 3421 assert(((Callee.getOpcode() == ISD::Register && 3422 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) || 3423 Callee.getOpcode() == ISD::TargetExternalSymbol || 3424 Callee.getOpcode() == ISD::TargetGlobalAddress || 3425 isa<ConstantSDNode>(Callee)) && 3426 "Expecting an global address, external symbol, absolute value or register"); 3427 3428 return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, &Ops[0], Ops.size()); 3429 } 3430 3431 // Add a NOP immediately after the branch instruction when using the 64-bit 3432 // SVR4 ABI. At link time, if caller and callee are in a different module and 3433 // thus have a different TOC, the call will be replaced with a call to a stub 3434 // function which saves the current TOC, loads the TOC of the callee and 3435 // branches to the callee. The NOP will be replaced with a load instruction 3436 // which restores the TOC of the caller from the TOC save slot of the current 3437 // stack frame. If caller and callee belong to the same module (and have the 3438 // same TOC), the NOP will remain unchanged. 3439 3440 bool needsTOCRestore = false; 3441 if (!isTailCall && PPCSubTarget.isSVR4ABI()&& PPCSubTarget.isPPC64()) { 3442 if (CallOpc == PPCISD::BCTRL) { 3443 // This is a call through a function pointer. 3444 // Restore the caller TOC from the save area into R2. 3445 // See PrepareCall() for more information about calls through function 3446 // pointers in the 64-bit SVR4 ABI. 3447 // We are using a target-specific load with r2 hard coded, because the 3448 // result of a target-independent load would never go directly into r2, 3449 // since r2 is a reserved register (which prevents the register allocator 3450 // from allocating it), resulting in an additional register being 3451 // allocated and an unnecessary move instruction being generated. 3452 needsTOCRestore = true; 3453 } else if ((CallOpc == PPCISD::CALL) && !isLocalCall(Callee)) { 3454 // Otherwise insert NOP for non-local calls. 3455 CallOpc = PPCISD::CALL_NOP; 3456 } 3457 } 3458 3459 Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size()); 3460 InFlag = Chain.getValue(1); 3461 3462 if (needsTOCRestore) { 3463 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 3464 Chain = DAG.getNode(PPCISD::TOC_RESTORE, dl, VTs, Chain, InFlag); 3465 InFlag = Chain.getValue(1); 3466 } 3467 3468 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 3469 DAG.getIntPtrConstant(BytesCalleePops, true), 3470 InFlag, dl); 3471 if (!Ins.empty()) 3472 InFlag = Chain.getValue(1); 3473 3474 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 3475 Ins, dl, DAG, InVals); 3476 } 3477 3478 SDValue 3479 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 3480 SmallVectorImpl<SDValue> &InVals) const { 3481 SelectionDAG &DAG = CLI.DAG; 3482 SDLoc &dl = CLI.DL; 3483 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 3484 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 3485 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 3486 SDValue Chain = CLI.Chain; 3487 SDValue Callee = CLI.Callee; 3488 bool &isTailCall = CLI.IsTailCall; 3489 CallingConv::ID CallConv = CLI.CallConv; 3490 bool isVarArg = CLI.IsVarArg; 3491 3492 if (isTailCall) 3493 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 3494 Ins, DAG); 3495 3496 if (PPCSubTarget.isSVR4ABI()) { 3497 if (PPCSubTarget.isPPC64()) 3498 return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg, 3499 isTailCall, Outs, OutVals, Ins, 3500 dl, DAG, InVals); 3501 else 3502 return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg, 3503 isTailCall, Outs, OutVals, Ins, 3504 dl, DAG, InVals); 3505 } 3506 3507 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg, 3508 isTailCall, Outs, OutVals, Ins, 3509 dl, DAG, InVals); 3510 } 3511 3512 SDValue 3513 PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee, 3514 CallingConv::ID CallConv, bool isVarArg, 3515 bool isTailCall, 3516 const SmallVectorImpl<ISD::OutputArg> &Outs, 3517 const SmallVectorImpl<SDValue> &OutVals, 3518 const SmallVectorImpl<ISD::InputArg> &Ins, 3519 SDLoc dl, SelectionDAG &DAG, 3520 SmallVectorImpl<SDValue> &InVals) const { 3521 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description 3522 // of the 32-bit SVR4 ABI stack frame layout. 3523 3524 assert((CallConv == CallingConv::C || 3525 CallConv == CallingConv::Fast) && "Unknown calling convention!"); 3526 3527 unsigned PtrByteSize = 4; 3528 3529 MachineFunction &MF = DAG.getMachineFunction(); 3530 3531 // Mark this function as potentially containing a function that contains a 3532 // tail call. As a consequence the frame pointer will be used for dynamicalloc 3533 // and restoring the callers stack pointer in this functions epilog. This is 3534 // done because by tail calling the called function might overwrite the value 3535 // in this function's (MF) stack pointer stack slot 0(SP). 3536 if (getTargetMachine().Options.GuaranteedTailCallOpt && 3537 CallConv == CallingConv::Fast) 3538 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 3539 3540 // Count how many bytes are to be pushed on the stack, including the linkage 3541 // area, parameter list area and the part of the local variable space which 3542 // contains copies of aggregates which are passed by value. 3543 3544 // Assign locations to all of the outgoing arguments. 3545 SmallVector<CCValAssign, 16> ArgLocs; 3546 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 3547 getTargetMachine(), ArgLocs, *DAG.getContext()); 3548 3549 // Reserve space for the linkage area on the stack. 3550 CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize); 3551 3552 if (isVarArg) { 3553 // Handle fixed and variable vector arguments differently. 3554 // Fixed vector arguments go into registers as long as registers are 3555 // available. Variable vector arguments always go into memory. 3556 unsigned NumArgs = Outs.size(); 3557 3558 for (unsigned i = 0; i != NumArgs; ++i) { 3559 MVT ArgVT = Outs[i].VT; 3560 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 3561 bool Result; 3562 3563 if (Outs[i].IsFixed) { 3564 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, 3565 CCInfo); 3566 } else { 3567 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, 3568 ArgFlags, CCInfo); 3569 } 3570 3571 if (Result) { 3572 #ifndef NDEBUG 3573 errs() << "Call operand #" << i << " has unhandled type " 3574 << EVT(ArgVT).getEVTString() << "\n"; 3575 #endif 3576 llvm_unreachable(0); 3577 } 3578 } 3579 } else { 3580 // All arguments are treated the same. 3581 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4); 3582 } 3583 3584 // Assign locations to all of the outgoing aggregate by value arguments. 3585 SmallVector<CCValAssign, 16> ByValArgLocs; 3586 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 3587 getTargetMachine(), ByValArgLocs, *DAG.getContext()); 3588 3589 // Reserve stack space for the allocations in CCInfo. 3590 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 3591 3592 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal); 3593 3594 // Size of the linkage area, parameter list area and the part of the local 3595 // space variable where copies of aggregates which are passed by value are 3596 // stored. 3597 unsigned NumBytes = CCByValInfo.getNextStackOffset(); 3598 3599 // Calculate by how many bytes the stack has to be adjusted in case of tail 3600 // call optimization. 3601 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 3602 3603 // Adjust the stack pointer for the new arguments... 3604 // These operations are automatically eliminated by the prolog/epilog pass 3605 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true), 3606 dl); 3607 SDValue CallSeqStart = Chain; 3608 3609 // Load the return address and frame pointer so it can be moved somewhere else 3610 // later. 3611 SDValue LROp, FPOp; 3612 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false, 3613 dl); 3614 3615 // Set up a copy of the stack pointer for use loading and storing any 3616 // arguments that may not fit in the registers available for argument 3617 // passing. 3618 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 3619 3620 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 3621 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 3622 SmallVector<SDValue, 8> MemOpChains; 3623 3624 bool seenFloatArg = false; 3625 // Walk the register/memloc assignments, inserting copies/loads. 3626 for (unsigned i = 0, j = 0, e = ArgLocs.size(); 3627 i != e; 3628 ++i) { 3629 CCValAssign &VA = ArgLocs[i]; 3630 SDValue Arg = OutVals[i]; 3631 ISD::ArgFlagsTy Flags = Outs[i].Flags; 3632 3633 if (Flags.isByVal()) { 3634 // Argument is an aggregate which is passed by value, thus we need to 3635 // create a copy of it in the local variable space of the current stack 3636 // frame (which is the stack frame of the caller) and pass the address of 3637 // this copy to the callee. 3638 assert((j < ByValArgLocs.size()) && "Index out of bounds!"); 3639 CCValAssign &ByValVA = ByValArgLocs[j++]; 3640 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); 3641 3642 // Memory reserved in the local variable space of the callers stack frame. 3643 unsigned LocMemOffset = ByValVA.getLocMemOffset(); 3644 3645 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 3646 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 3647 3648 // Create a copy of the argument in the local area of the current 3649 // stack frame. 3650 SDValue MemcpyCall = 3651 CreateCopyOfByValArgument(Arg, PtrOff, 3652 CallSeqStart.getNode()->getOperand(0), 3653 Flags, DAG, dl); 3654 3655 // This must go outside the CALLSEQ_START..END. 3656 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 3657 CallSeqStart.getNode()->getOperand(1), 3658 SDLoc(MemcpyCall)); 3659 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 3660 NewCallSeqStart.getNode()); 3661 Chain = CallSeqStart = NewCallSeqStart; 3662 3663 // Pass the address of the aggregate copy on the stack either in a 3664 // physical register or in the parameter list area of the current stack 3665 // frame to the callee. 3666 Arg = PtrOff; 3667 } 3668 3669 if (VA.isRegLoc()) { 3670 seenFloatArg |= VA.getLocVT().isFloatingPoint(); 3671 // Put argument in a physical register. 3672 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 3673 } else { 3674 // Put argument in the parameter list area of the current stack frame. 3675 assert(VA.isMemLoc()); 3676 unsigned LocMemOffset = VA.getLocMemOffset(); 3677 3678 if (!isTailCall) { 3679 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 3680 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 3681 3682 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, 3683 MachinePointerInfo(), 3684 false, false, 0)); 3685 } else { 3686 // Calculate and remember argument location. 3687 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset, 3688 TailCallArguments); 3689 } 3690 } 3691 } 3692 3693 if (!MemOpChains.empty()) 3694 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 3695 &MemOpChains[0], MemOpChains.size()); 3696 3697 // Build a sequence of copy-to-reg nodes chained together with token chain 3698 // and flag operands which copy the outgoing args into the appropriate regs. 3699 SDValue InFlag; 3700 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 3701 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 3702 RegsToPass[i].second, InFlag); 3703 InFlag = Chain.getValue(1); 3704 } 3705 3706 // Set CR bit 6 to true if this is a vararg call with floating args passed in 3707 // registers. 3708 if (isVarArg) { 3709 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 3710 SDValue Ops[] = { Chain, InFlag }; 3711 3712 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET, 3713 dl, VTs, Ops, InFlag.getNode() ? 2 : 1); 3714 3715 InFlag = Chain.getValue(1); 3716 } 3717 3718 if (isTailCall) 3719 PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp, 3720 false, TailCallArguments); 3721 3722 return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG, 3723 RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes, 3724 Ins, InVals); 3725 } 3726 3727 // Copy an argument into memory, being careful to do this outside the 3728 // call sequence for the call to which the argument belongs. 3729 SDValue 3730 PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff, 3731 SDValue CallSeqStart, 3732 ISD::ArgFlagsTy Flags, 3733 SelectionDAG &DAG, 3734 SDLoc dl) const { 3735 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, 3736 CallSeqStart.getNode()->getOperand(0), 3737 Flags, DAG, dl); 3738 // The MEMCPY must go outside the CALLSEQ_START..END. 3739 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 3740 CallSeqStart.getNode()->getOperand(1), 3741 SDLoc(MemcpyCall)); 3742 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 3743 NewCallSeqStart.getNode()); 3744 return NewCallSeqStart; 3745 } 3746 3747 SDValue 3748 PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee, 3749 CallingConv::ID CallConv, bool isVarArg, 3750 bool isTailCall, 3751 const SmallVectorImpl<ISD::OutputArg> &Outs, 3752 const SmallVectorImpl<SDValue> &OutVals, 3753 const SmallVectorImpl<ISD::InputArg> &Ins, 3754 SDLoc dl, SelectionDAG &DAG, 3755 SmallVectorImpl<SDValue> &InVals) const { 3756 3757 unsigned NumOps = Outs.size(); 3758 3759 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3760 unsigned PtrByteSize = 8; 3761 3762 MachineFunction &MF = DAG.getMachineFunction(); 3763 3764 // Mark this function as potentially containing a function that contains a 3765 // tail call. As a consequence the frame pointer will be used for dynamicalloc 3766 // and restoring the callers stack pointer in this functions epilog. This is 3767 // done because by tail calling the called function might overwrite the value 3768 // in this function's (MF) stack pointer stack slot 0(SP). 3769 if (getTargetMachine().Options.GuaranteedTailCallOpt && 3770 CallConv == CallingConv::Fast) 3771 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 3772 3773 unsigned nAltivecParamsAtEnd = 0; 3774 3775 // Count how many bytes are to be pushed on the stack, including the linkage 3776 // area, and parameter passing area. We start with at least 48 bytes, which 3777 // is reserved space for [SP][CR][LR][3 x unused]. 3778 // NOTE: For PPC64, nAltivecParamsAtEnd always remains zero as a result 3779 // of this call. 3780 unsigned NumBytes = 3781 CalculateParameterAndLinkageAreaSize(DAG, true, isVarArg, CallConv, 3782 Outs, OutVals, nAltivecParamsAtEnd); 3783 3784 // Calculate by how many bytes the stack has to be adjusted in case of tail 3785 // call optimization. 3786 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 3787 3788 // To protect arguments on the stack from being clobbered in a tail call, 3789 // force all the loads to happen before doing any other lowering. 3790 if (isTailCall) 3791 Chain = DAG.getStackArgumentTokenFactor(Chain); 3792 3793 // Adjust the stack pointer for the new arguments... 3794 // These operations are automatically eliminated by the prolog/epilog pass 3795 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true), 3796 dl); 3797 SDValue CallSeqStart = Chain; 3798 3799 // Load the return address and frame pointer so it can be move somewhere else 3800 // later. 3801 SDValue LROp, FPOp; 3802 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true, 3803 dl); 3804 3805 // Set up a copy of the stack pointer for use loading and storing any 3806 // arguments that may not fit in the registers available for argument 3807 // passing. 3808 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 3809 3810 // Figure out which arguments are going to go in registers, and which in 3811 // memory. Also, if this is a vararg function, floating point operations 3812 // must be stored to our stack, and loaded into integer regs as well, if 3813 // any integer regs are available for argument passing. 3814 unsigned ArgOffset = PPCFrameLowering::getLinkageSize(true, true); 3815 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3816 3817 static const uint16_t GPR[] = { 3818 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3819 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3820 }; 3821 static const uint16_t *FPR = GetFPR(); 3822 3823 static const uint16_t VR[] = { 3824 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3825 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3826 }; 3827 const unsigned NumGPRs = array_lengthof(GPR); 3828 const unsigned NumFPRs = 13; 3829 const unsigned NumVRs = array_lengthof(VR); 3830 3831 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 3832 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 3833 3834 SmallVector<SDValue, 8> MemOpChains; 3835 for (unsigned i = 0; i != NumOps; ++i) { 3836 SDValue Arg = OutVals[i]; 3837 ISD::ArgFlagsTy Flags = Outs[i].Flags; 3838 3839 // PtrOff will be used to store the current argument to the stack if a 3840 // register cannot be found for it. 3841 SDValue PtrOff; 3842 3843 PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); 3844 3845 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 3846 3847 // Promote integers to 64-bit values. 3848 if (Arg.getValueType() == MVT::i32) { 3849 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 3850 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 3851 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 3852 } 3853 3854 // FIXME memcpy is used way more than necessary. Correctness first. 3855 // Note: "by value" is code for passing a structure by value, not 3856 // basic types. 3857 if (Flags.isByVal()) { 3858 // Note: Size includes alignment padding, so 3859 // struct x { short a; char b; } 3860 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 3861 // These are the proper values we need for right-justifying the 3862 // aggregate in a parameter register. 3863 unsigned Size = Flags.getByValSize(); 3864 3865 // An empty aggregate parameter takes up no storage and no 3866 // registers. 3867 if (Size == 0) 3868 continue; 3869 3870 // All aggregates smaller than 8 bytes must be passed right-justified. 3871 if (Size==1 || Size==2 || Size==4) { 3872 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 3873 if (GPR_idx != NumGPRs) { 3874 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 3875 MachinePointerInfo(), VT, 3876 false, false, 0); 3877 MemOpChains.push_back(Load.getValue(1)); 3878 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3879 3880 ArgOffset += PtrByteSize; 3881 continue; 3882 } 3883 } 3884 3885 if (GPR_idx == NumGPRs && Size < 8) { 3886 SDValue Const = DAG.getConstant(PtrByteSize - Size, 3887 PtrOff.getValueType()); 3888 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 3889 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 3890 CallSeqStart, 3891 Flags, DAG, dl); 3892 ArgOffset += PtrByteSize; 3893 continue; 3894 } 3895 // Copy entire object into memory. There are cases where gcc-generated 3896 // code assumes it is there, even if it could be put entirely into 3897 // registers. (This is not what the doc says.) 3898 3899 // FIXME: The above statement is likely due to a misunderstanding of the 3900 // documents. All arguments must be copied into the parameter area BY 3901 // THE CALLEE in the event that the callee takes the address of any 3902 // formal argument. That has not yet been implemented. However, it is 3903 // reasonable to use the stack area as a staging area for the register 3904 // load. 3905 3906 // Skip this for small aggregates, as we will use the same slot for a 3907 // right-justified copy, below. 3908 if (Size >= 8) 3909 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 3910 CallSeqStart, 3911 Flags, DAG, dl); 3912 3913 // When a register is available, pass a small aggregate right-justified. 3914 if (Size < 8 && GPR_idx != NumGPRs) { 3915 // The easiest way to get this right-justified in a register 3916 // is to copy the structure into the rightmost portion of a 3917 // local variable slot, then load the whole slot into the 3918 // register. 3919 // FIXME: The memcpy seems to produce pretty awful code for 3920 // small aggregates, particularly for packed ones. 3921 // FIXME: It would be preferable to use the slot in the 3922 // parameter save area instead of a new local variable. 3923 SDValue Const = DAG.getConstant(8 - Size, PtrOff.getValueType()); 3924 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 3925 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 3926 CallSeqStart, 3927 Flags, DAG, dl); 3928 3929 // Load the slot into the register. 3930 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, PtrOff, 3931 MachinePointerInfo(), 3932 false, false, false, 0); 3933 MemOpChains.push_back(Load.getValue(1)); 3934 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3935 3936 // Done with this argument. 3937 ArgOffset += PtrByteSize; 3938 continue; 3939 } 3940 3941 // For aggregates larger than PtrByteSize, copy the pieces of the 3942 // object that fit into registers from the parameter save area. 3943 for (unsigned j=0; j<Size; j+=PtrByteSize) { 3944 SDValue Const = DAG.getConstant(j, PtrOff.getValueType()); 3945 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 3946 if (GPR_idx != NumGPRs) { 3947 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 3948 MachinePointerInfo(), 3949 false, false, false, 0); 3950 MemOpChains.push_back(Load.getValue(1)); 3951 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3952 ArgOffset += PtrByteSize; 3953 } else { 3954 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 3955 break; 3956 } 3957 } 3958 continue; 3959 } 3960 3961 switch (Arg.getSimpleValueType().SimpleTy) { 3962 default: llvm_unreachable("Unexpected ValueType for argument!"); 3963 case MVT::i32: 3964 case MVT::i64: 3965 if (GPR_idx != NumGPRs) { 3966 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 3967 } else { 3968 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 3969 true, isTailCall, false, MemOpChains, 3970 TailCallArguments, dl); 3971 } 3972 ArgOffset += PtrByteSize; 3973 break; 3974 case MVT::f32: 3975 case MVT::f64: 3976 if (FPR_idx != NumFPRs) { 3977 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 3978 3979 if (isVarArg) { 3980 // A single float or an aggregate containing only a single float 3981 // must be passed right-justified in the stack doubleword, and 3982 // in the GPR, if one is available. 3983 SDValue StoreOff; 3984 if (Arg.getSimpleValueType().SimpleTy == MVT::f32) { 3985 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 3986 StoreOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 3987 } else 3988 StoreOff = PtrOff; 3989 3990 SDValue Store = DAG.getStore(Chain, dl, Arg, StoreOff, 3991 MachinePointerInfo(), false, false, 0); 3992 MemOpChains.push_back(Store); 3993 3994 // Float varargs are always shadowed in available integer registers 3995 if (GPR_idx != NumGPRs) { 3996 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 3997 MachinePointerInfo(), false, false, 3998 false, 0); 3999 MemOpChains.push_back(Load.getValue(1)); 4000 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4001 } 4002 } else if (GPR_idx != NumGPRs) 4003 // If we have any FPRs remaining, we may also have GPRs remaining. 4004 ++GPR_idx; 4005 } else { 4006 // Single-precision floating-point values are mapped to the 4007 // second (rightmost) word of the stack doubleword. 4008 if (Arg.getValueType() == MVT::f32) { 4009 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 4010 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 4011 } 4012 4013 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4014 true, isTailCall, false, MemOpChains, 4015 TailCallArguments, dl); 4016 } 4017 ArgOffset += 8; 4018 break; 4019 case MVT::v4f32: 4020 case MVT::v4i32: 4021 case MVT::v8i16: 4022 case MVT::v16i8: 4023 if (isVarArg) { 4024 // These go aligned on the stack, or in the corresponding R registers 4025 // when within range. The Darwin PPC ABI doc claims they also go in 4026 // V registers; in fact gcc does this only for arguments that are 4027 // prototyped, not for those that match the ... We do it for all 4028 // arguments, seems to work. 4029 while (ArgOffset % 16 !=0) { 4030 ArgOffset += PtrByteSize; 4031 if (GPR_idx != NumGPRs) 4032 GPR_idx++; 4033 } 4034 // We could elide this store in the case where the object fits 4035 // entirely in R registers. Maybe later. 4036 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4037 DAG.getConstant(ArgOffset, PtrVT)); 4038 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 4039 MachinePointerInfo(), false, false, 0); 4040 MemOpChains.push_back(Store); 4041 if (VR_idx != NumVRs) { 4042 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 4043 MachinePointerInfo(), 4044 false, false, false, 0); 4045 MemOpChains.push_back(Load.getValue(1)); 4046 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 4047 } 4048 ArgOffset += 16; 4049 for (unsigned i=0; i<16; i+=PtrByteSize) { 4050 if (GPR_idx == NumGPRs) 4051 break; 4052 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 4053 DAG.getConstant(i, PtrVT)); 4054 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 4055 false, false, false, 0); 4056 MemOpChains.push_back(Load.getValue(1)); 4057 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4058 } 4059 break; 4060 } 4061 4062 // Non-varargs Altivec params generally go in registers, but have 4063 // stack space allocated at the end. 4064 if (VR_idx != NumVRs) { 4065 // Doesn't have GPR space allocated. 4066 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 4067 } else { 4068 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4069 true, isTailCall, true, MemOpChains, 4070 TailCallArguments, dl); 4071 ArgOffset += 16; 4072 } 4073 break; 4074 } 4075 } 4076 4077 if (!MemOpChains.empty()) 4078 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 4079 &MemOpChains[0], MemOpChains.size()); 4080 4081 // Check if this is an indirect call (MTCTR/BCTRL). 4082 // See PrepareCall() for more information about calls through function 4083 // pointers in the 64-bit SVR4 ABI. 4084 if (!isTailCall && 4085 !dyn_cast<GlobalAddressSDNode>(Callee) && 4086 !dyn_cast<ExternalSymbolSDNode>(Callee) && 4087 !isBLACompatibleAddress(Callee, DAG)) { 4088 // Load r2 into a virtual register and store it to the TOC save area. 4089 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64); 4090 // TOC save area offset. 4091 SDValue PtrOff = DAG.getIntPtrConstant(40); 4092 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 4093 Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr, MachinePointerInfo(), 4094 false, false, 0); 4095 // R12 must contain the address of an indirect callee. This does not 4096 // mean the MTCTR instruction must use R12; it's easier to model this 4097 // as an extra parameter, so do that. 4098 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee)); 4099 } 4100 4101 // Build a sequence of copy-to-reg nodes chained together with token chain 4102 // and flag operands which copy the outgoing args into the appropriate regs. 4103 SDValue InFlag; 4104 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 4105 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 4106 RegsToPass[i].second, InFlag); 4107 InFlag = Chain.getValue(1); 4108 } 4109 4110 if (isTailCall) 4111 PrepareTailCall(DAG, InFlag, Chain, dl, true, SPDiff, NumBytes, LROp, 4112 FPOp, true, TailCallArguments); 4113 4114 return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG, 4115 RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes, 4116 Ins, InVals); 4117 } 4118 4119 SDValue 4120 PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee, 4121 CallingConv::ID CallConv, bool isVarArg, 4122 bool isTailCall, 4123 const SmallVectorImpl<ISD::OutputArg> &Outs, 4124 const SmallVectorImpl<SDValue> &OutVals, 4125 const SmallVectorImpl<ISD::InputArg> &Ins, 4126 SDLoc dl, SelectionDAG &DAG, 4127 SmallVectorImpl<SDValue> &InVals) const { 4128 4129 unsigned NumOps = Outs.size(); 4130 4131 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 4132 bool isPPC64 = PtrVT == MVT::i64; 4133 unsigned PtrByteSize = isPPC64 ? 8 : 4; 4134 4135 MachineFunction &MF = DAG.getMachineFunction(); 4136 4137 // Mark this function as potentially containing a function that contains a 4138 // tail call. As a consequence the frame pointer will be used for dynamicalloc 4139 // and restoring the callers stack pointer in this functions epilog. This is 4140 // done because by tail calling the called function might overwrite the value 4141 // in this function's (MF) stack pointer stack slot 0(SP). 4142 if (getTargetMachine().Options.GuaranteedTailCallOpt && 4143 CallConv == CallingConv::Fast) 4144 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 4145 4146 unsigned nAltivecParamsAtEnd = 0; 4147 4148 // Count how many bytes are to be pushed on the stack, including the linkage 4149 // area, and parameter passing area. We start with 24/48 bytes, which is 4150 // prereserved space for [SP][CR][LR][3 x unused]. 4151 unsigned NumBytes = 4152 CalculateParameterAndLinkageAreaSize(DAG, isPPC64, isVarArg, CallConv, 4153 Outs, OutVals, 4154 nAltivecParamsAtEnd); 4155 4156 // Calculate by how many bytes the stack has to be adjusted in case of tail 4157 // call optimization. 4158 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 4159 4160 // To protect arguments on the stack from being clobbered in a tail call, 4161 // force all the loads to happen before doing any other lowering. 4162 if (isTailCall) 4163 Chain = DAG.getStackArgumentTokenFactor(Chain); 4164 4165 // Adjust the stack pointer for the new arguments... 4166 // These operations are automatically eliminated by the prolog/epilog pass 4167 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true), 4168 dl); 4169 SDValue CallSeqStart = Chain; 4170 4171 // Load the return address and frame pointer so it can be move somewhere else 4172 // later. 4173 SDValue LROp, FPOp; 4174 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true, 4175 dl); 4176 4177 // Set up a copy of the stack pointer for use loading and storing any 4178 // arguments that may not fit in the registers available for argument 4179 // passing. 4180 SDValue StackPtr; 4181 if (isPPC64) 4182 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 4183 else 4184 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 4185 4186 // Figure out which arguments are going to go in registers, and which in 4187 // memory. Also, if this is a vararg function, floating point operations 4188 // must be stored to our stack, and loaded into integer regs as well, if 4189 // any integer regs are available for argument passing. 4190 unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true); 4191 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 4192 4193 static const uint16_t GPR_32[] = { // 32-bit registers. 4194 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 4195 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 4196 }; 4197 static const uint16_t GPR_64[] = { // 64-bit registers. 4198 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 4199 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 4200 }; 4201 static const uint16_t *FPR = GetFPR(); 4202 4203 static const uint16_t VR[] = { 4204 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 4205 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 4206 }; 4207 const unsigned NumGPRs = array_lengthof(GPR_32); 4208 const unsigned NumFPRs = 13; 4209 const unsigned NumVRs = array_lengthof(VR); 4210 4211 const uint16_t *GPR = isPPC64 ? GPR_64 : GPR_32; 4212 4213 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 4214 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 4215 4216 SmallVector<SDValue, 8> MemOpChains; 4217 for (unsigned i = 0; i != NumOps; ++i) { 4218 SDValue Arg = OutVals[i]; 4219 ISD::ArgFlagsTy Flags = Outs[i].Flags; 4220 4221 // PtrOff will be used to store the current argument to the stack if a 4222 // register cannot be found for it. 4223 SDValue PtrOff; 4224 4225 PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); 4226 4227 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 4228 4229 // On PPC64, promote integers to 64-bit values. 4230 if (isPPC64 && Arg.getValueType() == MVT::i32) { 4231 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 4232 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 4233 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 4234 } 4235 4236 // FIXME memcpy is used way more than necessary. Correctness first. 4237 // Note: "by value" is code for passing a structure by value, not 4238 // basic types. 4239 if (Flags.isByVal()) { 4240 unsigned Size = Flags.getByValSize(); 4241 // Very small objects are passed right-justified. Everything else is 4242 // passed left-justified. 4243 if (Size==1 || Size==2) { 4244 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 4245 if (GPR_idx != NumGPRs) { 4246 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 4247 MachinePointerInfo(), VT, 4248 false, false, 0); 4249 MemOpChains.push_back(Load.getValue(1)); 4250 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4251 4252 ArgOffset += PtrByteSize; 4253 } else { 4254 SDValue Const = DAG.getConstant(PtrByteSize - Size, 4255 PtrOff.getValueType()); 4256 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 4257 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 4258 CallSeqStart, 4259 Flags, DAG, dl); 4260 ArgOffset += PtrByteSize; 4261 } 4262 continue; 4263 } 4264 // Copy entire object into memory. There are cases where gcc-generated 4265 // code assumes it is there, even if it could be put entirely into 4266 // registers. (This is not what the doc says.) 4267 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 4268 CallSeqStart, 4269 Flags, DAG, dl); 4270 4271 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 4272 // copy the pieces of the object that fit into registers from the 4273 // parameter save area. 4274 for (unsigned j=0; j<Size; j+=PtrByteSize) { 4275 SDValue Const = DAG.getConstant(j, PtrOff.getValueType()); 4276 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 4277 if (GPR_idx != NumGPRs) { 4278 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 4279 MachinePointerInfo(), 4280 false, false, false, 0); 4281 MemOpChains.push_back(Load.getValue(1)); 4282 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4283 ArgOffset += PtrByteSize; 4284 } else { 4285 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 4286 break; 4287 } 4288 } 4289 continue; 4290 } 4291 4292 switch (Arg.getSimpleValueType().SimpleTy) { 4293 default: llvm_unreachable("Unexpected ValueType for argument!"); 4294 case MVT::i32: 4295 case MVT::i64: 4296 if (GPR_idx != NumGPRs) { 4297 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 4298 } else { 4299 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4300 isPPC64, isTailCall, false, MemOpChains, 4301 TailCallArguments, dl); 4302 } 4303 ArgOffset += PtrByteSize; 4304 break; 4305 case MVT::f32: 4306 case MVT::f64: 4307 if (FPR_idx != NumFPRs) { 4308 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 4309 4310 if (isVarArg) { 4311 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 4312 MachinePointerInfo(), false, false, 0); 4313 MemOpChains.push_back(Store); 4314 4315 // Float varargs are always shadowed in available integer registers 4316 if (GPR_idx != NumGPRs) { 4317 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 4318 MachinePointerInfo(), false, false, 4319 false, 0); 4320 MemOpChains.push_back(Load.getValue(1)); 4321 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4322 } 4323 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 4324 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 4325 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 4326 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 4327 MachinePointerInfo(), 4328 false, false, false, 0); 4329 MemOpChains.push_back(Load.getValue(1)); 4330 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4331 } 4332 } else { 4333 // If we have any FPRs remaining, we may also have GPRs remaining. 4334 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 4335 // GPRs. 4336 if (GPR_idx != NumGPRs) 4337 ++GPR_idx; 4338 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 4339 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 4340 ++GPR_idx; 4341 } 4342 } else 4343 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4344 isPPC64, isTailCall, false, MemOpChains, 4345 TailCallArguments, dl); 4346 if (isPPC64) 4347 ArgOffset += 8; 4348 else 4349 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 4350 break; 4351 case MVT::v4f32: 4352 case MVT::v4i32: 4353 case MVT::v8i16: 4354 case MVT::v16i8: 4355 if (isVarArg) { 4356 // These go aligned on the stack, or in the corresponding R registers 4357 // when within range. The Darwin PPC ABI doc claims they also go in 4358 // V registers; in fact gcc does this only for arguments that are 4359 // prototyped, not for those that match the ... We do it for all 4360 // arguments, seems to work. 4361 while (ArgOffset % 16 !=0) { 4362 ArgOffset += PtrByteSize; 4363 if (GPR_idx != NumGPRs) 4364 GPR_idx++; 4365 } 4366 // We could elide this store in the case where the object fits 4367 // entirely in R registers. Maybe later. 4368 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4369 DAG.getConstant(ArgOffset, PtrVT)); 4370 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 4371 MachinePointerInfo(), false, false, 0); 4372 MemOpChains.push_back(Store); 4373 if (VR_idx != NumVRs) { 4374 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 4375 MachinePointerInfo(), 4376 false, false, false, 0); 4377 MemOpChains.push_back(Load.getValue(1)); 4378 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 4379 } 4380 ArgOffset += 16; 4381 for (unsigned i=0; i<16; i+=PtrByteSize) { 4382 if (GPR_idx == NumGPRs) 4383 break; 4384 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 4385 DAG.getConstant(i, PtrVT)); 4386 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 4387 false, false, false, 0); 4388 MemOpChains.push_back(Load.getValue(1)); 4389 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4390 } 4391 break; 4392 } 4393 4394 // Non-varargs Altivec params generally go in registers, but have 4395 // stack space allocated at the end. 4396 if (VR_idx != NumVRs) { 4397 // Doesn't have GPR space allocated. 4398 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 4399 } else if (nAltivecParamsAtEnd==0) { 4400 // We are emitting Altivec params in order. 4401 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4402 isPPC64, isTailCall, true, MemOpChains, 4403 TailCallArguments, dl); 4404 ArgOffset += 16; 4405 } 4406 break; 4407 } 4408 } 4409 // If all Altivec parameters fit in registers, as they usually do, 4410 // they get stack space following the non-Altivec parameters. We 4411 // don't track this here because nobody below needs it. 4412 // If there are more Altivec parameters than fit in registers emit 4413 // the stores here. 4414 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 4415 unsigned j = 0; 4416 // Offset is aligned; skip 1st 12 params which go in V registers. 4417 ArgOffset = ((ArgOffset+15)/16)*16; 4418 ArgOffset += 12*16; 4419 for (unsigned i = 0; i != NumOps; ++i) { 4420 SDValue Arg = OutVals[i]; 4421 EVT ArgType = Outs[i].VT; 4422 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 4423 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 4424 if (++j > NumVRs) { 4425 SDValue PtrOff; 4426 // We are emitting Altivec params in order. 4427 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4428 isPPC64, isTailCall, true, MemOpChains, 4429 TailCallArguments, dl); 4430 ArgOffset += 16; 4431 } 4432 } 4433 } 4434 } 4435 4436 if (!MemOpChains.empty()) 4437 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 4438 &MemOpChains[0], MemOpChains.size()); 4439 4440 // On Darwin, R12 must contain the address of an indirect callee. This does 4441 // not mean the MTCTR instruction must use R12; it's easier to model this as 4442 // an extra parameter, so do that. 4443 if (!isTailCall && 4444 !dyn_cast<GlobalAddressSDNode>(Callee) && 4445 !dyn_cast<ExternalSymbolSDNode>(Callee) && 4446 !isBLACompatibleAddress(Callee, DAG)) 4447 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 : 4448 PPC::R12), Callee)); 4449 4450 // Build a sequence of copy-to-reg nodes chained together with token chain 4451 // and flag operands which copy the outgoing args into the appropriate regs. 4452 SDValue InFlag; 4453 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 4454 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 4455 RegsToPass[i].second, InFlag); 4456 InFlag = Chain.getValue(1); 4457 } 4458 4459 if (isTailCall) 4460 PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp, 4461 FPOp, true, TailCallArguments); 4462 4463 return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG, 4464 RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes, 4465 Ins, InVals); 4466 } 4467 4468 bool 4469 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 4470 MachineFunction &MF, bool isVarArg, 4471 const SmallVectorImpl<ISD::OutputArg> &Outs, 4472 LLVMContext &Context) const { 4473 SmallVector<CCValAssign, 16> RVLocs; 4474 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 4475 RVLocs, Context); 4476 return CCInfo.CheckReturn(Outs, RetCC_PPC); 4477 } 4478 4479 SDValue 4480 PPCTargetLowering::LowerReturn(SDValue Chain, 4481 CallingConv::ID CallConv, bool isVarArg, 4482 const SmallVectorImpl<ISD::OutputArg> &Outs, 4483 const SmallVectorImpl<SDValue> &OutVals, 4484 SDLoc dl, SelectionDAG &DAG) const { 4485 4486 SmallVector<CCValAssign, 16> RVLocs; 4487 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 4488 getTargetMachine(), RVLocs, *DAG.getContext()); 4489 CCInfo.AnalyzeReturn(Outs, RetCC_PPC); 4490 4491 SDValue Flag; 4492 SmallVector<SDValue, 4> RetOps(1, Chain); 4493 4494 // Copy the result values into the output registers. 4495 for (unsigned i = 0; i != RVLocs.size(); ++i) { 4496 CCValAssign &VA = RVLocs[i]; 4497 assert(VA.isRegLoc() && "Can only return in registers!"); 4498 4499 SDValue Arg = OutVals[i]; 4500 4501 switch (VA.getLocInfo()) { 4502 default: llvm_unreachable("Unknown loc info!"); 4503 case CCValAssign::Full: break; 4504 case CCValAssign::AExt: 4505 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 4506 break; 4507 case CCValAssign::ZExt: 4508 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 4509 break; 4510 case CCValAssign::SExt: 4511 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 4512 break; 4513 } 4514 4515 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 4516 Flag = Chain.getValue(1); 4517 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 4518 } 4519 4520 RetOps[0] = Chain; // Update chain. 4521 4522 // Add the flag if we have it. 4523 if (Flag.getNode()) 4524 RetOps.push_back(Flag); 4525 4526 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, 4527 &RetOps[0], RetOps.size()); 4528 } 4529 4530 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG, 4531 const PPCSubtarget &Subtarget) const { 4532 // When we pop the dynamic allocation we need to restore the SP link. 4533 SDLoc dl(Op); 4534 4535 // Get the corect type for pointers. 4536 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 4537 4538 // Construct the stack pointer operand. 4539 bool isPPC64 = Subtarget.isPPC64(); 4540 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1; 4541 SDValue StackPtr = DAG.getRegister(SP, PtrVT); 4542 4543 // Get the operands for the STACKRESTORE. 4544 SDValue Chain = Op.getOperand(0); 4545 SDValue SaveSP = Op.getOperand(1); 4546 4547 // Load the old link SP. 4548 SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr, 4549 MachinePointerInfo(), 4550 false, false, false, 0); 4551 4552 // Restore the stack pointer. 4553 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP); 4554 4555 // Store the old link SP. 4556 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo(), 4557 false, false, 0); 4558 } 4559 4560 4561 4562 SDValue 4563 PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const { 4564 MachineFunction &MF = DAG.getMachineFunction(); 4565 bool isPPC64 = PPCSubTarget.isPPC64(); 4566 bool isDarwinABI = PPCSubTarget.isDarwinABI(); 4567 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 4568 4569 // Get current frame pointer save index. The users of this index will be 4570 // primarily DYNALLOC instructions. 4571 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 4572 int RASI = FI->getReturnAddrSaveIndex(); 4573 4574 // If the frame pointer save index hasn't been defined yet. 4575 if (!RASI) { 4576 // Find out what the fix offset of the frame pointer save area. 4577 int LROffset = PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI); 4578 // Allocate the frame index for frame pointer save area. 4579 RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, true); 4580 // Save the result. 4581 FI->setReturnAddrSaveIndex(RASI); 4582 } 4583 return DAG.getFrameIndex(RASI, PtrVT); 4584 } 4585 4586 SDValue 4587 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const { 4588 MachineFunction &MF = DAG.getMachineFunction(); 4589 bool isPPC64 = PPCSubTarget.isPPC64(); 4590 bool isDarwinABI = PPCSubTarget.isDarwinABI(); 4591 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 4592 4593 // Get current frame pointer save index. The users of this index will be 4594 // primarily DYNALLOC instructions. 4595 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 4596 int FPSI = FI->getFramePointerSaveIndex(); 4597 4598 // If the frame pointer save index hasn't been defined yet. 4599 if (!FPSI) { 4600 // Find out what the fix offset of the frame pointer save area. 4601 int FPOffset = PPCFrameLowering::getFramePointerSaveOffset(isPPC64, 4602 isDarwinABI); 4603 4604 // Allocate the frame index for frame pointer save area. 4605 FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true); 4606 // Save the result. 4607 FI->setFramePointerSaveIndex(FPSI); 4608 } 4609 return DAG.getFrameIndex(FPSI, PtrVT); 4610 } 4611 4612 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 4613 SelectionDAG &DAG, 4614 const PPCSubtarget &Subtarget) const { 4615 // Get the inputs. 4616 SDValue Chain = Op.getOperand(0); 4617 SDValue Size = Op.getOperand(1); 4618 SDLoc dl(Op); 4619 4620 // Get the corect type for pointers. 4621 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 4622 // Negate the size. 4623 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT, 4624 DAG.getConstant(0, PtrVT), Size); 4625 // Construct a node for the frame pointer save index. 4626 SDValue FPSIdx = getFramePointerFrameIndex(DAG); 4627 // Build a DYNALLOC node. 4628 SDValue Ops[3] = { Chain, NegSize, FPSIdx }; 4629 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other); 4630 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops, 3); 4631 } 4632 4633 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, 4634 SelectionDAG &DAG) const { 4635 SDLoc DL(Op); 4636 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL, 4637 DAG.getVTList(MVT::i32, MVT::Other), 4638 Op.getOperand(0), Op.getOperand(1)); 4639 } 4640 4641 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, 4642 SelectionDAG &DAG) const { 4643 SDLoc DL(Op); 4644 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other, 4645 Op.getOperand(0), Op.getOperand(1)); 4646 } 4647 4648 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when 4649 /// possible. 4650 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 4651 // Not FP? Not a fsel. 4652 if (!Op.getOperand(0).getValueType().isFloatingPoint() || 4653 !Op.getOperand(2).getValueType().isFloatingPoint()) 4654 return Op; 4655 4656 // We might be able to do better than this under some circumstances, but in 4657 // general, fsel-based lowering of select is a finite-math-only optimization. 4658 // For more information, see section F.3 of the 2.06 ISA specification. 4659 if (!DAG.getTarget().Options.NoInfsFPMath || 4660 !DAG.getTarget().Options.NoNaNsFPMath) 4661 return Op; 4662 4663 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 4664 4665 EVT ResVT = Op.getValueType(); 4666 EVT CmpVT = Op.getOperand(0).getValueType(); 4667 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 4668 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3); 4669 SDLoc dl(Op); 4670 4671 // If the RHS of the comparison is a 0.0, we don't need to do the 4672 // subtraction at all. 4673 SDValue Sel1; 4674 if (isFloatingPointZero(RHS)) 4675 switch (CC) { 4676 default: break; // SETUO etc aren't handled by fsel. 4677 case ISD::SETNE: 4678 std::swap(TV, FV); 4679 case ISD::SETEQ: 4680 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 4681 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 4682 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 4683 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 4684 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 4685 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 4686 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV); 4687 case ISD::SETULT: 4688 case ISD::SETLT: 4689 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 4690 case ISD::SETOGE: 4691 case ISD::SETGE: 4692 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 4693 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 4694 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 4695 case ISD::SETUGT: 4696 case ISD::SETGT: 4697 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 4698 case ISD::SETOLE: 4699 case ISD::SETLE: 4700 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 4701 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 4702 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 4703 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV); 4704 } 4705 4706 SDValue Cmp; 4707 switch (CC) { 4708 default: break; // SETUO etc aren't handled by fsel. 4709 case ISD::SETNE: 4710 std::swap(TV, FV); 4711 case ISD::SETEQ: 4712 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 4713 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4714 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4715 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 4716 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 4717 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 4718 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 4719 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 4720 case ISD::SETULT: 4721 case ISD::SETLT: 4722 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 4723 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4724 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4725 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 4726 case ISD::SETOGE: 4727 case ISD::SETGE: 4728 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 4729 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4730 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4731 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 4732 case ISD::SETUGT: 4733 case ISD::SETGT: 4734 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 4735 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4736 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4737 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 4738 case ISD::SETOLE: 4739 case ISD::SETLE: 4740 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 4741 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4742 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4743 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 4744 } 4745 return Op; 4746 } 4747 4748 // FIXME: Split this code up when LegalizeDAGTypes lands. 4749 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 4750 SDLoc dl) const { 4751 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 4752 SDValue Src = Op.getOperand(0); 4753 if (Src.getValueType() == MVT::f32) 4754 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 4755 4756 SDValue Tmp; 4757 switch (Op.getSimpleValueType().SimpleTy) { 4758 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 4759 case MVT::i32: 4760 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIWZ : 4761 (PPCSubTarget.hasFPCVT() ? PPCISD::FCTIWUZ : 4762 PPCISD::FCTIDZ), 4763 dl, MVT::f64, Src); 4764 break; 4765 case MVT::i64: 4766 assert((Op.getOpcode() == ISD::FP_TO_SINT || PPCSubTarget.hasFPCVT()) && 4767 "i64 FP_TO_UINT is supported only with FPCVT"); 4768 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 4769 PPCISD::FCTIDUZ, 4770 dl, MVT::f64, Src); 4771 break; 4772 } 4773 4774 // Convert the FP value to an int value through memory. 4775 bool i32Stack = Op.getValueType() == MVT::i32 && PPCSubTarget.hasSTFIWX() && 4776 (Op.getOpcode() == ISD::FP_TO_SINT || PPCSubTarget.hasFPCVT()); 4777 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 4778 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 4779 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(FI); 4780 4781 // Emit a store to the stack slot. 4782 SDValue Chain; 4783 if (i32Stack) { 4784 MachineFunction &MF = DAG.getMachineFunction(); 4785 MachineMemOperand *MMO = 4786 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 4787 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 4788 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 4789 DAG.getVTList(MVT::Other), Ops, array_lengthof(Ops), 4790 MVT::i32, MMO); 4791 } else 4792 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, 4793 MPI, false, false, 0); 4794 4795 // Result is a load from the stack slot. If loading 4 bytes, make sure to 4796 // add in a bias. 4797 if (Op.getValueType() == MVT::i32 && !i32Stack) { 4798 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 4799 DAG.getConstant(4, FIPtr.getValueType())); 4800 MPI = MachinePointerInfo(); 4801 } 4802 4803 return DAG.getLoad(Op.getValueType(), dl, Chain, FIPtr, MPI, 4804 false, false, false, 0); 4805 } 4806 4807 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, 4808 SelectionDAG &DAG) const { 4809 SDLoc dl(Op); 4810 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 4811 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 4812 return SDValue(); 4813 4814 assert((Op.getOpcode() == ISD::SINT_TO_FP || PPCSubTarget.hasFPCVT()) && 4815 "UINT_TO_FP is supported only with FPCVT"); 4816 4817 // If we have FCFIDS, then use it when converting to single-precision. 4818 // Otherwise, convert to double-precision and then round. 4819 unsigned FCFOp = (PPCSubTarget.hasFPCVT() && Op.getValueType() == MVT::f32) ? 4820 (Op.getOpcode() == ISD::UINT_TO_FP ? 4821 PPCISD::FCFIDUS : PPCISD::FCFIDS) : 4822 (Op.getOpcode() == ISD::UINT_TO_FP ? 4823 PPCISD::FCFIDU : PPCISD::FCFID); 4824 MVT FCFTy = (PPCSubTarget.hasFPCVT() && Op.getValueType() == MVT::f32) ? 4825 MVT::f32 : MVT::f64; 4826 4827 if (Op.getOperand(0).getValueType() == MVT::i64) { 4828 SDValue SINT = Op.getOperand(0); 4829 // When converting to single-precision, we actually need to convert 4830 // to double-precision first and then round to single-precision. 4831 // To avoid double-rounding effects during that operation, we have 4832 // to prepare the input operand. Bits that might be truncated when 4833 // converting to double-precision are replaced by a bit that won't 4834 // be lost at this stage, but is below the single-precision rounding 4835 // position. 4836 // 4837 // However, if -enable-unsafe-fp-math is in effect, accept double 4838 // rounding to avoid the extra overhead. 4839 if (Op.getValueType() == MVT::f32 && 4840 !PPCSubTarget.hasFPCVT() && 4841 !DAG.getTarget().Options.UnsafeFPMath) { 4842 4843 // Twiddle input to make sure the low 11 bits are zero. (If this 4844 // is the case, we are guaranteed the value will fit into the 53 bit 4845 // mantissa of an IEEE double-precision value without rounding.) 4846 // If any of those low 11 bits were not zero originally, make sure 4847 // bit 12 (value 2048) is set instead, so that the final rounding 4848 // to single-precision gets the correct result. 4849 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 4850 SINT, DAG.getConstant(2047, MVT::i64)); 4851 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 4852 Round, DAG.getConstant(2047, MVT::i64)); 4853 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 4854 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 4855 Round, DAG.getConstant(-2048, MVT::i64)); 4856 4857 // However, we cannot use that value unconditionally: if the magnitude 4858 // of the input value is small, the bit-twiddling we did above might 4859 // end up visibly changing the output. Fortunately, in that case, we 4860 // don't need to twiddle bits since the original input will convert 4861 // exactly to double-precision floating-point already. Therefore, 4862 // construct a conditional to use the original value if the top 11 4863 // bits are all sign-bit copies, and use the rounded value computed 4864 // above otherwise. 4865 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 4866 SINT, DAG.getConstant(53, MVT::i32)); 4867 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 4868 Cond, DAG.getConstant(1, MVT::i64)); 4869 Cond = DAG.getSetCC(dl, MVT::i32, 4870 Cond, DAG.getConstant(1, MVT::i64), ISD::SETUGT); 4871 4872 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 4873 } 4874 4875 SDValue Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 4876 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 4877 4878 if (Op.getValueType() == MVT::f32 && !PPCSubTarget.hasFPCVT()) 4879 FP = DAG.getNode(ISD::FP_ROUND, dl, 4880 MVT::f32, FP, DAG.getIntPtrConstant(0)); 4881 return FP; 4882 } 4883 4884 assert(Op.getOperand(0).getValueType() == MVT::i32 && 4885 "Unhandled INT_TO_FP type in custom expander!"); 4886 // Since we only generate this in 64-bit mode, we can take advantage of 4887 // 64-bit registers. In particular, sign extend the input value into the 4888 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 4889 // then lfd it and fcfid it. 4890 MachineFunction &MF = DAG.getMachineFunction(); 4891 MachineFrameInfo *FrameInfo = MF.getFrameInfo(); 4892 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 4893 4894 SDValue Ld; 4895 if (PPCSubTarget.hasLFIWAX() || PPCSubTarget.hasFPCVT()) { 4896 int FrameIdx = FrameInfo->CreateStackObject(4, 4, false); 4897 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 4898 4899 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 4900 MachinePointerInfo::getFixedStack(FrameIdx), 4901 false, false, 0); 4902 4903 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 4904 "Expected an i32 store"); 4905 MachineMemOperand *MMO = 4906 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx), 4907 MachineMemOperand::MOLoad, 4, 4); 4908 SDValue Ops[] = { Store, FIdx }; 4909 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 4910 PPCISD::LFIWZX : PPCISD::LFIWAX, 4911 dl, DAG.getVTList(MVT::f64, MVT::Other), 4912 Ops, 2, MVT::i32, MMO); 4913 } else { 4914 assert(PPCSubTarget.isPPC64() && 4915 "i32->FP without LFIWAX supported only on PPC64"); 4916 4917 int FrameIdx = FrameInfo->CreateStackObject(8, 8, false); 4918 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 4919 4920 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, 4921 Op.getOperand(0)); 4922 4923 // STD the extended value into the stack slot. 4924 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Ext64, FIdx, 4925 MachinePointerInfo::getFixedStack(FrameIdx), 4926 false, false, 0); 4927 4928 // Load the value as a double. 4929 Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx, 4930 MachinePointerInfo::getFixedStack(FrameIdx), 4931 false, false, false, 0); 4932 } 4933 4934 // FCFID it and return it. 4935 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld); 4936 if (Op.getValueType() == MVT::f32 && !PPCSubTarget.hasFPCVT()) 4937 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, DAG.getIntPtrConstant(0)); 4938 return FP; 4939 } 4940 4941 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 4942 SelectionDAG &DAG) const { 4943 SDLoc dl(Op); 4944 /* 4945 The rounding mode is in bits 30:31 of FPSR, and has the following 4946 settings: 4947 00 Round to nearest 4948 01 Round to 0 4949 10 Round to +inf 4950 11 Round to -inf 4951 4952 FLT_ROUNDS, on the other hand, expects the following: 4953 -1 Undefined 4954 0 Round to 0 4955 1 Round to nearest 4956 2 Round to +inf 4957 3 Round to -inf 4958 4959 To perform the conversion, we do: 4960 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1)) 4961 */ 4962 4963 MachineFunction &MF = DAG.getMachineFunction(); 4964 EVT VT = Op.getValueType(); 4965 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 4966 SDValue MFFSreg, InFlag; 4967 4968 // Save FP Control Word to register 4969 EVT NodeTys[] = { 4970 MVT::f64, // return register 4971 MVT::Glue // unused in this context 4972 }; 4973 SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0); 4974 4975 // Save FP register to stack slot 4976 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false); 4977 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT); 4978 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, 4979 StackSlot, MachinePointerInfo(), false, false,0); 4980 4981 // Load FP Control Word from low 32 bits of stack slot. 4982 SDValue Four = DAG.getConstant(4, PtrVT); 4983 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four); 4984 SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(), 4985 false, false, false, 0); 4986 4987 // Transform as necessary 4988 SDValue CWD1 = 4989 DAG.getNode(ISD::AND, dl, MVT::i32, 4990 CWD, DAG.getConstant(3, MVT::i32)); 4991 SDValue CWD2 = 4992 DAG.getNode(ISD::SRL, dl, MVT::i32, 4993 DAG.getNode(ISD::AND, dl, MVT::i32, 4994 DAG.getNode(ISD::XOR, dl, MVT::i32, 4995 CWD, DAG.getConstant(3, MVT::i32)), 4996 DAG.getConstant(3, MVT::i32)), 4997 DAG.getConstant(1, MVT::i32)); 4998 4999 SDValue RetVal = 5000 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2); 5001 5002 return DAG.getNode((VT.getSizeInBits() < 16 ? 5003 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal); 5004 } 5005 5006 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const { 5007 EVT VT = Op.getValueType(); 5008 unsigned BitWidth = VT.getSizeInBits(); 5009 SDLoc dl(Op); 5010 assert(Op.getNumOperands() == 3 && 5011 VT == Op.getOperand(1).getValueType() && 5012 "Unexpected SHL!"); 5013 5014 // Expand into a bunch of logical ops. Note that these ops 5015 // depend on the PPC behavior for oversized shift amounts. 5016 SDValue Lo = Op.getOperand(0); 5017 SDValue Hi = Op.getOperand(1); 5018 SDValue Amt = Op.getOperand(2); 5019 EVT AmtVT = Amt.getValueType(); 5020 5021 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 5022 DAG.getConstant(BitWidth, AmtVT), Amt); 5023 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 5024 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 5025 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 5026 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 5027 DAG.getConstant(-BitWidth, AmtVT)); 5028 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 5029 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 5030 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 5031 SDValue OutOps[] = { OutLo, OutHi }; 5032 return DAG.getMergeValues(OutOps, 2, dl); 5033 } 5034 5035 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 5036 EVT VT = Op.getValueType(); 5037 SDLoc dl(Op); 5038 unsigned BitWidth = VT.getSizeInBits(); 5039 assert(Op.getNumOperands() == 3 && 5040 VT == Op.getOperand(1).getValueType() && 5041 "Unexpected SRL!"); 5042 5043 // Expand into a bunch of logical ops. Note that these ops 5044 // depend on the PPC behavior for oversized shift amounts. 5045 SDValue Lo = Op.getOperand(0); 5046 SDValue Hi = Op.getOperand(1); 5047 SDValue Amt = Op.getOperand(2); 5048 EVT AmtVT = Amt.getValueType(); 5049 5050 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 5051 DAG.getConstant(BitWidth, AmtVT), Amt); 5052 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 5053 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 5054 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 5055 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 5056 DAG.getConstant(-BitWidth, AmtVT)); 5057 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 5058 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 5059 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 5060 SDValue OutOps[] = { OutLo, OutHi }; 5061 return DAG.getMergeValues(OutOps, 2, dl); 5062 } 5063 5064 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 5065 SDLoc dl(Op); 5066 EVT VT = Op.getValueType(); 5067 unsigned BitWidth = VT.getSizeInBits(); 5068 assert(Op.getNumOperands() == 3 && 5069 VT == Op.getOperand(1).getValueType() && 5070 "Unexpected SRA!"); 5071 5072 // Expand into a bunch of logical ops, followed by a select_cc. 5073 SDValue Lo = Op.getOperand(0); 5074 SDValue Hi = Op.getOperand(1); 5075 SDValue Amt = Op.getOperand(2); 5076 EVT AmtVT = Amt.getValueType(); 5077 5078 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 5079 DAG.getConstant(BitWidth, AmtVT), Amt); 5080 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 5081 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 5082 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 5083 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 5084 DAG.getConstant(-BitWidth, AmtVT)); 5085 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 5086 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 5087 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, AmtVT), 5088 Tmp4, Tmp6, ISD::SETLE); 5089 SDValue OutOps[] = { OutLo, OutHi }; 5090 return DAG.getMergeValues(OutOps, 2, dl); 5091 } 5092 5093 //===----------------------------------------------------------------------===// 5094 // Vector related lowering. 5095 // 5096 5097 /// BuildSplatI - Build a canonical splati of Val with an element size of 5098 /// SplatSize. Cast the result to VT. 5099 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 5100 SelectionDAG &DAG, SDLoc dl) { 5101 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 5102 5103 static const EVT VTys[] = { // canonical VT to use for each size. 5104 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 5105 }; 5106 5107 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 5108 5109 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 5110 if (Val == -1) 5111 SplatSize = 1; 5112 5113 EVT CanonicalVT = VTys[SplatSize-1]; 5114 5115 // Build a canonical splat for this value. 5116 SDValue Elt = DAG.getConstant(Val, MVT::i32); 5117 SmallVector<SDValue, 8> Ops; 5118 Ops.assign(CanonicalVT.getVectorNumElements(), Elt); 5119 SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, 5120 &Ops[0], Ops.size()); 5121 return DAG.getNode(ISD::BITCAST, dl, ReqVT, Res); 5122 } 5123 5124 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 5125 /// specified intrinsic ID. 5126 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, 5127 SelectionDAG &DAG, SDLoc dl, 5128 EVT DestVT = MVT::Other) { 5129 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 5130 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 5131 DAG.getConstant(IID, MVT::i32), Op); 5132 } 5133 5134 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 5135 /// specified intrinsic ID. 5136 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 5137 SelectionDAG &DAG, SDLoc dl, 5138 EVT DestVT = MVT::Other) { 5139 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 5140 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 5141 DAG.getConstant(IID, MVT::i32), LHS, RHS); 5142 } 5143 5144 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 5145 /// specified intrinsic ID. 5146 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 5147 SDValue Op2, SelectionDAG &DAG, 5148 SDLoc dl, EVT DestVT = MVT::Other) { 5149 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 5150 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 5151 DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2); 5152 } 5153 5154 5155 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 5156 /// amount. The result has the specified value type. 5157 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, 5158 EVT VT, SelectionDAG &DAG, SDLoc dl) { 5159 // Force LHS/RHS to be the right type. 5160 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 5161 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 5162 5163 int Ops[16]; 5164 for (unsigned i = 0; i != 16; ++i) 5165 Ops[i] = i + Amt; 5166 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 5167 return DAG.getNode(ISD::BITCAST, dl, VT, T); 5168 } 5169 5170 // If this is a case we can't handle, return null and let the default 5171 // expansion code take care of it. If we CAN select this case, and if it 5172 // selects to a single instruction, return Op. Otherwise, if we can codegen 5173 // this case more efficiently than a constant pool load, lower it to the 5174 // sequence of ops that should be used. 5175 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 5176 SelectionDAG &DAG) const { 5177 SDLoc dl(Op); 5178 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 5179 assert(BVN != 0 && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 5180 5181 // Check if this is a splat of a constant value. 5182 APInt APSplatBits, APSplatUndef; 5183 unsigned SplatBitSize; 5184 bool HasAnyUndefs; 5185 if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize, 5186 HasAnyUndefs, 0, true) || SplatBitSize > 32) 5187 return SDValue(); 5188 5189 unsigned SplatBits = APSplatBits.getZExtValue(); 5190 unsigned SplatUndef = APSplatUndef.getZExtValue(); 5191 unsigned SplatSize = SplatBitSize / 8; 5192 5193 // First, handle single instruction cases. 5194 5195 // All zeros? 5196 if (SplatBits == 0) { 5197 // Canonicalize all zero vectors to be v4i32. 5198 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 5199 SDValue Z = DAG.getConstant(0, MVT::i32); 5200 Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z); 5201 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z); 5202 } 5203 return Op; 5204 } 5205 5206 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 5207 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >> 5208 (32-SplatBitSize)); 5209 if (SextVal >= -16 && SextVal <= 15) 5210 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl); 5211 5212 5213 // Two instruction sequences. 5214 5215 // If this value is in the range [-32,30] and is even, use: 5216 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2) 5217 // If this value is in the range [17,31] and is odd, use: 5218 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16) 5219 // If this value is in the range [-31,-17] and is odd, use: 5220 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16) 5221 // Note the last two are three-instruction sequences. 5222 if (SextVal >= -32 && SextVal <= 31) { 5223 // To avoid having these optimizations undone by constant folding, 5224 // we convert to a pseudo that will be expanded later into one of 5225 // the above forms. 5226 SDValue Elt = DAG.getConstant(SextVal, MVT::i32); 5227 EVT VT = Op.getValueType(); 5228 int Size = VT == MVT::v16i8 ? 1 : (VT == MVT::v8i16 ? 2 : 4); 5229 SDValue EltSize = DAG.getConstant(Size, MVT::i32); 5230 return DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize); 5231 } 5232 5233 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is 5234 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important 5235 // for fneg/fabs. 5236 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) { 5237 // Make -1 and vspltisw -1: 5238 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl); 5239 5240 // Make the VSLW intrinsic, computing 0x8000_0000. 5241 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV, 5242 OnesV, DAG, dl); 5243 5244 // xor by OnesV to invert it. 5245 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV); 5246 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 5247 } 5248 5249 // Check to see if this is a wide variety of vsplti*, binop self cases. 5250 static const signed char SplatCsts[] = { 5251 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7, 5252 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16 5253 }; 5254 5255 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) { 5256 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for 5257 // cases which are ambiguous (e.g. formation of 0x8000_0000). 'vsplti -1' 5258 int i = SplatCsts[idx]; 5259 5260 // Figure out what shift amount will be used by altivec if shifted by i in 5261 // this splat size. 5262 unsigned TypeShiftAmt = i & (SplatBitSize-1); 5263 5264 // vsplti + shl self. 5265 if (SextVal == (int)((unsigned)i << TypeShiftAmt)) { 5266 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 5267 static const unsigned IIDs[] = { // Intrinsic to use for each size. 5268 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0, 5269 Intrinsic::ppc_altivec_vslw 5270 }; 5271 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 5272 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 5273 } 5274 5275 // vsplti + srl self. 5276 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 5277 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 5278 static const unsigned IIDs[] = { // Intrinsic to use for each size. 5279 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0, 5280 Intrinsic::ppc_altivec_vsrw 5281 }; 5282 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 5283 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 5284 } 5285 5286 // vsplti + sra self. 5287 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 5288 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 5289 static const unsigned IIDs[] = { // Intrinsic to use for each size. 5290 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0, 5291 Intrinsic::ppc_altivec_vsraw 5292 }; 5293 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 5294 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 5295 } 5296 5297 // vsplti + rol self. 5298 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) | 5299 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) { 5300 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 5301 static const unsigned IIDs[] = { // Intrinsic to use for each size. 5302 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 5303 Intrinsic::ppc_altivec_vrlw 5304 }; 5305 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 5306 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 5307 } 5308 5309 // t = vsplti c, result = vsldoi t, t, 1 5310 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) { 5311 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 5312 return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG, dl); 5313 } 5314 // t = vsplti c, result = vsldoi t, t, 2 5315 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) { 5316 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 5317 return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG, dl); 5318 } 5319 // t = vsplti c, result = vsldoi t, t, 3 5320 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) { 5321 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 5322 return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl); 5323 } 5324 } 5325 5326 return SDValue(); 5327 } 5328 5329 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 5330 /// the specified operations to build the shuffle. 5331 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 5332 SDValue RHS, SelectionDAG &DAG, 5333 SDLoc dl) { 5334 unsigned OpNum = (PFEntry >> 26) & 0x0F; 5335 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 5336 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 5337 5338 enum { 5339 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 5340 OP_VMRGHW, 5341 OP_VMRGLW, 5342 OP_VSPLTISW0, 5343 OP_VSPLTISW1, 5344 OP_VSPLTISW2, 5345 OP_VSPLTISW3, 5346 OP_VSLDOI4, 5347 OP_VSLDOI8, 5348 OP_VSLDOI12 5349 }; 5350 5351 if (OpNum == OP_COPY) { 5352 if (LHSID == (1*9+2)*9+3) return LHS; 5353 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 5354 return RHS; 5355 } 5356 5357 SDValue OpLHS, OpRHS; 5358 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 5359 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 5360 5361 int ShufIdxs[16]; 5362 switch (OpNum) { 5363 default: llvm_unreachable("Unknown i32 permute!"); 5364 case OP_VMRGHW: 5365 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 5366 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 5367 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 5368 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 5369 break; 5370 case OP_VMRGLW: 5371 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 5372 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 5373 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 5374 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 5375 break; 5376 case OP_VSPLTISW0: 5377 for (unsigned i = 0; i != 16; ++i) 5378 ShufIdxs[i] = (i&3)+0; 5379 break; 5380 case OP_VSPLTISW1: 5381 for (unsigned i = 0; i != 16; ++i) 5382 ShufIdxs[i] = (i&3)+4; 5383 break; 5384 case OP_VSPLTISW2: 5385 for (unsigned i = 0; i != 16; ++i) 5386 ShufIdxs[i] = (i&3)+8; 5387 break; 5388 case OP_VSPLTISW3: 5389 for (unsigned i = 0; i != 16; ++i) 5390 ShufIdxs[i] = (i&3)+12; 5391 break; 5392 case OP_VSLDOI4: 5393 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl); 5394 case OP_VSLDOI8: 5395 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl); 5396 case OP_VSLDOI12: 5397 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl); 5398 } 5399 EVT VT = OpLHS.getValueType(); 5400 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS); 5401 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS); 5402 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs); 5403 return DAG.getNode(ISD::BITCAST, dl, VT, T); 5404 } 5405 5406 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 5407 /// is a shuffle we can handle in a single instruction, return it. Otherwise, 5408 /// return the code it can be lowered into. Worst case, it can always be 5409 /// lowered into a vperm. 5410 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 5411 SelectionDAG &DAG) const { 5412 SDLoc dl(Op); 5413 SDValue V1 = Op.getOperand(0); 5414 SDValue V2 = Op.getOperand(1); 5415 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5416 EVT VT = Op.getValueType(); 5417 5418 // Cases that are handled by instructions that take permute immediates 5419 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 5420 // selected by the instruction selector. 5421 if (V2.getOpcode() == ISD::UNDEF) { 5422 if (PPC::isSplatShuffleMask(SVOp, 1) || 5423 PPC::isSplatShuffleMask(SVOp, 2) || 5424 PPC::isSplatShuffleMask(SVOp, 4) || 5425 PPC::isVPKUWUMShuffleMask(SVOp, true) || 5426 PPC::isVPKUHUMShuffleMask(SVOp, true) || 5427 PPC::isVSLDOIShuffleMask(SVOp, true) != -1 || 5428 PPC::isVMRGLShuffleMask(SVOp, 1, true) || 5429 PPC::isVMRGLShuffleMask(SVOp, 2, true) || 5430 PPC::isVMRGLShuffleMask(SVOp, 4, true) || 5431 PPC::isVMRGHShuffleMask(SVOp, 1, true) || 5432 PPC::isVMRGHShuffleMask(SVOp, 2, true) || 5433 PPC::isVMRGHShuffleMask(SVOp, 4, true)) { 5434 return Op; 5435 } 5436 } 5437 5438 // Altivec has a variety of "shuffle immediates" that take two vector inputs 5439 // and produce a fixed permutation. If any of these match, do not lower to 5440 // VPERM. 5441 if (PPC::isVPKUWUMShuffleMask(SVOp, false) || 5442 PPC::isVPKUHUMShuffleMask(SVOp, false) || 5443 PPC::isVSLDOIShuffleMask(SVOp, false) != -1 || 5444 PPC::isVMRGLShuffleMask(SVOp, 1, false) || 5445 PPC::isVMRGLShuffleMask(SVOp, 2, false) || 5446 PPC::isVMRGLShuffleMask(SVOp, 4, false) || 5447 PPC::isVMRGHShuffleMask(SVOp, 1, false) || 5448 PPC::isVMRGHShuffleMask(SVOp, 2, false) || 5449 PPC::isVMRGHShuffleMask(SVOp, 4, false)) 5450 return Op; 5451 5452 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 5453 // perfect shuffle table to emit an optimal matching sequence. 5454 ArrayRef<int> PermMask = SVOp->getMask(); 5455 5456 unsigned PFIndexes[4]; 5457 bool isFourElementShuffle = true; 5458 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 5459 unsigned EltNo = 8; // Start out undef. 5460 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 5461 if (PermMask[i*4+j] < 0) 5462 continue; // Undef, ignore it. 5463 5464 unsigned ByteSource = PermMask[i*4+j]; 5465 if ((ByteSource & 3) != j) { 5466 isFourElementShuffle = false; 5467 break; 5468 } 5469 5470 if (EltNo == 8) { 5471 EltNo = ByteSource/4; 5472 } else if (EltNo != ByteSource/4) { 5473 isFourElementShuffle = false; 5474 break; 5475 } 5476 } 5477 PFIndexes[i] = EltNo; 5478 } 5479 5480 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 5481 // perfect shuffle vector to determine if it is cost effective to do this as 5482 // discrete instructions, or whether we should use a vperm. 5483 if (isFourElementShuffle) { 5484 // Compute the index in the perfect shuffle table. 5485 unsigned PFTableIndex = 5486 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 5487 5488 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 5489 unsigned Cost = (PFEntry >> 30); 5490 5491 // Determining when to avoid vperm is tricky. Many things affect the cost 5492 // of vperm, particularly how many times the perm mask needs to be computed. 5493 // For example, if the perm mask can be hoisted out of a loop or is already 5494 // used (perhaps because there are multiple permutes with the same shuffle 5495 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 5496 // the loop requires an extra register. 5497 // 5498 // As a compromise, we only emit discrete instructions if the shuffle can be 5499 // generated in 3 or fewer operations. When we have loop information 5500 // available, if this block is within a loop, we should avoid using vperm 5501 // for 3-operation perms and use a constant pool load instead. 5502 if (Cost < 3) 5503 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 5504 } 5505 5506 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 5507 // vector that will get spilled to the constant pool. 5508 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 5509 5510 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 5511 // that it is in input element units, not in bytes. Convert now. 5512 EVT EltVT = V1.getValueType().getVectorElementType(); 5513 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 5514 5515 SmallVector<SDValue, 16> ResultMask; 5516 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 5517 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i]; 5518 5519 for (unsigned j = 0; j != BytesPerElement; ++j) 5520 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j, 5521 MVT::i32)); 5522 } 5523 5524 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, 5525 &ResultMask[0], ResultMask.size()); 5526 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), V1, V2, VPermMask); 5527 } 5528 5529 /// getAltivecCompareInfo - Given an intrinsic, return false if it is not an 5530 /// altivec comparison. If it is, return true and fill in Opc/isDot with 5531 /// information about the intrinsic. 5532 static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, 5533 bool &isDot) { 5534 unsigned IntrinsicID = 5535 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 5536 CompareOpc = -1; 5537 isDot = false; 5538 switch (IntrinsicID) { 5539 default: return false; 5540 // Comparison predicates. 5541 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; 5542 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; 5543 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; 5544 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; 5545 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; 5546 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; 5547 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; 5548 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; 5549 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; 5550 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; 5551 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; 5552 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; 5553 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; 5554 5555 // Normal Comparisons. 5556 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 5557 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 5558 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 5559 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 5560 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 5561 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 5562 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 5563 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 5564 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 5565 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 5566 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 5567 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 5568 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 5569 } 5570 return true; 5571 } 5572 5573 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 5574 /// lower, do it, otherwise return null. 5575 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 5576 SelectionDAG &DAG) const { 5577 // If this is a lowered altivec predicate compare, CompareOpc is set to the 5578 // opcode number of the comparison. 5579 SDLoc dl(Op); 5580 int CompareOpc; 5581 bool isDot; 5582 if (!getAltivecCompareInfo(Op, CompareOpc, isDot)) 5583 return SDValue(); // Don't custom lower most intrinsics. 5584 5585 // If this is a non-dot comparison, make the VCMP node and we are done. 5586 if (!isDot) { 5587 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 5588 Op.getOperand(1), Op.getOperand(2), 5589 DAG.getConstant(CompareOpc, MVT::i32)); 5590 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 5591 } 5592 5593 // Create the PPCISD altivec 'dot' comparison node. 5594 SDValue Ops[] = { 5595 Op.getOperand(2), // LHS 5596 Op.getOperand(3), // RHS 5597 DAG.getConstant(CompareOpc, MVT::i32) 5598 }; 5599 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue }; 5600 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3); 5601 5602 // Now that we have the comparison, emit a copy from the CR to a GPR. 5603 // This is flagged to the above dot comparison. 5604 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32, 5605 DAG.getRegister(PPC::CR6, MVT::i32), 5606 CompNode.getValue(1)); 5607 5608 // Unpack the result based on how the target uses it. 5609 unsigned BitNo; // Bit # of CR6. 5610 bool InvertBit; // Invert result? 5611 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 5612 default: // Can't happen, don't crash on invalid number though. 5613 case 0: // Return the value of the EQ bit of CR6. 5614 BitNo = 0; InvertBit = false; 5615 break; 5616 case 1: // Return the inverted value of the EQ bit of CR6. 5617 BitNo = 0; InvertBit = true; 5618 break; 5619 case 2: // Return the value of the LT bit of CR6. 5620 BitNo = 2; InvertBit = false; 5621 break; 5622 case 3: // Return the inverted value of the LT bit of CR6. 5623 BitNo = 2; InvertBit = true; 5624 break; 5625 } 5626 5627 // Shift the bit into the low position. 5628 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags, 5629 DAG.getConstant(8-(3-BitNo), MVT::i32)); 5630 // Isolate the bit. 5631 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags, 5632 DAG.getConstant(1, MVT::i32)); 5633 5634 // If we are supposed to, toggle the bit. 5635 if (InvertBit) 5636 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags, 5637 DAG.getConstant(1, MVT::i32)); 5638 return Flags; 5639 } 5640 5641 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, 5642 SelectionDAG &DAG) const { 5643 SDLoc dl(Op); 5644 // Create a stack slot that is 16-byte aligned. 5645 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 5646 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 5647 EVT PtrVT = getPointerTy(); 5648 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 5649 5650 // Store the input value into Value#0 of the stack slot. 5651 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, 5652 Op.getOperand(0), FIdx, MachinePointerInfo(), 5653 false, false, 0); 5654 // Load it out. 5655 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo(), 5656 false, false, false, 0); 5657 } 5658 5659 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 5660 SDLoc dl(Op); 5661 if (Op.getValueType() == MVT::v4i32) { 5662 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 5663 5664 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl); 5665 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt. 5666 5667 SDValue RHSSwap = // = vrlw RHS, 16 5668 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 5669 5670 // Shrinkify inputs to v8i16. 5671 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS); 5672 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS); 5673 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap); 5674 5675 // Low parts multiplied together, generating 32-bit results (we ignore the 5676 // top parts). 5677 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 5678 LHS, RHS, DAG, dl, MVT::v4i32); 5679 5680 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 5681 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 5682 // Shift the high parts up 16 bits. 5683 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 5684 Neg16, DAG, dl); 5685 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 5686 } else if (Op.getValueType() == MVT::v8i16) { 5687 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 5688 5689 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl); 5690 5691 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 5692 LHS, RHS, Zero, DAG, dl); 5693 } else if (Op.getValueType() == MVT::v16i8) { 5694 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 5695 5696 // Multiply the even 8-bit parts, producing 16-bit sums. 5697 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 5698 LHS, RHS, DAG, dl, MVT::v8i16); 5699 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts); 5700 5701 // Multiply the odd 8-bit parts, producing 16-bit sums. 5702 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 5703 LHS, RHS, DAG, dl, MVT::v8i16); 5704 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts); 5705 5706 // Merge the results together. 5707 int Ops[16]; 5708 for (unsigned i = 0; i != 8; ++i) { 5709 Ops[i*2 ] = 2*i+1; 5710 Ops[i*2+1] = 2*i+1+16; 5711 } 5712 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 5713 } else { 5714 llvm_unreachable("Unknown mul to lower!"); 5715 } 5716 } 5717 5718 /// LowerOperation - Provide custom lowering hooks for some operations. 5719 /// 5720 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 5721 switch (Op.getOpcode()) { 5722 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 5723 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 5724 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 5725 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 5726 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 5727 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 5728 case ISD::SETCC: return LowerSETCC(Op, DAG); 5729 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 5730 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 5731 case ISD::VASTART: 5732 return LowerVASTART(Op, DAG, PPCSubTarget); 5733 5734 case ISD::VAARG: 5735 return LowerVAARG(Op, DAG, PPCSubTarget); 5736 5737 case ISD::VACOPY: 5738 return LowerVACOPY(Op, DAG, PPCSubTarget); 5739 5740 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget); 5741 case ISD::DYNAMIC_STACKALLOC: 5742 return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget); 5743 5744 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 5745 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 5746 5747 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 5748 case ISD::FP_TO_UINT: 5749 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, 5750 SDLoc(Op)); 5751 case ISD::UINT_TO_FP: 5752 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 5753 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 5754 5755 // Lower 64-bit shifts. 5756 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 5757 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 5758 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 5759 5760 // Vector-related lowering. 5761 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 5762 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 5763 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 5764 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 5765 case ISD::MUL: return LowerMUL(Op, DAG); 5766 5767 // For counter-based loop handling. 5768 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 5769 5770 // Frame & Return address. 5771 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 5772 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 5773 } 5774 } 5775 5776 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 5777 SmallVectorImpl<SDValue>&Results, 5778 SelectionDAG &DAG) const { 5779 const TargetMachine &TM = getTargetMachine(); 5780 SDLoc dl(N); 5781 switch (N->getOpcode()) { 5782 default: 5783 llvm_unreachable("Do not know how to custom type legalize this operation!"); 5784 case ISD::INTRINSIC_W_CHAIN: { 5785 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 5786 Intrinsic::ppc_is_decremented_ctr_nonzero) 5787 break; 5788 5789 assert(N->getValueType(0) == MVT::i1 && 5790 "Unexpected result type for CTR decrement intrinsic"); 5791 EVT SVT = getSetCCResultType(*DAG.getContext(), N->getValueType(0)); 5792 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 5793 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 5794 N->getOperand(1)); 5795 5796 Results.push_back(NewInt); 5797 Results.push_back(NewInt.getValue(1)); 5798 break; 5799 } 5800 case ISD::VAARG: { 5801 if (!TM.getSubtarget<PPCSubtarget>().isSVR4ABI() 5802 || TM.getSubtarget<PPCSubtarget>().isPPC64()) 5803 return; 5804 5805 EVT VT = N->getValueType(0); 5806 5807 if (VT == MVT::i64) { 5808 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG, PPCSubTarget); 5809 5810 Results.push_back(NewNode); 5811 Results.push_back(NewNode.getValue(1)); 5812 } 5813 return; 5814 } 5815 case ISD::FP_ROUND_INREG: { 5816 assert(N->getValueType(0) == MVT::ppcf128); 5817 assert(N->getOperand(0).getValueType() == MVT::ppcf128); 5818 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 5819 MVT::f64, N->getOperand(0), 5820 DAG.getIntPtrConstant(0)); 5821 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 5822 MVT::f64, N->getOperand(0), 5823 DAG.getIntPtrConstant(1)); 5824 5825 // Add the two halves of the long double in round-to-zero mode. 5826 SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 5827 5828 // We know the low half is about to be thrown away, so just use something 5829 // convenient. 5830 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128, 5831 FPreg, FPreg)); 5832 return; 5833 } 5834 case ISD::FP_TO_SINT: 5835 // LowerFP_TO_INT() can only handle f32 and f64. 5836 if (N->getOperand(0).getValueType() == MVT::ppcf128) 5837 return; 5838 Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl)); 5839 return; 5840 } 5841 } 5842 5843 5844 //===----------------------------------------------------------------------===// 5845 // Other Lowering Code 5846 //===----------------------------------------------------------------------===// 5847 5848 MachineBasicBlock * 5849 PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 5850 bool is64bit, unsigned BinOpcode) const { 5851 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 5852 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5853 5854 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5855 MachineFunction *F = BB->getParent(); 5856 MachineFunction::iterator It = BB; 5857 ++It; 5858 5859 unsigned dest = MI->getOperand(0).getReg(); 5860 unsigned ptrA = MI->getOperand(1).getReg(); 5861 unsigned ptrB = MI->getOperand(2).getReg(); 5862 unsigned incr = MI->getOperand(3).getReg(); 5863 DebugLoc dl = MI->getDebugLoc(); 5864 5865 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 5866 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 5867 F->insert(It, loopMBB); 5868 F->insert(It, exitMBB); 5869 exitMBB->splice(exitMBB->begin(), BB, 5870 llvm::next(MachineBasicBlock::iterator(MI)), 5871 BB->end()); 5872 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5873 5874 MachineRegisterInfo &RegInfo = F->getRegInfo(); 5875 unsigned TmpReg = (!BinOpcode) ? incr : 5876 RegInfo.createVirtualRegister( 5877 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass : 5878 (const TargetRegisterClass *) &PPC::GPRCRegClass); 5879 5880 // thisMBB: 5881 // ... 5882 // fallthrough --> loopMBB 5883 BB->addSuccessor(loopMBB); 5884 5885 // loopMBB: 5886 // l[wd]arx dest, ptr 5887 // add r0, dest, incr 5888 // st[wd]cx. r0, ptr 5889 // bne- loopMBB 5890 // fallthrough --> exitMBB 5891 BB = loopMBB; 5892 BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest) 5893 .addReg(ptrA).addReg(ptrB); 5894 if (BinOpcode) 5895 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 5896 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 5897 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 5898 BuildMI(BB, dl, TII->get(PPC::BCC)) 5899 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 5900 BB->addSuccessor(loopMBB); 5901 BB->addSuccessor(exitMBB); 5902 5903 // exitMBB: 5904 // ... 5905 BB = exitMBB; 5906 return BB; 5907 } 5908 5909 MachineBasicBlock * 5910 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI, 5911 MachineBasicBlock *BB, 5912 bool is8bit, // operation 5913 unsigned BinOpcode) const { 5914 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 5915 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5916 // In 64 bit mode we have to use 64 bits for addresses, even though the 5917 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 5918 // registers without caring whether they're 32 or 64, but here we're 5919 // doing actual arithmetic on the addresses. 5920 bool is64bit = PPCSubTarget.isPPC64(); 5921 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 5922 5923 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5924 MachineFunction *F = BB->getParent(); 5925 MachineFunction::iterator It = BB; 5926 ++It; 5927 5928 unsigned dest = MI->getOperand(0).getReg(); 5929 unsigned ptrA = MI->getOperand(1).getReg(); 5930 unsigned ptrB = MI->getOperand(2).getReg(); 5931 unsigned incr = MI->getOperand(3).getReg(); 5932 DebugLoc dl = MI->getDebugLoc(); 5933 5934 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 5935 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 5936 F->insert(It, loopMBB); 5937 F->insert(It, exitMBB); 5938 exitMBB->splice(exitMBB->begin(), BB, 5939 llvm::next(MachineBasicBlock::iterator(MI)), 5940 BB->end()); 5941 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5942 5943 MachineRegisterInfo &RegInfo = F->getRegInfo(); 5944 const TargetRegisterClass *RC = 5945 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass : 5946 (const TargetRegisterClass *) &PPC::GPRCRegClass; 5947 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 5948 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 5949 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 5950 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC); 5951 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 5952 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 5953 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 5954 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 5955 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC); 5956 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 5957 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 5958 unsigned Ptr1Reg; 5959 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC); 5960 5961 // thisMBB: 5962 // ... 5963 // fallthrough --> loopMBB 5964 BB->addSuccessor(loopMBB); 5965 5966 // The 4-byte load must be aligned, while a char or short may be 5967 // anywhere in the word. Hence all this nasty bookkeeping code. 5968 // add ptr1, ptrA, ptrB [copy if ptrA==0] 5969 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 5970 // xori shift, shift1, 24 [16] 5971 // rlwinm ptr, ptr1, 0, 0, 29 5972 // slw incr2, incr, shift 5973 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 5974 // slw mask, mask2, shift 5975 // loopMBB: 5976 // lwarx tmpDest, ptr 5977 // add tmp, tmpDest, incr2 5978 // andc tmp2, tmpDest, mask 5979 // and tmp3, tmp, mask 5980 // or tmp4, tmp3, tmp2 5981 // stwcx. tmp4, ptr 5982 // bne- loopMBB 5983 // fallthrough --> exitMBB 5984 // srw dest, tmpDest, shift 5985 if (ptrA != ZeroReg) { 5986 Ptr1Reg = RegInfo.createVirtualRegister(RC); 5987 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 5988 .addReg(ptrA).addReg(ptrB); 5989 } else { 5990 Ptr1Reg = ptrB; 5991 } 5992 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 5993 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 5994 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 5995 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 5996 if (is64bit) 5997 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 5998 .addReg(Ptr1Reg).addImm(0).addImm(61); 5999 else 6000 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 6001 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 6002 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg) 6003 .addReg(incr).addReg(ShiftReg); 6004 if (is8bit) 6005 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 6006 else { 6007 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 6008 BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535); 6009 } 6010 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 6011 .addReg(Mask2Reg).addReg(ShiftReg); 6012 6013 BB = loopMBB; 6014 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 6015 .addReg(ZeroReg).addReg(PtrReg); 6016 if (BinOpcode) 6017 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 6018 .addReg(Incr2Reg).addReg(TmpDestReg); 6019 BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg) 6020 .addReg(TmpDestReg).addReg(MaskReg); 6021 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg) 6022 .addReg(TmpReg).addReg(MaskReg); 6023 BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg) 6024 .addReg(Tmp3Reg).addReg(Tmp2Reg); 6025 BuildMI(BB, dl, TII->get(PPC::STWCX)) 6026 .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg); 6027 BuildMI(BB, dl, TII->get(PPC::BCC)) 6028 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 6029 BB->addSuccessor(loopMBB); 6030 BB->addSuccessor(exitMBB); 6031 6032 // exitMBB: 6033 // ... 6034 BB = exitMBB; 6035 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg) 6036 .addReg(ShiftReg); 6037 return BB; 6038 } 6039 6040 llvm::MachineBasicBlock* 6041 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI, 6042 MachineBasicBlock *MBB) const { 6043 DebugLoc DL = MI->getDebugLoc(); 6044 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6045 6046 MachineFunction *MF = MBB->getParent(); 6047 MachineRegisterInfo &MRI = MF->getRegInfo(); 6048 6049 const BasicBlock *BB = MBB->getBasicBlock(); 6050 MachineFunction::iterator I = MBB; 6051 ++I; 6052 6053 // Memory Reference 6054 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 6055 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 6056 6057 unsigned DstReg = MI->getOperand(0).getReg(); 6058 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 6059 assert(RC->hasType(MVT::i32) && "Invalid destination!"); 6060 unsigned mainDstReg = MRI.createVirtualRegister(RC); 6061 unsigned restoreDstReg = MRI.createVirtualRegister(RC); 6062 6063 MVT PVT = getPointerTy(); 6064 assert((PVT == MVT::i64 || PVT == MVT::i32) && 6065 "Invalid Pointer Size!"); 6066 // For v = setjmp(buf), we generate 6067 // 6068 // thisMBB: 6069 // SjLjSetup mainMBB 6070 // bl mainMBB 6071 // v_restore = 1 6072 // b sinkMBB 6073 // 6074 // mainMBB: 6075 // buf[LabelOffset] = LR 6076 // v_main = 0 6077 // 6078 // sinkMBB: 6079 // v = phi(main, restore) 6080 // 6081 6082 MachineBasicBlock *thisMBB = MBB; 6083 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 6084 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 6085 MF->insert(I, mainMBB); 6086 MF->insert(I, sinkMBB); 6087 6088 MachineInstrBuilder MIB; 6089 6090 // Transfer the remainder of BB and its successor edges to sinkMBB. 6091 sinkMBB->splice(sinkMBB->begin(), MBB, 6092 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 6093 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 6094 6095 // Note that the structure of the jmp_buf used here is not compatible 6096 // with that used by libc, and is not designed to be. Specifically, it 6097 // stores only those 'reserved' registers that LLVM does not otherwise 6098 // understand how to spill. Also, by convention, by the time this 6099 // intrinsic is called, Clang has already stored the frame address in the 6100 // first slot of the buffer and stack address in the third. Following the 6101 // X86 target code, we'll store the jump address in the second slot. We also 6102 // need to save the TOC pointer (R2) to handle jumps between shared 6103 // libraries, and that will be stored in the fourth slot. The thread 6104 // identifier (R13) is not affected. 6105 6106 // thisMBB: 6107 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 6108 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 6109 const int64_t BPOffset = 4 * PVT.getStoreSize(); 6110 6111 // Prepare IP either in reg. 6112 const TargetRegisterClass *PtrRC = getRegClassFor(PVT); 6113 unsigned LabelReg = MRI.createVirtualRegister(PtrRC); 6114 unsigned BufReg = MI->getOperand(1).getReg(); 6115 6116 if (PPCSubTarget.isPPC64() && PPCSubTarget.isSVR4ABI()) { 6117 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD)) 6118 .addReg(PPC::X2) 6119 .addImm(TOCOffset) 6120 .addReg(BufReg); 6121 MIB.setMemRefs(MMOBegin, MMOEnd); 6122 } 6123 6124 // Naked functions never have a base pointer, and so we use r1. For all 6125 // other functions, this decision must be delayed until during PEI. 6126 unsigned BaseReg; 6127 if (MF->getFunction()->getAttributes().hasAttribute( 6128 AttributeSet::FunctionIndex, Attribute::Naked)) 6129 BaseReg = PPCSubTarget.isPPC64() ? PPC::X1 : PPC::R1; 6130 else 6131 BaseReg = PPCSubTarget.isPPC64() ? PPC::BP8 : PPC::BP; 6132 6133 MIB = BuildMI(*thisMBB, MI, DL, 6134 TII->get(PPCSubTarget.isPPC64() ? PPC::STD : PPC::STW)) 6135 .addReg(BaseReg) 6136 .addImm(BPOffset) 6137 .addReg(BufReg); 6138 MIB.setMemRefs(MMOBegin, MMOEnd); 6139 6140 // Setup 6141 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB); 6142 const PPCRegisterInfo *TRI = 6143 static_cast<const PPCRegisterInfo*>(getTargetMachine().getRegisterInfo()); 6144 MIB.addRegMask(TRI->getNoPreservedMask()); 6145 6146 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1); 6147 6148 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup)) 6149 .addMBB(mainMBB); 6150 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB); 6151 6152 thisMBB->addSuccessor(mainMBB, /* weight */ 0); 6153 thisMBB->addSuccessor(sinkMBB, /* weight */ 1); 6154 6155 // mainMBB: 6156 // mainDstReg = 0 6157 MIB = BuildMI(mainMBB, DL, 6158 TII->get(PPCSubTarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg); 6159 6160 // Store IP 6161 if (PPCSubTarget.isPPC64()) { 6162 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD)) 6163 .addReg(LabelReg) 6164 .addImm(LabelOffset) 6165 .addReg(BufReg); 6166 } else { 6167 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW)) 6168 .addReg(LabelReg) 6169 .addImm(LabelOffset) 6170 .addReg(BufReg); 6171 } 6172 6173 MIB.setMemRefs(MMOBegin, MMOEnd); 6174 6175 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0); 6176 mainMBB->addSuccessor(sinkMBB); 6177 6178 // sinkMBB: 6179 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 6180 TII->get(PPC::PHI), DstReg) 6181 .addReg(mainDstReg).addMBB(mainMBB) 6182 .addReg(restoreDstReg).addMBB(thisMBB); 6183 6184 MI->eraseFromParent(); 6185 return sinkMBB; 6186 } 6187 6188 MachineBasicBlock * 6189 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI, 6190 MachineBasicBlock *MBB) const { 6191 DebugLoc DL = MI->getDebugLoc(); 6192 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6193 6194 MachineFunction *MF = MBB->getParent(); 6195 MachineRegisterInfo &MRI = MF->getRegInfo(); 6196 6197 // Memory Reference 6198 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 6199 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 6200 6201 MVT PVT = getPointerTy(); 6202 assert((PVT == MVT::i64 || PVT == MVT::i32) && 6203 "Invalid Pointer Size!"); 6204 6205 const TargetRegisterClass *RC = 6206 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 6207 unsigned Tmp = MRI.createVirtualRegister(RC); 6208 // Since FP is only updated here but NOT referenced, it's treated as GPR. 6209 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; 6210 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; 6211 unsigned BP = (PVT == MVT::i64) ? PPC::X30 : PPC::R30; 6212 6213 MachineInstrBuilder MIB; 6214 6215 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 6216 const int64_t SPOffset = 2 * PVT.getStoreSize(); 6217 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 6218 const int64_t BPOffset = 4 * PVT.getStoreSize(); 6219 6220 unsigned BufReg = MI->getOperand(0).getReg(); 6221 6222 // Reload FP (the jumped-to function may not have had a 6223 // frame pointer, and if so, then its r31 will be restored 6224 // as necessary). 6225 if (PVT == MVT::i64) { 6226 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP) 6227 .addImm(0) 6228 .addReg(BufReg); 6229 } else { 6230 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP) 6231 .addImm(0) 6232 .addReg(BufReg); 6233 } 6234 MIB.setMemRefs(MMOBegin, MMOEnd); 6235 6236 // Reload IP 6237 if (PVT == MVT::i64) { 6238 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp) 6239 .addImm(LabelOffset) 6240 .addReg(BufReg); 6241 } else { 6242 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp) 6243 .addImm(LabelOffset) 6244 .addReg(BufReg); 6245 } 6246 MIB.setMemRefs(MMOBegin, MMOEnd); 6247 6248 // Reload SP 6249 if (PVT == MVT::i64) { 6250 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP) 6251 .addImm(SPOffset) 6252 .addReg(BufReg); 6253 } else { 6254 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP) 6255 .addImm(SPOffset) 6256 .addReg(BufReg); 6257 } 6258 MIB.setMemRefs(MMOBegin, MMOEnd); 6259 6260 // Reload BP 6261 if (PVT == MVT::i64) { 6262 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP) 6263 .addImm(BPOffset) 6264 .addReg(BufReg); 6265 } else { 6266 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP) 6267 .addImm(BPOffset) 6268 .addReg(BufReg); 6269 } 6270 MIB.setMemRefs(MMOBegin, MMOEnd); 6271 6272 // Reload TOC 6273 if (PVT == MVT::i64 && PPCSubTarget.isSVR4ABI()) { 6274 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) 6275 .addImm(TOCOffset) 6276 .addReg(BufReg); 6277 6278 MIB.setMemRefs(MMOBegin, MMOEnd); 6279 } 6280 6281 // Jump 6282 BuildMI(*MBB, MI, DL, 6283 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp); 6284 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR)); 6285 6286 MI->eraseFromParent(); 6287 return MBB; 6288 } 6289 6290 MachineBasicBlock * 6291 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 6292 MachineBasicBlock *BB) const { 6293 if (MI->getOpcode() == PPC::EH_SjLj_SetJmp32 || 6294 MI->getOpcode() == PPC::EH_SjLj_SetJmp64) { 6295 return emitEHSjLjSetJmp(MI, BB); 6296 } else if (MI->getOpcode() == PPC::EH_SjLj_LongJmp32 || 6297 MI->getOpcode() == PPC::EH_SjLj_LongJmp64) { 6298 return emitEHSjLjLongJmp(MI, BB); 6299 } 6300 6301 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6302 6303 // To "insert" these instructions we actually have to insert their 6304 // control-flow patterns. 6305 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6306 MachineFunction::iterator It = BB; 6307 ++It; 6308 6309 MachineFunction *F = BB->getParent(); 6310 6311 if (PPCSubTarget.hasISEL() && (MI->getOpcode() == PPC::SELECT_CC_I4 || 6312 MI->getOpcode() == PPC::SELECT_CC_I8)) { 6313 SmallVector<MachineOperand, 2> Cond; 6314 Cond.push_back(MI->getOperand(4)); 6315 Cond.push_back(MI->getOperand(1)); 6316 6317 DebugLoc dl = MI->getDebugLoc(); 6318 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6319 TII->insertSelect(*BB, MI, dl, MI->getOperand(0).getReg(), 6320 Cond, MI->getOperand(2).getReg(), 6321 MI->getOperand(3).getReg()); 6322 } else if (MI->getOpcode() == PPC::SELECT_CC_I4 || 6323 MI->getOpcode() == PPC::SELECT_CC_I8 || 6324 MI->getOpcode() == PPC::SELECT_CC_F4 || 6325 MI->getOpcode() == PPC::SELECT_CC_F8 || 6326 MI->getOpcode() == PPC::SELECT_CC_VRRC) { 6327 6328 6329 // The incoming instruction knows the destination vreg to set, the 6330 // condition code register to branch on, the true/false values to 6331 // select between, and a branch opcode to use. 6332 6333 // thisMBB: 6334 // ... 6335 // TrueVal = ... 6336 // cmpTY ccX, r1, r2 6337 // bCC copy1MBB 6338 // fallthrough --> copy0MBB 6339 MachineBasicBlock *thisMBB = BB; 6340 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 6341 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 6342 unsigned SelectPred = MI->getOperand(4).getImm(); 6343 DebugLoc dl = MI->getDebugLoc(); 6344 F->insert(It, copy0MBB); 6345 F->insert(It, sinkMBB); 6346 6347 // Transfer the remainder of BB and its successor edges to sinkMBB. 6348 sinkMBB->splice(sinkMBB->begin(), BB, 6349 llvm::next(MachineBasicBlock::iterator(MI)), 6350 BB->end()); 6351 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 6352 6353 // Next, add the true and fallthrough blocks as its successors. 6354 BB->addSuccessor(copy0MBB); 6355 BB->addSuccessor(sinkMBB); 6356 6357 BuildMI(BB, dl, TII->get(PPC::BCC)) 6358 .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 6359 6360 // copy0MBB: 6361 // %FalseValue = ... 6362 // # fallthrough to sinkMBB 6363 BB = copy0MBB; 6364 6365 // Update machine-CFG edges 6366 BB->addSuccessor(sinkMBB); 6367 6368 // sinkMBB: 6369 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 6370 // ... 6371 BB = sinkMBB; 6372 BuildMI(*BB, BB->begin(), dl, 6373 TII->get(PPC::PHI), MI->getOperand(0).getReg()) 6374 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB) 6375 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 6376 } 6377 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 6378 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 6379 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 6380 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 6381 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 6382 BB = EmitAtomicBinary(MI, BB, false, PPC::ADD4); 6383 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 6384 BB = EmitAtomicBinary(MI, BB, true, PPC::ADD8); 6385 6386 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 6387 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 6388 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 6389 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 6390 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 6391 BB = EmitAtomicBinary(MI, BB, false, PPC::AND); 6392 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 6393 BB = EmitAtomicBinary(MI, BB, true, PPC::AND8); 6394 6395 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 6396 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 6397 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 6398 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 6399 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 6400 BB = EmitAtomicBinary(MI, BB, false, PPC::OR); 6401 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 6402 BB = EmitAtomicBinary(MI, BB, true, PPC::OR8); 6403 6404 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 6405 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 6406 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 6407 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 6408 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 6409 BB = EmitAtomicBinary(MI, BB, false, PPC::XOR); 6410 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 6411 BB = EmitAtomicBinary(MI, BB, true, PPC::XOR8); 6412 6413 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 6414 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ANDC); 6415 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 6416 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ANDC); 6417 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 6418 BB = EmitAtomicBinary(MI, BB, false, PPC::ANDC); 6419 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 6420 BB = EmitAtomicBinary(MI, BB, true, PPC::ANDC8); 6421 6422 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 6423 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 6424 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 6425 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 6426 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32) 6427 BB = EmitAtomicBinary(MI, BB, false, PPC::SUBF); 6428 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 6429 BB = EmitAtomicBinary(MI, BB, true, PPC::SUBF8); 6430 6431 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8) 6432 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 6433 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16) 6434 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 6435 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32) 6436 BB = EmitAtomicBinary(MI, BB, false, 0); 6437 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64) 6438 BB = EmitAtomicBinary(MI, BB, true, 0); 6439 6440 else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 6441 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64) { 6442 bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64; 6443 6444 unsigned dest = MI->getOperand(0).getReg(); 6445 unsigned ptrA = MI->getOperand(1).getReg(); 6446 unsigned ptrB = MI->getOperand(2).getReg(); 6447 unsigned oldval = MI->getOperand(3).getReg(); 6448 unsigned newval = MI->getOperand(4).getReg(); 6449 DebugLoc dl = MI->getDebugLoc(); 6450 6451 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 6452 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 6453 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 6454 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 6455 F->insert(It, loop1MBB); 6456 F->insert(It, loop2MBB); 6457 F->insert(It, midMBB); 6458 F->insert(It, exitMBB); 6459 exitMBB->splice(exitMBB->begin(), BB, 6460 llvm::next(MachineBasicBlock::iterator(MI)), 6461 BB->end()); 6462 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 6463 6464 // thisMBB: 6465 // ... 6466 // fallthrough --> loopMBB 6467 BB->addSuccessor(loop1MBB); 6468 6469 // loop1MBB: 6470 // l[wd]arx dest, ptr 6471 // cmp[wd] dest, oldval 6472 // bne- midMBB 6473 // loop2MBB: 6474 // st[wd]cx. newval, ptr 6475 // bne- loopMBB 6476 // b exitBB 6477 // midMBB: 6478 // st[wd]cx. dest, ptr 6479 // exitBB: 6480 BB = loop1MBB; 6481 BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest) 6482 .addReg(ptrA).addReg(ptrB); 6483 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0) 6484 .addReg(oldval).addReg(dest); 6485 BuildMI(BB, dl, TII->get(PPC::BCC)) 6486 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 6487 BB->addSuccessor(loop2MBB); 6488 BB->addSuccessor(midMBB); 6489 6490 BB = loop2MBB; 6491 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 6492 .addReg(newval).addReg(ptrA).addReg(ptrB); 6493 BuildMI(BB, dl, TII->get(PPC::BCC)) 6494 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 6495 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 6496 BB->addSuccessor(loop1MBB); 6497 BB->addSuccessor(exitMBB); 6498 6499 BB = midMBB; 6500 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 6501 .addReg(dest).addReg(ptrA).addReg(ptrB); 6502 BB->addSuccessor(exitMBB); 6503 6504 // exitMBB: 6505 // ... 6506 BB = exitMBB; 6507 } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 6508 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 6509 // We must use 64-bit registers for addresses when targeting 64-bit, 6510 // since we're actually doing arithmetic on them. Other registers 6511 // can be 32-bit. 6512 bool is64bit = PPCSubTarget.isPPC64(); 6513 bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 6514 6515 unsigned dest = MI->getOperand(0).getReg(); 6516 unsigned ptrA = MI->getOperand(1).getReg(); 6517 unsigned ptrB = MI->getOperand(2).getReg(); 6518 unsigned oldval = MI->getOperand(3).getReg(); 6519 unsigned newval = MI->getOperand(4).getReg(); 6520 DebugLoc dl = MI->getDebugLoc(); 6521 6522 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 6523 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 6524 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 6525 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 6526 F->insert(It, loop1MBB); 6527 F->insert(It, loop2MBB); 6528 F->insert(It, midMBB); 6529 F->insert(It, exitMBB); 6530 exitMBB->splice(exitMBB->begin(), BB, 6531 llvm::next(MachineBasicBlock::iterator(MI)), 6532 BB->end()); 6533 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 6534 6535 MachineRegisterInfo &RegInfo = F->getRegInfo(); 6536 const TargetRegisterClass *RC = 6537 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass : 6538 (const TargetRegisterClass *) &PPC::GPRCRegClass; 6539 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 6540 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 6541 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 6542 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC); 6543 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC); 6544 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC); 6545 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC); 6546 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 6547 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 6548 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 6549 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 6550 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 6551 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 6552 unsigned Ptr1Reg; 6553 unsigned TmpReg = RegInfo.createVirtualRegister(RC); 6554 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 6555 // thisMBB: 6556 // ... 6557 // fallthrough --> loopMBB 6558 BB->addSuccessor(loop1MBB); 6559 6560 // The 4-byte load must be aligned, while a char or short may be 6561 // anywhere in the word. Hence all this nasty bookkeeping code. 6562 // add ptr1, ptrA, ptrB [copy if ptrA==0] 6563 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 6564 // xori shift, shift1, 24 [16] 6565 // rlwinm ptr, ptr1, 0, 0, 29 6566 // slw newval2, newval, shift 6567 // slw oldval2, oldval,shift 6568 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 6569 // slw mask, mask2, shift 6570 // and newval3, newval2, mask 6571 // and oldval3, oldval2, mask 6572 // loop1MBB: 6573 // lwarx tmpDest, ptr 6574 // and tmp, tmpDest, mask 6575 // cmpw tmp, oldval3 6576 // bne- midMBB 6577 // loop2MBB: 6578 // andc tmp2, tmpDest, mask 6579 // or tmp4, tmp2, newval3 6580 // stwcx. tmp4, ptr 6581 // bne- loop1MBB 6582 // b exitBB 6583 // midMBB: 6584 // stwcx. tmpDest, ptr 6585 // exitBB: 6586 // srw dest, tmpDest, shift 6587 if (ptrA != ZeroReg) { 6588 Ptr1Reg = RegInfo.createVirtualRegister(RC); 6589 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 6590 .addReg(ptrA).addReg(ptrB); 6591 } else { 6592 Ptr1Reg = ptrB; 6593 } 6594 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 6595 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 6596 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 6597 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 6598 if (is64bit) 6599 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 6600 .addReg(Ptr1Reg).addImm(0).addImm(61); 6601 else 6602 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 6603 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 6604 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 6605 .addReg(newval).addReg(ShiftReg); 6606 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 6607 .addReg(oldval).addReg(ShiftReg); 6608 if (is8bit) 6609 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 6610 else { 6611 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 6612 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 6613 .addReg(Mask3Reg).addImm(65535); 6614 } 6615 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 6616 .addReg(Mask2Reg).addReg(ShiftReg); 6617 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 6618 .addReg(NewVal2Reg).addReg(MaskReg); 6619 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 6620 .addReg(OldVal2Reg).addReg(MaskReg); 6621 6622 BB = loop1MBB; 6623 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 6624 .addReg(ZeroReg).addReg(PtrReg); 6625 BuildMI(BB, dl, TII->get(PPC::AND),TmpReg) 6626 .addReg(TmpDestReg).addReg(MaskReg); 6627 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 6628 .addReg(TmpReg).addReg(OldVal3Reg); 6629 BuildMI(BB, dl, TII->get(PPC::BCC)) 6630 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 6631 BB->addSuccessor(loop2MBB); 6632 BB->addSuccessor(midMBB); 6633 6634 BB = loop2MBB; 6635 BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg) 6636 .addReg(TmpDestReg).addReg(MaskReg); 6637 BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg) 6638 .addReg(Tmp2Reg).addReg(NewVal3Reg); 6639 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg) 6640 .addReg(ZeroReg).addReg(PtrReg); 6641 BuildMI(BB, dl, TII->get(PPC::BCC)) 6642 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 6643 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 6644 BB->addSuccessor(loop1MBB); 6645 BB->addSuccessor(exitMBB); 6646 6647 BB = midMBB; 6648 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg) 6649 .addReg(ZeroReg).addReg(PtrReg); 6650 BB->addSuccessor(exitMBB); 6651 6652 // exitMBB: 6653 // ... 6654 BB = exitMBB; 6655 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg) 6656 .addReg(ShiftReg); 6657 } else if (MI->getOpcode() == PPC::FADDrtz) { 6658 // This pseudo performs an FADD with rounding mode temporarily forced 6659 // to round-to-zero. We emit this via custom inserter since the FPSCR 6660 // is not modeled at the SelectionDAG level. 6661 unsigned Dest = MI->getOperand(0).getReg(); 6662 unsigned Src1 = MI->getOperand(1).getReg(); 6663 unsigned Src2 = MI->getOperand(2).getReg(); 6664 DebugLoc dl = MI->getDebugLoc(); 6665 6666 MachineRegisterInfo &RegInfo = F->getRegInfo(); 6667 unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 6668 6669 // Save FPSCR value. 6670 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg); 6671 6672 // Set rounding mode to round-to-zero. 6673 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31); 6674 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30); 6675 6676 // Perform addition. 6677 BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2); 6678 6679 // Restore FPSCR value. 6680 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF)).addImm(1).addReg(MFFSReg); 6681 } else { 6682 llvm_unreachable("Unexpected instr type to insert"); 6683 } 6684 6685 MI->eraseFromParent(); // The pseudo instruction is gone now. 6686 return BB; 6687 } 6688 6689 //===----------------------------------------------------------------------===// 6690 // Target Optimization Hooks 6691 //===----------------------------------------------------------------------===// 6692 6693 SDValue PPCTargetLowering::DAGCombineFastRecip(SDValue Op, 6694 DAGCombinerInfo &DCI) const { 6695 if (DCI.isAfterLegalizeVectorOps()) 6696 return SDValue(); 6697 6698 EVT VT = Op.getValueType(); 6699 6700 if ((VT == MVT::f32 && PPCSubTarget.hasFRES()) || 6701 (VT == MVT::f64 && PPCSubTarget.hasFRE()) || 6702 (VT == MVT::v4f32 && PPCSubTarget.hasAltivec())) { 6703 6704 // Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i) 6705 // For the reciprocal, we need to find the zero of the function: 6706 // F(X) = A X - 1 [which has a zero at X = 1/A] 6707 // => 6708 // X_{i+1} = X_i (2 - A X_i) = X_i + X_i (1 - A X_i) [this second form 6709 // does not require additional intermediate precision] 6710 6711 // Convergence is quadratic, so we essentially double the number of digits 6712 // correct after every iteration. The minimum architected relative 6713 // accuracy is 2^-5. When hasRecipPrec(), this is 2^-14. IEEE float has 6714 // 23 digits and double has 52 digits. 6715 int Iterations = PPCSubTarget.hasRecipPrec() ? 1 : 3; 6716 if (VT.getScalarType() == MVT::f64) 6717 ++Iterations; 6718 6719 SelectionDAG &DAG = DCI.DAG; 6720 SDLoc dl(Op); 6721 6722 SDValue FPOne = 6723 DAG.getConstantFP(1.0, VT.getScalarType()); 6724 if (VT.isVector()) { 6725 assert(VT.getVectorNumElements() == 4 && 6726 "Unknown vector type"); 6727 FPOne = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, 6728 FPOne, FPOne, FPOne, FPOne); 6729 } 6730 6731 SDValue Est = DAG.getNode(PPCISD::FRE, dl, VT, Op); 6732 DCI.AddToWorklist(Est.getNode()); 6733 6734 // Newton iterations: Est = Est + Est (1 - Arg * Est) 6735 for (int i = 0; i < Iterations; ++i) { 6736 SDValue NewEst = DAG.getNode(ISD::FMUL, dl, VT, Op, Est); 6737 DCI.AddToWorklist(NewEst.getNode()); 6738 6739 NewEst = DAG.getNode(ISD::FSUB, dl, VT, FPOne, NewEst); 6740 DCI.AddToWorklist(NewEst.getNode()); 6741 6742 NewEst = DAG.getNode(ISD::FMUL, dl, VT, Est, NewEst); 6743 DCI.AddToWorklist(NewEst.getNode()); 6744 6745 Est = DAG.getNode(ISD::FADD, dl, VT, Est, NewEst); 6746 DCI.AddToWorklist(Est.getNode()); 6747 } 6748 6749 return Est; 6750 } 6751 6752 return SDValue(); 6753 } 6754 6755 SDValue PPCTargetLowering::DAGCombineFastRecipFSQRT(SDValue Op, 6756 DAGCombinerInfo &DCI) const { 6757 if (DCI.isAfterLegalizeVectorOps()) 6758 return SDValue(); 6759 6760 EVT VT = Op.getValueType(); 6761 6762 if ((VT == MVT::f32 && PPCSubTarget.hasFRSQRTES()) || 6763 (VT == MVT::f64 && PPCSubTarget.hasFRSQRTE()) || 6764 (VT == MVT::v4f32 && PPCSubTarget.hasAltivec())) { 6765 6766 // Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i) 6767 // For the reciprocal sqrt, we need to find the zero of the function: 6768 // F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)] 6769 // => 6770 // X_{i+1} = X_i (1.5 - A X_i^2 / 2) 6771 // As a result, we precompute A/2 prior to the iteration loop. 6772 6773 // Convergence is quadratic, so we essentially double the number of digits 6774 // correct after every iteration. The minimum architected relative 6775 // accuracy is 2^-5. When hasRecipPrec(), this is 2^-14. IEEE float has 6776 // 23 digits and double has 52 digits. 6777 int Iterations = PPCSubTarget.hasRecipPrec() ? 1 : 3; 6778 if (VT.getScalarType() == MVT::f64) 6779 ++Iterations; 6780 6781 SelectionDAG &DAG = DCI.DAG; 6782 SDLoc dl(Op); 6783 6784 SDValue FPThreeHalves = 6785 DAG.getConstantFP(1.5, VT.getScalarType()); 6786 if (VT.isVector()) { 6787 assert(VT.getVectorNumElements() == 4 && 6788 "Unknown vector type"); 6789 FPThreeHalves = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, 6790 FPThreeHalves, FPThreeHalves, 6791 FPThreeHalves, FPThreeHalves); 6792 } 6793 6794 SDValue Est = DAG.getNode(PPCISD::FRSQRTE, dl, VT, Op); 6795 DCI.AddToWorklist(Est.getNode()); 6796 6797 // We now need 0.5*Arg which we can write as (1.5*Arg - Arg) so that 6798 // this entire sequence requires only one FP constant. 6799 SDValue HalfArg = DAG.getNode(ISD::FMUL, dl, VT, FPThreeHalves, Op); 6800 DCI.AddToWorklist(HalfArg.getNode()); 6801 6802 HalfArg = DAG.getNode(ISD::FSUB, dl, VT, HalfArg, Op); 6803 DCI.AddToWorklist(HalfArg.getNode()); 6804 6805 // Newton iterations: Est = Est * (1.5 - HalfArg * Est * Est) 6806 for (int i = 0; i < Iterations; ++i) { 6807 SDValue NewEst = DAG.getNode(ISD::FMUL, dl, VT, Est, Est); 6808 DCI.AddToWorklist(NewEst.getNode()); 6809 6810 NewEst = DAG.getNode(ISD::FMUL, dl, VT, HalfArg, NewEst); 6811 DCI.AddToWorklist(NewEst.getNode()); 6812 6813 NewEst = DAG.getNode(ISD::FSUB, dl, VT, FPThreeHalves, NewEst); 6814 DCI.AddToWorklist(NewEst.getNode()); 6815 6816 Est = DAG.getNode(ISD::FMUL, dl, VT, Est, NewEst); 6817 DCI.AddToWorklist(Est.getNode()); 6818 } 6819 6820 return Est; 6821 } 6822 6823 return SDValue(); 6824 } 6825 6826 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does 6827 // not enforce equality of the chain operands. 6828 static bool isConsecutiveLS(LSBaseSDNode *LS, LSBaseSDNode *Base, 6829 unsigned Bytes, int Dist, 6830 SelectionDAG &DAG) { 6831 EVT VT = LS->getMemoryVT(); 6832 if (VT.getSizeInBits() / 8 != Bytes) 6833 return false; 6834 6835 SDValue Loc = LS->getBasePtr(); 6836 SDValue BaseLoc = Base->getBasePtr(); 6837 if (Loc.getOpcode() == ISD::FrameIndex) { 6838 if (BaseLoc.getOpcode() != ISD::FrameIndex) 6839 return false; 6840 const MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 6841 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 6842 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 6843 int FS = MFI->getObjectSize(FI); 6844 int BFS = MFI->getObjectSize(BFI); 6845 if (FS != BFS || FS != (int)Bytes) return false; 6846 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes); 6847 } 6848 6849 // Handle X+C 6850 if (DAG.isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc && 6851 cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes) 6852 return true; 6853 6854 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6855 const GlobalValue *GV1 = NULL; 6856 const GlobalValue *GV2 = NULL; 6857 int64_t Offset1 = 0; 6858 int64_t Offset2 = 0; 6859 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1); 6860 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2); 6861 if (isGA1 && isGA2 && GV1 == GV2) 6862 return Offset1 == (Offset2 + Dist*Bytes); 6863 return false; 6864 } 6865 6866 // Return true is there is a nearyby consecutive load to the one provided 6867 // (regardless of alignment). We search up and down the chain, looking though 6868 // token factors and other loads (but nothing else). As a result, a true 6869 // results indicates that it is safe to create a new consecutive load adjacent 6870 // to the load provided. 6871 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) { 6872 SDValue Chain = LD->getChain(); 6873 EVT VT = LD->getMemoryVT(); 6874 6875 SmallSet<SDNode *, 16> LoadRoots; 6876 SmallVector<SDNode *, 8> Queue(1, Chain.getNode()); 6877 SmallSet<SDNode *, 16> Visited; 6878 6879 // First, search up the chain, branching to follow all token-factor operands. 6880 // If we find a consecutive load, then we're done, otherwise, record all 6881 // nodes just above the top-level loads and token factors. 6882 while (!Queue.empty()) { 6883 SDNode *ChainNext = Queue.pop_back_val(); 6884 if (!Visited.insert(ChainNext)) 6885 continue; 6886 6887 if (LoadSDNode *ChainLD = dyn_cast<LoadSDNode>(ChainNext)) { 6888 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG)) 6889 return true; 6890 6891 if (!Visited.count(ChainLD->getChain().getNode())) 6892 Queue.push_back(ChainLD->getChain().getNode()); 6893 } else if (ChainNext->getOpcode() == ISD::TokenFactor) { 6894 for (SDNode::op_iterator O = ChainNext->op_begin(), 6895 OE = ChainNext->op_end(); O != OE; ++O) 6896 if (!Visited.count(O->getNode())) 6897 Queue.push_back(O->getNode()); 6898 } else 6899 LoadRoots.insert(ChainNext); 6900 } 6901 6902 // Second, search down the chain, starting from the top-level nodes recorded 6903 // in the first phase. These top-level nodes are the nodes just above all 6904 // loads and token factors. Starting with their uses, recursively look though 6905 // all loads (just the chain uses) and token factors to find a consecutive 6906 // load. 6907 Visited.clear(); 6908 Queue.clear(); 6909 6910 for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(), 6911 IE = LoadRoots.end(); I != IE; ++I) { 6912 Queue.push_back(*I); 6913 6914 while (!Queue.empty()) { 6915 SDNode *LoadRoot = Queue.pop_back_val(); 6916 if (!Visited.insert(LoadRoot)) 6917 continue; 6918 6919 if (LoadSDNode *ChainLD = dyn_cast<LoadSDNode>(LoadRoot)) 6920 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG)) 6921 return true; 6922 6923 for (SDNode::use_iterator UI = LoadRoot->use_begin(), 6924 UE = LoadRoot->use_end(); UI != UE; ++UI) 6925 if (((isa<LoadSDNode>(*UI) && 6926 cast<LoadSDNode>(*UI)->getChain().getNode() == LoadRoot) || 6927 UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI)) 6928 Queue.push_back(*UI); 6929 } 6930 } 6931 6932 return false; 6933 } 6934 6935 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, 6936 DAGCombinerInfo &DCI) const { 6937 const TargetMachine &TM = getTargetMachine(); 6938 SelectionDAG &DAG = DCI.DAG; 6939 SDLoc dl(N); 6940 switch (N->getOpcode()) { 6941 default: break; 6942 case PPCISD::SHL: 6943 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 6944 if (C->isNullValue()) // 0 << V -> 0. 6945 return N->getOperand(0); 6946 } 6947 break; 6948 case PPCISD::SRL: 6949 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 6950 if (C->isNullValue()) // 0 >>u V -> 0. 6951 return N->getOperand(0); 6952 } 6953 break; 6954 case PPCISD::SRA: 6955 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 6956 if (C->isNullValue() || // 0 >>s V -> 0. 6957 C->isAllOnesValue()) // -1 >>s V -> -1. 6958 return N->getOperand(0); 6959 } 6960 break; 6961 case ISD::FDIV: { 6962 assert(TM.Options.UnsafeFPMath && 6963 "Reciprocal estimates require UnsafeFPMath"); 6964 6965 if (N->getOperand(1).getOpcode() == ISD::FSQRT) { 6966 SDValue RV = 6967 DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0), DCI); 6968 if (RV.getNode() != 0) { 6969 DCI.AddToWorklist(RV.getNode()); 6970 return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), 6971 N->getOperand(0), RV); 6972 } 6973 } else if (N->getOperand(1).getOpcode() == ISD::FP_EXTEND && 6974 N->getOperand(1).getOperand(0).getOpcode() == ISD::FSQRT) { 6975 SDValue RV = 6976 DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0).getOperand(0), 6977 DCI); 6978 if (RV.getNode() != 0) { 6979 DCI.AddToWorklist(RV.getNode()); 6980 RV = DAG.getNode(ISD::FP_EXTEND, SDLoc(N->getOperand(1)), 6981 N->getValueType(0), RV); 6982 DCI.AddToWorklist(RV.getNode()); 6983 return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), 6984 N->getOperand(0), RV); 6985 } 6986 } else if (N->getOperand(1).getOpcode() == ISD::FP_ROUND && 6987 N->getOperand(1).getOperand(0).getOpcode() == ISD::FSQRT) { 6988 SDValue RV = 6989 DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0).getOperand(0), 6990 DCI); 6991 if (RV.getNode() != 0) { 6992 DCI.AddToWorklist(RV.getNode()); 6993 RV = DAG.getNode(ISD::FP_ROUND, SDLoc(N->getOperand(1)), 6994 N->getValueType(0), RV, 6995 N->getOperand(1).getOperand(1)); 6996 DCI.AddToWorklist(RV.getNode()); 6997 return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), 6998 N->getOperand(0), RV); 6999 } 7000 } 7001 7002 SDValue RV = DAGCombineFastRecip(N->getOperand(1), DCI); 7003 if (RV.getNode() != 0) { 7004 DCI.AddToWorklist(RV.getNode()); 7005 return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), 7006 N->getOperand(0), RV); 7007 } 7008 7009 } 7010 break; 7011 case ISD::FSQRT: { 7012 assert(TM.Options.UnsafeFPMath && 7013 "Reciprocal estimates require UnsafeFPMath"); 7014 7015 // Compute this as 1/(1/sqrt(X)), which is the reciprocal of the 7016 // reciprocal sqrt. 7017 SDValue RV = DAGCombineFastRecipFSQRT(N->getOperand(0), DCI); 7018 if (RV.getNode() != 0) { 7019 DCI.AddToWorklist(RV.getNode()); 7020 RV = DAGCombineFastRecip(RV, DCI); 7021 if (RV.getNode() != 0) 7022 return RV; 7023 } 7024 7025 } 7026 break; 7027 case ISD::SINT_TO_FP: 7028 if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) { 7029 if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) { 7030 // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores. 7031 // We allow the src/dst to be either f32/f64, but the intermediate 7032 // type must be i64. 7033 if (N->getOperand(0).getValueType() == MVT::i64 && 7034 N->getOperand(0).getOperand(0).getValueType() != MVT::ppcf128) { 7035 SDValue Val = N->getOperand(0).getOperand(0); 7036 if (Val.getValueType() == MVT::f32) { 7037 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 7038 DCI.AddToWorklist(Val.getNode()); 7039 } 7040 7041 Val = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Val); 7042 DCI.AddToWorklist(Val.getNode()); 7043 Val = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Val); 7044 DCI.AddToWorklist(Val.getNode()); 7045 if (N->getValueType(0) == MVT::f32) { 7046 Val = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Val, 7047 DAG.getIntPtrConstant(0)); 7048 DCI.AddToWorklist(Val.getNode()); 7049 } 7050 return Val; 7051 } else if (N->getOperand(0).getValueType() == MVT::i32) { 7052 // If the intermediate type is i32, we can avoid the load/store here 7053 // too. 7054 } 7055 } 7056 } 7057 break; 7058 case ISD::STORE: 7059 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)). 7060 if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() && 7061 !cast<StoreSDNode>(N)->isTruncatingStore() && 7062 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT && 7063 N->getOperand(1).getValueType() == MVT::i32 && 7064 N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) { 7065 SDValue Val = N->getOperand(1).getOperand(0); 7066 if (Val.getValueType() == MVT::f32) { 7067 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 7068 DCI.AddToWorklist(Val.getNode()); 7069 } 7070 Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val); 7071 DCI.AddToWorklist(Val.getNode()); 7072 7073 SDValue Ops[] = { 7074 N->getOperand(0), Val, N->getOperand(2), 7075 DAG.getValueType(N->getOperand(1).getValueType()) 7076 }; 7077 7078 Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 7079 DAG.getVTList(MVT::Other), Ops, array_lengthof(Ops), 7080 cast<StoreSDNode>(N)->getMemoryVT(), 7081 cast<StoreSDNode>(N)->getMemOperand()); 7082 DCI.AddToWorklist(Val.getNode()); 7083 return Val; 7084 } 7085 7086 // Turn STORE (BSWAP) -> sthbrx/stwbrx. 7087 if (cast<StoreSDNode>(N)->isUnindexed() && 7088 N->getOperand(1).getOpcode() == ISD::BSWAP && 7089 N->getOperand(1).getNode()->hasOneUse() && 7090 (N->getOperand(1).getValueType() == MVT::i32 || 7091 N->getOperand(1).getValueType() == MVT::i16 || 7092 (TM.getSubtarget<PPCSubtarget>().hasLDBRX() && 7093 TM.getSubtarget<PPCSubtarget>().isPPC64() && 7094 N->getOperand(1).getValueType() == MVT::i64))) { 7095 SDValue BSwapOp = N->getOperand(1).getOperand(0); 7096 // Do an any-extend to 32-bits if this is a half-word input. 7097 if (BSwapOp.getValueType() == MVT::i16) 7098 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp); 7099 7100 SDValue Ops[] = { 7101 N->getOperand(0), BSwapOp, N->getOperand(2), 7102 DAG.getValueType(N->getOperand(1).getValueType()) 7103 }; 7104 return 7105 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other), 7106 Ops, array_lengthof(Ops), 7107 cast<StoreSDNode>(N)->getMemoryVT(), 7108 cast<StoreSDNode>(N)->getMemOperand()); 7109 } 7110 break; 7111 case ISD::LOAD: { 7112 LoadSDNode *LD = cast<LoadSDNode>(N); 7113 EVT VT = LD->getValueType(0); 7114 Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); 7115 unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(Ty); 7116 if (ISD::isNON_EXTLoad(N) && VT.isVector() && 7117 TM.getSubtarget<PPCSubtarget>().hasAltivec() && 7118 DCI.getDAGCombineLevel() == AfterLegalizeTypes && 7119 LD->getAlignment() < ABIAlignment) { 7120 // This is a type-legal unaligned Altivec load. 7121 SDValue Chain = LD->getChain(); 7122 SDValue Ptr = LD->getBasePtr(); 7123 7124 // This implements the loading of unaligned vectors as described in 7125 // the venerable Apple Velocity Engine overview. Specifically: 7126 // https://developer.apple.com/hardwaredrivers/ve/alignment.html 7127 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html 7128 // 7129 // The general idea is to expand a sequence of one or more unaligned 7130 // loads into a alignment-based permutation-control instruction (lvsl), 7131 // a series of regular vector loads (which always truncate their 7132 // input address to an aligned address), and a series of permutations. 7133 // The results of these permutations are the requested loaded values. 7134 // The trick is that the last "extra" load is not taken from the address 7135 // you might suspect (sizeof(vector) bytes after the last requested 7136 // load), but rather sizeof(vector) - 1 bytes after the last 7137 // requested vector. The point of this is to avoid a page fault if the 7138 // base address happend to be aligned. This works because if the base 7139 // address is aligned, then adding less than a full vector length will 7140 // cause the last vector in the sequence to be (re)loaded. Otherwise, 7141 // the next vector will be fetched as you might suspect was necessary. 7142 7143 // We might be able to reuse the permutation generation from 7144 // a different base address offset from this one by an aligned amount. 7145 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this 7146 // optimization later. 7147 SDValue PermCntl = BuildIntrinsicOp(Intrinsic::ppc_altivec_lvsl, Ptr, 7148 DAG, dl, MVT::v16i8); 7149 7150 // Refine the alignment of the original load (a "new" load created here 7151 // which was identical to the first except for the alignment would be 7152 // merged with the existing node regardless). 7153 MachineFunction &MF = DAG.getMachineFunction(); 7154 MachineMemOperand *MMO = 7155 MF.getMachineMemOperand(LD->getPointerInfo(), 7156 LD->getMemOperand()->getFlags(), 7157 LD->getMemoryVT().getStoreSize(), 7158 ABIAlignment); 7159 LD->refineAlignment(MMO); 7160 SDValue BaseLoad = SDValue(LD, 0); 7161 7162 // Note that the value of IncOffset (which is provided to the next 7163 // load's pointer info offset value, and thus used to calculate the 7164 // alignment), and the value of IncValue (which is actually used to 7165 // increment the pointer value) are different! This is because we 7166 // require the next load to appear to be aligned, even though it 7167 // is actually offset from the base pointer by a lesser amount. 7168 int IncOffset = VT.getSizeInBits() / 8; 7169 int IncValue = IncOffset; 7170 7171 // Walk (both up and down) the chain looking for another load at the real 7172 // (aligned) offset (the alignment of the other load does not matter in 7173 // this case). If found, then do not use the offset reduction trick, as 7174 // that will prevent the loads from being later combined (as they would 7175 // otherwise be duplicates). 7176 if (!findConsecutiveLoad(LD, DAG)) 7177 --IncValue; 7178 7179 SDValue Increment = DAG.getConstant(IncValue, getPointerTy()); 7180 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 7181 7182 SDValue ExtraLoad = 7183 DAG.getLoad(VT, dl, Chain, Ptr, 7184 LD->getPointerInfo().getWithOffset(IncOffset), 7185 LD->isVolatile(), LD->isNonTemporal(), 7186 LD->isInvariant(), ABIAlignment); 7187 7188 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 7189 BaseLoad.getValue(1), ExtraLoad.getValue(1)); 7190 7191 if (BaseLoad.getValueType() != MVT::v4i32) 7192 BaseLoad = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, BaseLoad); 7193 7194 if (ExtraLoad.getValueType() != MVT::v4i32) 7195 ExtraLoad = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, ExtraLoad); 7196 7197 SDValue Perm = BuildIntrinsicOp(Intrinsic::ppc_altivec_vperm, 7198 BaseLoad, ExtraLoad, PermCntl, DAG, dl); 7199 7200 if (VT != MVT::v4i32) 7201 Perm = DAG.getNode(ISD::BITCAST, dl, VT, Perm); 7202 7203 // Now we need to be really careful about how we update the users of the 7204 // original load. We cannot just call DCI.CombineTo (or 7205 // DAG.ReplaceAllUsesWith for that matter), because the load still has 7206 // uses created here (the permutation for example) that need to stay. 7207 SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); 7208 while (UI != UE) { 7209 SDUse &Use = UI.getUse(); 7210 SDNode *User = *UI; 7211 // Note: BaseLoad is checked here because it might not be N, but a 7212 // bitcast of N. 7213 if (User == Perm.getNode() || User == BaseLoad.getNode() || 7214 User == TF.getNode() || Use.getResNo() > 1) { 7215 ++UI; 7216 continue; 7217 } 7218 7219 SDValue To = Use.getResNo() ? TF : Perm; 7220 ++UI; 7221 7222 SmallVector<SDValue, 8> Ops; 7223 for (SDNode::op_iterator O = User->op_begin(), 7224 OE = User->op_end(); O != OE; ++O) { 7225 if (*O == Use) 7226 Ops.push_back(To); 7227 else 7228 Ops.push_back(*O); 7229 } 7230 7231 DAG.UpdateNodeOperands(User, Ops.data(), Ops.size()); 7232 } 7233 7234 return SDValue(N, 0); 7235 } 7236 } 7237 break; 7238 case ISD::INTRINSIC_WO_CHAIN: 7239 if (cast<ConstantSDNode>(N->getOperand(0))->getZExtValue() == 7240 Intrinsic::ppc_altivec_lvsl && 7241 N->getOperand(1)->getOpcode() == ISD::ADD) { 7242 SDValue Add = N->getOperand(1); 7243 7244 if (DAG.MaskedValueIsZero(Add->getOperand(1), 7245 APInt::getAllOnesValue(4 /* 16 byte alignment */).zext( 7246 Add.getValueType().getScalarType().getSizeInBits()))) { 7247 SDNode *BasePtr = Add->getOperand(0).getNode(); 7248 for (SDNode::use_iterator UI = BasePtr->use_begin(), 7249 UE = BasePtr->use_end(); UI != UE; ++UI) { 7250 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 7251 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == 7252 Intrinsic::ppc_altivec_lvsl) { 7253 // We've found another LVSL, and this address if an aligned 7254 // multiple of that one. The results will be the same, so use the 7255 // one we've just found instead. 7256 7257 return SDValue(*UI, 0); 7258 } 7259 } 7260 } 7261 } 7262 case ISD::BSWAP: 7263 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 7264 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && 7265 N->getOperand(0).hasOneUse() && 7266 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 || 7267 (TM.getSubtarget<PPCSubtarget>().hasLDBRX() && 7268 TM.getSubtarget<PPCSubtarget>().isPPC64() && 7269 N->getValueType(0) == MVT::i64))) { 7270 SDValue Load = N->getOperand(0); 7271 LoadSDNode *LD = cast<LoadSDNode>(Load); 7272 // Create the byte-swapping load. 7273 SDValue Ops[] = { 7274 LD->getChain(), // Chain 7275 LD->getBasePtr(), // Ptr 7276 DAG.getValueType(N->getValueType(0)) // VT 7277 }; 7278 SDValue BSLoad = 7279 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl, 7280 DAG.getVTList(N->getValueType(0) == MVT::i64 ? 7281 MVT::i64 : MVT::i32, MVT::Other), 7282 Ops, 3, LD->getMemoryVT(), LD->getMemOperand()); 7283 7284 // If this is an i16 load, insert the truncate. 7285 SDValue ResVal = BSLoad; 7286 if (N->getValueType(0) == MVT::i16) 7287 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad); 7288 7289 // First, combine the bswap away. This makes the value produced by the 7290 // load dead. 7291 DCI.CombineTo(N, ResVal); 7292 7293 // Next, combine the load away, we give it a bogus result value but a real 7294 // chain result. The result value is dead because the bswap is dead. 7295 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); 7296 7297 // Return N so it doesn't get rechecked! 7298 return SDValue(N, 0); 7299 } 7300 7301 break; 7302 case PPCISD::VCMP: { 7303 // If a VCMPo node already exists with exactly the same operands as this 7304 // node, use its result instead of this node (VCMPo computes both a CR6 and 7305 // a normal output). 7306 // 7307 if (!N->getOperand(0).hasOneUse() && 7308 !N->getOperand(1).hasOneUse() && 7309 !N->getOperand(2).hasOneUse()) { 7310 7311 // Scan all of the users of the LHS, looking for VCMPo's that match. 7312 SDNode *VCMPoNode = 0; 7313 7314 SDNode *LHSN = N->getOperand(0).getNode(); 7315 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 7316 UI != E; ++UI) 7317 if (UI->getOpcode() == PPCISD::VCMPo && 7318 UI->getOperand(1) == N->getOperand(1) && 7319 UI->getOperand(2) == N->getOperand(2) && 7320 UI->getOperand(0) == N->getOperand(0)) { 7321 VCMPoNode = *UI; 7322 break; 7323 } 7324 7325 // If there is no VCMPo node, or if the flag value has a single use, don't 7326 // transform this. 7327 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 7328 break; 7329 7330 // Look at the (necessarily single) use of the flag value. If it has a 7331 // chain, this transformation is more complex. Note that multiple things 7332 // could use the value result, which we should ignore. 7333 SDNode *FlagUser = 0; 7334 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 7335 FlagUser == 0; ++UI) { 7336 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 7337 SDNode *User = *UI; 7338 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 7339 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) { 7340 FlagUser = User; 7341 break; 7342 } 7343 } 7344 } 7345 7346 // If the user is a MFOCRF instruction, we know this is safe. 7347 // Otherwise we give up for right now. 7348 if (FlagUser->getOpcode() == PPCISD::MFOCRF) 7349 return SDValue(VCMPoNode, 0); 7350 } 7351 break; 7352 } 7353 case ISD::BR_CC: { 7354 // If this is a branch on an altivec predicate comparison, lower this so 7355 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This 7356 // lowering is done pre-legalize, because the legalizer lowers the predicate 7357 // compare down to code that is difficult to reassemble. 7358 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 7359 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 7360 7361 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero 7362 // value. If so, pass-through the AND to get to the intrinsic. 7363 if (LHS.getOpcode() == ISD::AND && 7364 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN && 7365 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() == 7366 Intrinsic::ppc_is_decremented_ctr_nonzero && 7367 isa<ConstantSDNode>(LHS.getOperand(1)) && 7368 !cast<ConstantSDNode>(LHS.getOperand(1))->getConstantIntValue()-> 7369 isZero()) 7370 LHS = LHS.getOperand(0); 7371 7372 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN && 7373 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 7374 Intrinsic::ppc_is_decremented_ctr_nonzero && 7375 isa<ConstantSDNode>(RHS)) { 7376 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && 7377 "Counter decrement comparison is not EQ or NE"); 7378 7379 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 7380 bool isBDNZ = (CC == ISD::SETEQ && Val) || 7381 (CC == ISD::SETNE && !Val); 7382 7383 // We now need to make the intrinsic dead (it cannot be instruction 7384 // selected). 7385 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0)); 7386 assert(LHS.getNode()->hasOneUse() && 7387 "Counter decrement has more than one use"); 7388 7389 return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other, 7390 N->getOperand(0), N->getOperand(4)); 7391 } 7392 7393 int CompareOpc; 7394 bool isDot; 7395 7396 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 7397 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 7398 getAltivecCompareInfo(LHS, CompareOpc, isDot)) { 7399 assert(isDot && "Can't compare against a vector result!"); 7400 7401 // If this is a comparison against something other than 0/1, then we know 7402 // that the condition is never/always true. 7403 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 7404 if (Val != 0 && Val != 1) { 7405 if (CC == ISD::SETEQ) // Cond never true, remove branch. 7406 return N->getOperand(0); 7407 // Always !=, turn it into an unconditional branch. 7408 return DAG.getNode(ISD::BR, dl, MVT::Other, 7409 N->getOperand(0), N->getOperand(4)); 7410 } 7411 7412 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 7413 7414 // Create the PPCISD altivec 'dot' comparison node. 7415 SDValue Ops[] = { 7416 LHS.getOperand(2), // LHS of compare 7417 LHS.getOperand(3), // RHS of compare 7418 DAG.getConstant(CompareOpc, MVT::i32) 7419 }; 7420 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue }; 7421 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3); 7422 7423 // Unpack the result based on how the target uses it. 7424 PPC::Predicate CompOpc; 7425 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 7426 default: // Can't happen, don't crash on invalid number though. 7427 case 0: // Branch on the value of the EQ bit of CR6. 7428 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 7429 break; 7430 case 1: // Branch on the inverted value of the EQ bit of CR6. 7431 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 7432 break; 7433 case 2: // Branch on the value of the LT bit of CR6. 7434 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 7435 break; 7436 case 3: // Branch on the inverted value of the LT bit of CR6. 7437 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT; 7438 break; 7439 } 7440 7441 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 7442 DAG.getConstant(CompOpc, MVT::i32), 7443 DAG.getRegister(PPC::CR6, MVT::i32), 7444 N->getOperand(4), CompNode.getValue(1)); 7445 } 7446 break; 7447 } 7448 } 7449 7450 return SDValue(); 7451 } 7452 7453 //===----------------------------------------------------------------------===// 7454 // Inline Assembly Support 7455 //===----------------------------------------------------------------------===// 7456 7457 void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 7458 APInt &KnownZero, 7459 APInt &KnownOne, 7460 const SelectionDAG &DAG, 7461 unsigned Depth) const { 7462 KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0); 7463 switch (Op.getOpcode()) { 7464 default: break; 7465 case PPCISD::LBRX: { 7466 // lhbrx is known to have the top bits cleared out. 7467 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16) 7468 KnownZero = 0xFFFF0000; 7469 break; 7470 } 7471 case ISD::INTRINSIC_WO_CHAIN: { 7472 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) { 7473 default: break; 7474 case Intrinsic::ppc_altivec_vcmpbfp_p: 7475 case Intrinsic::ppc_altivec_vcmpeqfp_p: 7476 case Intrinsic::ppc_altivec_vcmpequb_p: 7477 case Intrinsic::ppc_altivec_vcmpequh_p: 7478 case Intrinsic::ppc_altivec_vcmpequw_p: 7479 case Intrinsic::ppc_altivec_vcmpgefp_p: 7480 case Intrinsic::ppc_altivec_vcmpgtfp_p: 7481 case Intrinsic::ppc_altivec_vcmpgtsb_p: 7482 case Intrinsic::ppc_altivec_vcmpgtsh_p: 7483 case Intrinsic::ppc_altivec_vcmpgtsw_p: 7484 case Intrinsic::ppc_altivec_vcmpgtub_p: 7485 case Intrinsic::ppc_altivec_vcmpgtuh_p: 7486 case Intrinsic::ppc_altivec_vcmpgtuw_p: 7487 KnownZero = ~1U; // All bits but the low one are known to be zero. 7488 break; 7489 } 7490 } 7491 } 7492 } 7493 7494 7495 /// getConstraintType - Given a constraint, return the type of 7496 /// constraint it is for this target. 7497 PPCTargetLowering::ConstraintType 7498 PPCTargetLowering::getConstraintType(const std::string &Constraint) const { 7499 if (Constraint.size() == 1) { 7500 switch (Constraint[0]) { 7501 default: break; 7502 case 'b': 7503 case 'r': 7504 case 'f': 7505 case 'v': 7506 case 'y': 7507 return C_RegisterClass; 7508 case 'Z': 7509 // FIXME: While Z does indicate a memory constraint, it specifically 7510 // indicates an r+r address (used in conjunction with the 'y' modifier 7511 // in the replacement string). Currently, we're forcing the base 7512 // register to be r0 in the asm printer (which is interpreted as zero) 7513 // and forming the complete address in the second register. This is 7514 // suboptimal. 7515 return C_Memory; 7516 } 7517 } 7518 return TargetLowering::getConstraintType(Constraint); 7519 } 7520 7521 /// Examine constraint type and operand type and determine a weight value. 7522 /// This object must already have been set up with the operand type 7523 /// and the current alternative constraint selected. 7524 TargetLowering::ConstraintWeight 7525 PPCTargetLowering::getSingleConstraintMatchWeight( 7526 AsmOperandInfo &info, const char *constraint) const { 7527 ConstraintWeight weight = CW_Invalid; 7528 Value *CallOperandVal = info.CallOperandVal; 7529 // If we don't have a value, we can't do a match, 7530 // but allow it at the lowest weight. 7531 if (CallOperandVal == NULL) 7532 return CW_Default; 7533 Type *type = CallOperandVal->getType(); 7534 // Look at the constraint type. 7535 switch (*constraint) { 7536 default: 7537 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 7538 break; 7539 case 'b': 7540 if (type->isIntegerTy()) 7541 weight = CW_Register; 7542 break; 7543 case 'f': 7544 if (type->isFloatTy()) 7545 weight = CW_Register; 7546 break; 7547 case 'd': 7548 if (type->isDoubleTy()) 7549 weight = CW_Register; 7550 break; 7551 case 'v': 7552 if (type->isVectorTy()) 7553 weight = CW_Register; 7554 break; 7555 case 'y': 7556 weight = CW_Register; 7557 break; 7558 case 'Z': 7559 weight = CW_Memory; 7560 break; 7561 } 7562 return weight; 7563 } 7564 7565 std::pair<unsigned, const TargetRegisterClass*> 7566 PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 7567 MVT VT) const { 7568 if (Constraint.size() == 1) { 7569 // GCC RS6000 Constraint Letters 7570 switch (Constraint[0]) { 7571 case 'b': // R1-R31 7572 if (VT == MVT::i64 && PPCSubTarget.isPPC64()) 7573 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass); 7574 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass); 7575 case 'r': // R0-R31 7576 if (VT == MVT::i64 && PPCSubTarget.isPPC64()) 7577 return std::make_pair(0U, &PPC::G8RCRegClass); 7578 return std::make_pair(0U, &PPC::GPRCRegClass); 7579 case 'f': 7580 if (VT == MVT::f32 || VT == MVT::i32) 7581 return std::make_pair(0U, &PPC::F4RCRegClass); 7582 if (VT == MVT::f64 || VT == MVT::i64) 7583 return std::make_pair(0U, &PPC::F8RCRegClass); 7584 break; 7585 case 'v': 7586 return std::make_pair(0U, &PPC::VRRCRegClass); 7587 case 'y': // crrc 7588 return std::make_pair(0U, &PPC::CRRCRegClass); 7589 } 7590 } 7591 7592 std::pair<unsigned, const TargetRegisterClass*> R = 7593 TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 7594 7595 // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers 7596 // (which we call X[0-9]+). If a 64-bit value has been requested, and a 7597 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent 7598 // register. 7599 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use 7600 // the AsmName field from *RegisterInfo.td, then this would not be necessary. 7601 if (R.first && VT == MVT::i64 && PPCSubTarget.isPPC64() && 7602 PPC::GPRCRegClass.contains(R.first)) { 7603 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); 7604 return std::make_pair(TRI->getMatchingSuperReg(R.first, 7605 PPC::sub_32, &PPC::G8RCRegClass), 7606 &PPC::G8RCRegClass); 7607 } 7608 7609 return R; 7610 } 7611 7612 7613 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 7614 /// vector. If it is invalid, don't add anything to Ops. 7615 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 7616 std::string &Constraint, 7617 std::vector<SDValue>&Ops, 7618 SelectionDAG &DAG) const { 7619 SDValue Result(0,0); 7620 7621 // Only support length 1 constraints. 7622 if (Constraint.length() > 1) return; 7623 7624 char Letter = Constraint[0]; 7625 switch (Letter) { 7626 default: break; 7627 case 'I': 7628 case 'J': 7629 case 'K': 7630 case 'L': 7631 case 'M': 7632 case 'N': 7633 case 'O': 7634 case 'P': { 7635 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op); 7636 if (!CST) return; // Must be an immediate to match. 7637 unsigned Value = CST->getZExtValue(); 7638 switch (Letter) { 7639 default: llvm_unreachable("Unknown constraint letter!"); 7640 case 'I': // "I" is a signed 16-bit constant. 7641 if ((short)Value == (int)Value) 7642 Result = DAG.getTargetConstant(Value, Op.getValueType()); 7643 break; 7644 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 7645 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. 7646 if ((short)Value == 0) 7647 Result = DAG.getTargetConstant(Value, Op.getValueType()); 7648 break; 7649 case 'K': // "K" is a constant with only the low-order 16 bits nonzero. 7650 if ((Value >> 16) == 0) 7651 Result = DAG.getTargetConstant(Value, Op.getValueType()); 7652 break; 7653 case 'M': // "M" is a constant that is greater than 31. 7654 if (Value > 31) 7655 Result = DAG.getTargetConstant(Value, Op.getValueType()); 7656 break; 7657 case 'N': // "N" is a positive constant that is an exact power of two. 7658 if ((int)Value > 0 && isPowerOf2_32(Value)) 7659 Result = DAG.getTargetConstant(Value, Op.getValueType()); 7660 break; 7661 case 'O': // "O" is the constant zero. 7662 if (Value == 0) 7663 Result = DAG.getTargetConstant(Value, Op.getValueType()); 7664 break; 7665 case 'P': // "P" is a constant whose negation is a signed 16-bit constant. 7666 if ((short)-Value == (int)-Value) 7667 Result = DAG.getTargetConstant(Value, Op.getValueType()); 7668 break; 7669 } 7670 break; 7671 } 7672 } 7673 7674 if (Result.getNode()) { 7675 Ops.push_back(Result); 7676 return; 7677 } 7678 7679 // Handle standard constraint letters. 7680 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 7681 } 7682 7683 // isLegalAddressingMode - Return true if the addressing mode represented 7684 // by AM is legal for this target, for a load/store of the specified type. 7685 bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM, 7686 Type *Ty) const { 7687 // FIXME: PPC does not allow r+i addressing modes for vectors! 7688 7689 // PPC allows a sign-extended 16-bit immediate field. 7690 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1) 7691 return false; 7692 7693 // No global is ever allowed as a base. 7694 if (AM.BaseGV) 7695 return false; 7696 7697 // PPC only support r+r, 7698 switch (AM.Scale) { 7699 case 0: // "r+i" or just "i", depending on HasBaseReg. 7700 break; 7701 case 1: 7702 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed. 7703 return false; 7704 // Otherwise we have r+r or r+i. 7705 break; 7706 case 2: 7707 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed. 7708 return false; 7709 // Allow 2*r as r+r. 7710 break; 7711 default: 7712 // No other scales are supported. 7713 return false; 7714 } 7715 7716 return true; 7717 } 7718 7719 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op, 7720 SelectionDAG &DAG) const { 7721 MachineFunction &MF = DAG.getMachineFunction(); 7722 MachineFrameInfo *MFI = MF.getFrameInfo(); 7723 MFI->setReturnAddressIsTaken(true); 7724 7725 SDLoc dl(Op); 7726 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 7727 7728 // Make sure the function does not optimize away the store of the RA to 7729 // the stack. 7730 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 7731 FuncInfo->setLRStoreRequired(); 7732 bool isPPC64 = PPCSubTarget.isPPC64(); 7733 bool isDarwinABI = PPCSubTarget.isDarwinABI(); 7734 7735 if (Depth > 0) { 7736 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 7737 SDValue Offset = 7738 7739 DAG.getConstant(PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI), 7740 isPPC64? MVT::i64 : MVT::i32); 7741 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), 7742 DAG.getNode(ISD::ADD, dl, getPointerTy(), 7743 FrameAddr, Offset), 7744 MachinePointerInfo(), false, false, false, 0); 7745 } 7746 7747 // Just load the return address off the stack. 7748 SDValue RetAddrFI = getReturnAddrFrameIndex(DAG); 7749 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), 7750 RetAddrFI, MachinePointerInfo(), false, false, false, 0); 7751 } 7752 7753 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, 7754 SelectionDAG &DAG) const { 7755 SDLoc dl(Op); 7756 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 7757 7758 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 7759 bool isPPC64 = PtrVT == MVT::i64; 7760 7761 MachineFunction &MF = DAG.getMachineFunction(); 7762 MachineFrameInfo *MFI = MF.getFrameInfo(); 7763 MFI->setFrameAddressIsTaken(true); 7764 7765 // Naked functions never have a frame pointer, and so we use r1. For all 7766 // other functions, this decision must be delayed until during PEI. 7767 unsigned FrameReg; 7768 if (MF.getFunction()->getAttributes().hasAttribute( 7769 AttributeSet::FunctionIndex, Attribute::Naked)) 7770 FrameReg = isPPC64 ? PPC::X1 : PPC::R1; 7771 else 7772 FrameReg = isPPC64 ? PPC::FP8 : PPC::FP; 7773 7774 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, 7775 PtrVT); 7776 while (Depth--) 7777 FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(), 7778 FrameAddr, MachinePointerInfo(), false, false, 7779 false, 0); 7780 return FrameAddr; 7781 } 7782 7783 bool 7784 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 7785 // The PowerPC target isn't yet aware of offsets. 7786 return false; 7787 } 7788 7789 /// getOptimalMemOpType - Returns the target specific optimal type for load 7790 /// and store operations as a result of memset, memcpy, and memmove 7791 /// lowering. If DstAlign is zero that means it's safe to destination 7792 /// alignment can satisfy any constraint. Similarly if SrcAlign is zero it 7793 /// means there isn't a need to check it against alignment requirement, 7794 /// probably because the source does not need to be loaded. If 'IsMemset' is 7795 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that 7796 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy 7797 /// source is constant so it does not need to be loaded. 7798 /// It returns EVT::Other if the type should be determined using generic 7799 /// target-independent logic. 7800 EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size, 7801 unsigned DstAlign, unsigned SrcAlign, 7802 bool IsMemset, bool ZeroMemset, 7803 bool MemcpyStrSrc, 7804 MachineFunction &MF) const { 7805 if (this->PPCSubTarget.isPPC64()) { 7806 return MVT::i64; 7807 } else { 7808 return MVT::i32; 7809 } 7810 } 7811 7812 bool PPCTargetLowering::allowsUnalignedMemoryAccesses(EVT VT, 7813 bool *Fast) const { 7814 if (DisablePPCUnaligned) 7815 return false; 7816 7817 // PowerPC supports unaligned memory access for simple non-vector types. 7818 // Although accessing unaligned addresses is not as efficient as accessing 7819 // aligned addresses, it is generally more efficient than manual expansion, 7820 // and generally only traps for software emulation when crossing page 7821 // boundaries. 7822 7823 if (!VT.isSimple()) 7824 return false; 7825 7826 if (VT.getSimpleVT().isVector()) 7827 return false; 7828 7829 if (VT == MVT::ppcf128) 7830 return false; 7831 7832 if (Fast) 7833 *Fast = true; 7834 7835 return true; 7836 } 7837 7838 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { 7839 VT = VT.getScalarType(); 7840 7841 if (!VT.isSimple()) 7842 return false; 7843 7844 switch (VT.getSimpleVT().SimpleTy) { 7845 case MVT::f32: 7846 case MVT::f64: 7847 return true; 7848 default: 7849 break; 7850 } 7851 7852 return false; 7853 } 7854 7855 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const { 7856 if (DisableILPPref) 7857 return TargetLowering::getSchedulingPreference(N); 7858 7859 return Sched::ILP; 7860 } 7861 7862 // Create a fast isel object. 7863 FastISel * 7864 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo, 7865 const TargetLibraryInfo *LibInfo) const { 7866 return PPC::createFastISel(FuncInfo, LibInfo); 7867 } 7868