//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCTargetMachine.h"
#include "PPCTargetObjectFile.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static TargetLoweringObjectFile *CreateTLOF(const PPCTargetMachine &TM) {
  if (TM.getSubtargetImpl()->isDarwin())
    return new TargetLoweringObjectFileMachO();

  if (TM.getSubtargetImpl()->isSVR4ABI())
    return new PPC64LinuxTargetObjectFile();

  return new TargetLoweringObjectFileELF();
}

PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
  : TargetLowering(TM, CreateTLOF(TM)), PPCSubTarget(*TM.getSubtargetImpl()) {
  const PPCSubtarget *Subtarget = &TM.getSubtarget<PPCSubtarget>();

  setPow2DivIsCheap();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget->isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
  addRegisterClass(MVT::f64, &PPC::F8RCRegClass);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);

  // This is used in the ppcf128->int sequence.  Note it has different semantics
  // from FP_ROUND:  that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FMA  , MVT::f64, Legal);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);
  setOperationAction(ISD::FMA  , MVT::f32, Legal);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget->hasFSQRT() &&
      !(TM.Options.UnsafeFPMath &&
        Subtarget->hasFRSQRTE() && Subtarget->hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget->hasFSQRT() &&
      !(TM.Options.UnsafeFPMath &&
        Subtarget->hasFRSQRTES() && Subtarget->hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  if (Subtarget->hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);

    // frin does not implement "ties to even." Thus, this is safe only in
    // fast-math mode.
    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);

      // These need to set FE_INEXACT, and use a custom inserter.
      setOperationAction(ISD::FRINT, MVT::f64, Legal);
      setOperationAction(ISD::FRINT, MVT::f32, Legal);
    }
  }

  // PowerPC does not have BSWAP, CTPOP or CTTZ.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ , MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ , MVT::i64, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);

  if (Subtarget->hasPOPCNTD()) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  // PowerPC does not have SELECT.
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::i64, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires SetCC.
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::f64, Expand);

  // We cannot sextinreg(i1).  Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/EH_SJLJ_LONGJMP supported here is NOT intended to
  // support SjLj exception handling but a light-weight setjmp/longjmp
  // replacement to support continuation, user-level threading, etc.  As a
  // result, no other SjLj exception interfaces are implemented; please don't
  // build your own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget->isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  // Comparisons that require checking two conditions.
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget->has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (PPCSubTarget.hasLFIWAX() || Subtarget->isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (PPCSubTarget.hasFPCVT()) {
    if (Subtarget->has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget->use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget->hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
      MVT::SimpleValueType VT = (MVT::SimpleValueType)i;

      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD , VT, Legal);
      setOperationAction(ISD::SUB , VT, Legal);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND   , VT, Promote);
      AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
      setOperationAction(ISD::OR    , VT, Promote);
      AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , VT, Promote);
      AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , VT, Promote);
      AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::STORE , VT, Promote);
      AddPromotedToType (ISD::STORE , VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL , VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FPOWI, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL,  VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT,  VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTLZ, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

      for (unsigned j = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
           j <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++j) {
        MVT::SimpleValueType InnerVT = (MVT::SimpleValueType)j;
        setTruncStoreAction(VT, InnerVT, Expand);
      }
      setLoadExtAction(ISD::SEXTLOAD, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUGT, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUGE, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETULT, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETULE, MVT::v4f32, Expand);

    setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);
  }

  if (Subtarget->has64BitSupport()) {
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  }

  setOperationAction(ISD::ATOMIC_LOAD,  MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);

  setBooleanContents(ZeroOrOneBooleanContent);
  // Altivec instructions set fields to all zeros or all ones.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (isPPC64) {
    setStackPointerRegisterToSaveRestore(PPC::X1);
    setExceptionPointerRegister(PPC::X3);
    setExceptionSelectorRegister(PPC::X4);
  } else {
    setStackPointerRegisterToSaveRestore(PPC::R1);
    setExceptionPointerRegister(PPC::R3);
    setExceptionSelectorRegister(PPC::R4);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget->isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  setMinFunctionAlignment(2);
  if (PPCSubTarget.isDarwin())
    setPrefFunctionAlignment(4);

  if (isPPC64 && Subtarget->isJITCodeModel())
    // Temporary workaround for the inability of PPC64 JIT to handle jump
    // tables.
    setSupportJumpTables(false);

  setInsertFencesForAtomic(true);

  setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties();

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends.  GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget->getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget->getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;

    setPrefFunctionAlignment(4);
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty) const {
  const TargetMachine &TM = getTargetMachine();
  // Darwin passes everything on a 4-byte boundary.
  if (TM.getSubtarget<PPCSubtarget>().isDarwin())
    return 4;

  // Vectors that are 16 bytes and wider are passed on a 16-byte boundary.
  if (VectorType *VTy = dyn_cast<VectorType>(Ty))
    if (VTy->getBitWidth() >= 128)
      return 16;

  // Everything else is passed on an 8-byte boundary on PPC64 and a 4-byte
  // boundary on PPC32.
  if (PPCSubTarget.isPPC64())
    return 8;

  return 4;
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case PPCISD::FSEL:            return "PPCISD::FSEL";
  case PPCISD::FCFID:           return "PPCISD::FCFID";
  case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
  case PPCISD::FRE:             return "PPCISD::FRE";
  case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX:          return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:         return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:        return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:           return "PPCISD::VPERM";
  case PPCISD::Hi:              return "PPCISD::Hi";
  case PPCISD::Lo:              return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
  case PPCISD::TOC_RESTORE:     return "PPCISD::TOC_RESTORE";
  case PPCISD::LOAD:            return "PPCISD::LOAD";
  case PPCISD::LOAD_TOC:        return "PPCISD::LOAD_TOC";
  case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
  case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:             return "PPCISD::SRL";
  case PPCISD::SRA:             return "PPCISD::SRA";
  case PPCISD::SHL:             return "PPCISD::SHL";
  case PPCISD::CALL:            return "PPCISD::CALL";
  case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR:           return "PPCISD::MTCTR";
  case PPCISD::BCTRL:           return "PPCISD::BCTRL";
  case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
  case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
  case PPCISD::VCMP:            return "PPCISD::VCMP";
  case PPCISD::VCMPo:           return "PPCISD::VCMPo";
  case PPCISD::LBRX:            return "PPCISD::LBRX";
  case PPCISD::STBRX:           return "PPCISD::STBRX";
  case PPCISD::LARX:            return "PPCISD::LARX";
  case PPCISD::STCX:            return "PPCISD::STCX";
  case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ:            return "PPCISD::BDNZ";
  case PPCISD::BDZ:             return "PPCISD::BDZ";
  case PPCISD::MFFS:            return "PPCISD::MFFS";
  case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET:          return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
  case PPCISD::ADDIS_TOC_HA:    return "PPCISD::ADDIS_TOC_HA";
  case PPCISD::LD_TOC_L:        return "PPCISD::LD_TOC_L";
  case PPCISD::ADDI_TOC_L:      return "PPCISD::ADDI_TOC_L";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
  case PPCISD::SC:              return "PPCISD::SC";
  }
}

EVT PPCTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else {
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+1))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else {
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         "PPC only supports shuffles by bytes!");
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1, 2, or 4 bytes).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 8, 24);
  return isVMerge(N, UnitSize, 8, 8);
}
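
// For example, with UnitSize == 1 the non-unary VMRGL case above corresponds
// to the byte shuffle mask {8, 24, 9, 25, ..., 15, 31}: the low halves of the
// two input vectors interleaved byte by byte, which is the pattern that
// isVMerge(N, 1, 8, 24) checks element by element.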

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1, 2, or 4 bytes).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 0, 16);
  return isVMerge(N, UnitSize, 0, 0);
}


/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         "PPC only supports shuffles by bytes!");

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  if (!isUnary) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  }
  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte element
  // splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}

/// isAllNegativeZeroVector - Returns true if all elements of build_vector
/// are -0.0.
bool PPC::isAllNegativeZeroVector(SDNode *N) {
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);

  APInt APVal, APUndef;
  unsigned BitSize;
  bool HasAnyUndefs;

  if (BV->isConstantSplat(APVal, APUndef, BitSize, HasAnyUndefs, 32, true))
    if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
      return CFP->getValueAPF().isNegZero();

  return false;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(0, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if the corresponding elements in each chunk of the buildvector agree.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail out fully.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (UniquedVals[i&(Multiple-1)].getNode() == 0)
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (UniquedVals[i].getNode() == 0) continue;  // Must have been undefs.

      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (UniquedVals[Multiple-1].getNode() == 0)
        return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16)
        return DAG.getTargetConstant(Val, MVT::i32);  // 0,0,0,4 -> vspltisw(4)
    }
    if (LeadingOnes) {
      if (UniquedVals[Multiple-1].getNode() == 0)
        return DAG.getTargetConstant(~0U, MVT::i32);  // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.getNode() == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (OpVal.getNode() == 0) return SDValue();  // All UNDEF: use implicit def.
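
  // OpVal now holds the single non-undef element of the build_vector.  Extract
  // its raw integer (or f32 bit-pattern) value below so that the replication
  // check can compare progressively smaller halves of it.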
  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal.  Continue doing this until we
  // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
         (Value                        & ((1 << (8*ValSizeInBytes))-1)))
      return SDValue();
  }

  // Properly sign extend the value.
  int MaskVal = SignExtend32(Value, ByteSize * 8);

  // If this is zero, don't match; zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5-bit sext field, return it.
  if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value.  If so, this returns true and the
/// immediate.
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (N->getOpcode() != ISD::Constant)
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
static bool isIntS16Immediate(SDValue Op, short &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}


/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index,
                                            SelectionDAG &DAG) const {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i can fold it if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are provably
    // disjoint.
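    // For example, if the low bits of the LHS are known to be zero (say, a
    // 16-byte-aligned address) and the RHS is known to fit entirely within
    // those low bits, then no bit can be set in both operands, so the OR is
    // equivalent to an ADD and the access can be selected as indexed [r+r].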
    APInt LHSKnownZero, LHSKnownOne;
    APInt RHSKnownZero, RHSKnownOne;
    DAG.ComputeMaskedBits(N.getOperand(0),
                          LHSKnownZero, LHSKnownOne);

    if (LHSKnownZero.getBoolValue()) {
      DAG.ComputeMaskedBits(N.getOperand(1),
                            RHSKnownZero, RHSKnownOne);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnownZero | RHSKnownZero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

// If we happen to be doing an i64 load or store into a stack slot that has
// less than a 4-byte alignment, then the frame-index elimination may need to
// use an indexed load or store instruction (because the offset may not be a
// multiple of 4). The extra register needed to hold the offset comes from the
// register scavenger, and it is possible that the scavenger will need to use
// an emergency spill slot. As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
  // FIXME: This does not handle the LWA case.
  if (VT != MVT::i64)
    return;

  // NOTE: We'll exclude negative FIs here, which come from argument
  // lowering, because there are no known test cases triggering this problem
  // using packed structures (or similar). We can remove this exclusion if
  // we find such a test case. The reason why this is so test-case driven is
  // because this entire 'fixup' is only to prevent crashes (from the
  // register scavenger) on not-really-valid inputs. For example, if we have:
  //   %a = alloca i1
  //   %b = bitcast i1* %a to i64*
  //   store i64* a, i64 b
  // then the store should really be marked as 'align 1', but is not. If it
  // were marked as 'align 1' then the indexed form would have been
  // instruction-selected initially, and the problem this 'fixup' is preventing
  // won't happen regardless.
  if (FrameIdx < 0)
    return;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  unsigned Align = MFI->getObjectAlignment(FrameIdx);
  if (Align >= 4)
    return;

  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setHasNonRISpills();
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg.  If Aligned is true, only accept displacements
/// suitable for STD and friends, i.e. multiples of 4.
bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
                                            SDValue &Base,
                                            SelectionDAG &DAG,
                                            bool Aligned) const {
  // FIXME dl should come from parent load or store, not from address
  SDLoc dl(N);
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!Aligned || (imm & 3) == 0)) {
      Disp = DAG.getTargetConstant(imm, N.getValueType());
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
        fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!Aligned || (imm & 3) == 0)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.ComputeMaskedBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);

      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant(imm, N.getValueType());
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0".
    short Imm;
    if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) {
      Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
      Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                             CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if ((CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
        (!Aligned || (CN->getZExtValue() & 3) == 0)) {
      int Addr = (int)CN->getZExtValue();

      // Otherwise, break this down into an LIS + disp.  The high halfword must
      // compensate for the sign extension of the low 16-bit displacement.
      Disp = DAG.getTargetConstant((short)Addr, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
    fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
  } else
    Base = N;
  return true;      // [r+0]
}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address.  This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the operand is an addition, always emit this as [r+r], since this is
  // better (for code size, and execution, as the memop does the add for free)
  // than emitting an explicit add.
  if (N.getOpcode() == ISD::ADD) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Index = N;
  return true;
}

/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  unsigned Alignment;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlignment();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT  = ST->getMemoryVT();
    Alignment = ST->getAlignment();
    isLoad = false;
  } else
    return false;

  // PowerPC doesn't have preinc load/store instructions for vectors.
  if (VT.isVector())
    return false;

  if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {

    // Common code will reject creating a pre-inc form if the base pointer
    // is a frame index, or if N is a store and the base pointer is either
    // the same as or a predecessor of the value being stored.  Check for
    // those situations here, and try with swapped Base/Offset instead.
    bool Swap = false;

    if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
      Swap = true;
    else if (!isLoad) {
      SDValue Val = cast<StoreSDNode>(N)->getValue();
      if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
        Swap = true;
    }

    if (Swap)
      std::swap(Base, Offset);

    AM = ISD::PRE_INC;
    return true;
  }

  // LDU/STU can only handle immediates that are a multiple of 4.
  if (VT != MVT::i64) {
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, false))
      return false;
  } else {
    // LDU/STU need an address with at least 4-byte alignment.
    if (Alignment < 4)
      return false;

    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, true))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

/// GetLabelAccessInfo - Return true if we should reference labels using a
/// PICBase, set the HiOpFlags and LoOpFlags to the target MO flags.
static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags,
                               unsigned &LoOpFlags, const GlobalValue *GV = 0) {
  HiOpFlags = PPCII::MO_HA;
  LoOpFlags = PPCII::MO_LO;

  // Don't use the pic base if not in PIC relocation model.  Or if we are on a
  // non-darwin platform.  We don't support PIC on other platforms yet.
  bool isPIC = TM.getRelocationModel() == Reloc::PIC_ &&
               TM.getSubtarget<PPCSubtarget>().isDarwin();
  if (isPIC) {
    HiOpFlags |= PPCII::MO_PIC_FLAG;
    LoOpFlags |= PPCII::MO_PIC_FLAG;
  }

  // If this is a reference to a global value that requires a non-lazy-ptr, make
  // sure that instruction lowering adds it.
  if (GV && TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV, TM)) {
    HiOpFlags |= PPCII::MO_NLP_FLAG;
    LoOpFlags |= PPCII::MO_NLP_FLAG;

    if (GV->hasHiddenVisibility()) {
      HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
      LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
    }
  }

  return isPIC;
}

static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
                             SelectionDAG &DAG) {
  EVT PtrVT = HiPart.getValueType();
  SDValue Zero = DAG.getConstant(0, PtrVT);
  SDLoc DL(HiPart);

  SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);

  // With PIC, the first instruction is actually "GR+hi(&G)".
  if (isPIC)
    Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);

  // Generate non-pic code that has direct accesses to the constant pool.
  // The address of the global is just (hi(&g)+lo(&g)).
  return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
}

SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  const Constant *C = CP->getConstVal();

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
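  // We therefore materialize it with a single TOC_ENTRY load relative to the
  // TOC pointer in X2.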
  if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
    SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
    return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(CP), MVT::i64, GA,
                       DAG.getRegister(PPC::X2, MVT::i64));
  }

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
  SDValue CPIHi =
    DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
  SDValue CPILo =
    DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
  return LowerLabelRef(CPIHi, CPILo, isPIC, DAG);
}

SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
    SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
    return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(JT), MVT::i64, GA,
                       DAG.getRegister(PPC::X2, MVT::i64));
  }

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
  SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
  SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
  return LowerLabelRef(JTIHi, JTILo, isPIC, DAG);
}

SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();

  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
  SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
  SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
  return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG);
}

SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {

  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  SDLoc dl(GA);
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy();
  bool is64bit = PPCSubTarget.isPPC64();

  TLSModel::Model Model = getTargetMachine().getTLSModel(GV);

  if (Model == TLSModel::LocalExec) {
    SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_TPREL_HA);
    SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_TPREL_LO);
    SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2,
                                     is64bit ? MVT::i64 : MVT::i32);
MVT::i64 : MVT::i32); 1399 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 1400 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 1401 } 1402 1403 if (!is64bit) 1404 llvm_unreachable("only local-exec is currently supported for ppc32"); 1405 1406 if (Model == TLSModel::InitialExec) { 1407 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 1408 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 1409 PPCII::MO_TLS); 1410 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 1411 SDValue TPOffsetHi = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, 1412 PtrVT, GOTReg, TGA); 1413 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, 1414 PtrVT, TGA, TPOffsetHi); 1415 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 1416 } 1417 1418 if (Model == TLSModel::GeneralDynamic) { 1419 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 1420 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 1421 SDValue GOTEntryHi = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 1422 GOTReg, TGA); 1423 SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSGD_L, dl, PtrVT, 1424 GOTEntryHi, TGA); 1425 1426 // We need a chain node, and don't have one handy. The underlying 1427 // call has no side effects, so using the function entry node 1428 // suffices. 1429 SDValue Chain = DAG.getEntryNode(); 1430 Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, GOTEntry); 1431 SDValue ParmReg = DAG.getRegister(PPC::X3, MVT::i64); 1432 SDValue TLSAddr = DAG.getNode(PPCISD::GET_TLS_ADDR, dl, 1433 PtrVT, ParmReg, TGA); 1434 // The return value from GET_TLS_ADDR really is in X3 already, but 1435 // some hacks are needed here to tie everything together. The extra 1436 // copies dissolve during subsequent transforms. 1437 Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, TLSAddr); 1438 return DAG.getCopyFromReg(Chain, dl, PPC::X3, PtrVT); 1439 } 1440 1441 if (Model == TLSModel::LocalDynamic) { 1442 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 1443 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 1444 SDValue GOTEntryHi = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 1445 GOTReg, TGA); 1446 SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSLD_L, dl, PtrVT, 1447 GOTEntryHi, TGA); 1448 1449 // We need a chain node, and don't have one handy. The underlying 1450 // call has no side effects, so using the function entry node 1451 // suffices. 1452 SDValue Chain = DAG.getEntryNode(); 1453 Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, GOTEntry); 1454 SDValue ParmReg = DAG.getRegister(PPC::X3, MVT::i64); 1455 SDValue TLSAddr = DAG.getNode(PPCISD::GET_TLSLD_ADDR, dl, 1456 PtrVT, ParmReg, TGA); 1457 // The return value from GET_TLSLD_ADDR really is in X3 already, but 1458 // some hacks are needed here to tie everything together. The extra 1459 // copies dissolve during subsequent transforms. 1460 Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, TLSAddr); 1461 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, PtrVT, 1462 Chain, ParmReg, TGA); 1463 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA); 1464 } 1465 1466 llvm_unreachable("Unknown TLS model!"); 1467 } 1468 1469 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 1470 SelectionDAG &DAG) const { 1471 EVT PtrVT = Op.getValueType(); 1472 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 1473 SDLoc DL(GSDN); 1474 const GlobalValue *GV = GSDN->getGlobal(); 1475 1476 // 64-bit SVR4 ABI code is always position-independent. 
1477 // The actual address of the GlobalValue is stored in the TOC. 1478 if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) { 1479 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); 1480 return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i64, GA, 1481 DAG.getRegister(PPC::X2, MVT::i64)); 1482 } 1483 1484 unsigned MOHiFlag, MOLoFlag; 1485 bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag, GV); 1486 1487 SDValue GAHi = 1488 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); 1489 SDValue GALo = 1490 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); 1491 1492 SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG); 1493 1494 // If the global reference is actually to a non-lazy-pointer, we have to do an 1495 // extra load to get the address of the global. 1496 if (MOHiFlag & PPCII::MO_NLP_FLAG) 1497 Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(), 1498 false, false, false, 0); 1499 return Ptr; 1500 } 1501 1502 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 1503 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 1504 SDLoc dl(Op); 1505 1506 // If we're comparing for equality to zero, expose the fact that this is 1507 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can 1508 // fold the new nodes. 1509 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1510 if (C->isNullValue() && CC == ISD::SETEQ) { 1511 EVT VT = Op.getOperand(0).getValueType(); 1512 SDValue Zext = Op.getOperand(0); 1513 if (VT.bitsLT(MVT::i32)) { 1514 VT = MVT::i32; 1515 Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0)); 1516 } 1517 unsigned Log2b = Log2_32(VT.getSizeInBits()); 1518 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext); 1519 SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz, 1520 DAG.getConstant(Log2b, MVT::i32)); 1521 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc); 1522 } 1523 // Leave comparisons against 0 and -1 alone for now, since they're usually 1524 // optimized. FIXME: revisit this when we can custom lower all setcc 1525 // optimizations. 1526 if (C->isAllOnesValue() || C->isNullValue()) 1527 return SDValue(); 1528 } 1529 1530 // If we have an integer seteq/setne, turn it into a compare against zero 1531 // by xor'ing the rhs with the lhs, which is faster than setting a 1532 // condition register, reading it back out, and masking the correct bit. The 1533 // normal approach here uses sub to do this instead of xor. Using xor exposes 1534 // the result to other bit-twiddling opportunities.
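// As a rough illustration in C (assuming a clz32() helper with cntlzw semantics, i.e. clz32(0) == 32; names here are illustrative, not from this file):
//
//   int is_zero(unsigned x)              { return clz32(x) >> 5; } // Log2_32(32) == 5
//   int is_equal(unsigned a, unsigned b) { return is_zero(a ^ b); }
//
// clz32 of a nonzero value is at most 31, so the shift by 5 yields exactly the 0/1 the comparison wants; xor-ing the operands first reduces the general equality case to the compare-against-zero form handled above.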
1535 EVT LHSVT = Op.getOperand(0).getValueType(); 1536 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 1537 EVT VT = Op.getValueType(); 1538 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0), 1539 Op.getOperand(1)); 1540 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, LHSVT), CC); 1541 } 1542 return SDValue(); 1543 } 1544 1545 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG, 1546 const PPCSubtarget &Subtarget) const { 1547 SDNode *Node = Op.getNode(); 1548 EVT VT = Node->getValueType(0); 1549 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1550 SDValue InChain = Node->getOperand(0); 1551 SDValue VAListPtr = Node->getOperand(1); 1552 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 1553 SDLoc dl(Node); 1554 1555 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only"); 1556 1557 // gpr_index 1558 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 1559 VAListPtr, MachinePointerInfo(SV), MVT::i8, 1560 false, false, 0); 1561 InChain = GprIndex.getValue(1); 1562 1563 if (VT == MVT::i64) { 1564 // Check if GprIndex is even 1565 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex, 1566 DAG.getConstant(1, MVT::i32)); 1567 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd, 1568 DAG.getConstant(0, MVT::i32), ISD::SETNE); 1569 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex, 1570 DAG.getConstant(1, MVT::i32)); 1571 // Align GprIndex to be even if it isn't 1572 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne, 1573 GprIndex); 1574 } 1575 1576 // fpr index is 1 byte after gpr 1577 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 1578 DAG.getConstant(1, MVT::i32)); 1579 1580 // fpr 1581 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 1582 FprPtr, MachinePointerInfo(SV), MVT::i8, 1583 false, false, 0); 1584 InChain = FprIndex.getValue(1); 1585 1586 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 1587 DAG.getConstant(8, MVT::i32)); 1588 1589 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 1590 DAG.getConstant(4, MVT::i32)); 1591 1592 // areas 1593 SDValue OverflowArea = DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, 1594 MachinePointerInfo(), false, false, 1595 false, 0); 1596 InChain = OverflowArea.getValue(1); 1597 1598 SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, 1599 MachinePointerInfo(), false, false, 1600 false, 0); 1601 InChain = RegSaveArea.getValue(1); 1602 1603 // select overflow_area if index > 8 1604 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex, 1605 DAG.getConstant(8, MVT::i32), ISD::SETLT); 1606 1607 // adjustment constant gpr_index * 4/8 1608 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32, 1609 VT.isInteger() ? GprIndex : FprIndex, 1610 DAG.getConstant(VT.isInteger() ? 4 : 8, 1611 MVT::i32)); 1612 1613 // OurReg = RegSaveArea + RegConstant 1614 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea, 1615 RegConstant); 1616 1617 // Floating types are 32 bytes into RegSaveArea 1618 if (VT.isFloatingPoint()) 1619 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg, 1620 DAG.getConstant(32, MVT::i32)); 1621 1622 // increase {f,g}pr_index by 1 (or 2 if VT is i64) 1623 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32, 1624 VT.isInteger() ? GprIndex : FprIndex, 1625 DAG.getConstant(VT == MVT::i64 ? 
2 : 1, 1626 MVT::i32)); 1627 1628 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1, 1629 VT.isInteger() ? VAListPtr : FprPtr, 1630 MachinePointerInfo(SV), 1631 MVT::i8, false, false, 0); 1632 1633 // determine if we should load from reg_save_area or overflow_area 1634 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea); 1635 1636 // increase overflow_area by 4/8 if gpr/fpr > 8 1637 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea, 1638 DAG.getConstant(VT.isInteger() ? 4 : 8, 1639 MVT::i32)); 1640 1641 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea, 1642 OverflowAreaPlusN); 1643 1644 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, 1645 OverflowAreaPtr, 1646 MachinePointerInfo(), 1647 MVT::i32, false, false, 0); 1648 1649 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(), 1650 false, false, false, 0); 1651 } 1652 1653 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 1654 SelectionDAG &DAG) const { 1655 return Op.getOperand(0); 1656 } 1657 1658 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 1659 SelectionDAG &DAG) const { 1660 SDValue Chain = Op.getOperand(0); 1661 SDValue Trmp = Op.getOperand(1); // trampoline 1662 SDValue FPtr = Op.getOperand(2); // nested function 1663 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 1664 SDLoc dl(Op); 1665 1666 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1667 bool isPPC64 = (PtrVT == MVT::i64); 1668 Type *IntPtrTy = 1669 DAG.getTargetLoweringInfo().getDataLayout()->getIntPtrType( 1670 *DAG.getContext()); 1671 1672 TargetLowering::ArgListTy Args; 1673 TargetLowering::ArgListEntry Entry; 1674 1675 Entry.Ty = IntPtrTy; 1676 Entry.Node = Trmp; Args.push_back(Entry); 1677 1678 // TrampSize == (isPPC64 ? 48 : 40); 1679 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, 1680 isPPC64 ? MVT::i64 : MVT::i32); 1681 Args.push_back(Entry); 1682 1683 Entry.Node = FPtr; Args.push_back(Entry); 1684 Entry.Node = Nest; Args.push_back(Entry); 1685 1686 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 1687 TargetLowering::CallLoweringInfo CLI(Chain, 1688 Type::getVoidTy(*DAG.getContext()), 1689 false, false, false, false, 0, 1690 CallingConv::C, 1691 /*isTailCall=*/false, 1692 /*doesNotRet=*/false, 1693 /*isReturnValueUsed=*/true, 1694 DAG.getExternalSymbol("__trampoline_setup", PtrVT), 1695 Args, DAG, dl); 1696 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 1697 1698 return CallResult.second; 1699 } 1700 1701 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG, 1702 const PPCSubtarget &Subtarget) const { 1703 MachineFunction &MF = DAG.getMachineFunction(); 1704 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 1705 1706 SDLoc dl(Op); 1707 1708 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) { 1709 // vastart just stores the address of the VarArgsFrameIndex slot into the 1710 // memory location argument. 1711 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1712 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 1713 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1714 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 1715 MachinePointerInfo(SV), 1716 false, false, 0); 1717 } 1718 1719 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 1720 // We suppose the given va_list is already allocated. 
1721 // 1722 // typedef struct { 1723 // char gpr; /* index into the array of 8 GPRs 1724 // * stored in the register save area 1725 // * gpr=0 corresponds to r3, 1726 // * gpr=1 to r4, etc. 1727 // */ 1728 // char fpr; /* index into the array of 8 FPRs 1729 // * stored in the register save area 1730 // * fpr=0 corresponds to f1, 1731 // * fpr=1 to f2, etc. 1732 // */ 1733 // char *overflow_arg_area; 1734 // /* location on stack that holds 1735 // * the next overflow argument 1736 // */ 1737 // char *reg_save_area; 1738 // /* where r3:r10 and f1:f8 (if saved) 1739 // * are stored 1740 // */ 1741 // } va_list[1]; 1742 1743 1744 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), MVT::i32); 1745 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), MVT::i32); 1746 1747 1748 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1749 1750 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 1751 PtrVT); 1752 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 1753 PtrVT); 1754 1755 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 1756 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT); 1757 1758 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 1759 SDValue ConstStackOffset = DAG.getConstant(StackOffset, PtrVT); 1760 1761 uint64_t FPROffset = 1; 1762 SDValue ConstFPROffset = DAG.getConstant(FPROffset, PtrVT); 1763 1764 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1765 1766 // Store first byte : number of int regs 1767 SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, 1768 Op.getOperand(1), 1769 MachinePointerInfo(SV), 1770 MVT::i8, false, false, 0); 1771 uint64_t nextOffset = FPROffset; 1772 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 1773 ConstFPROffset); 1774 1775 // Store second byte : number of float regs 1776 SDValue secondStore = 1777 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 1778 MachinePointerInfo(SV, nextOffset), MVT::i8, 1779 false, false, 0); 1780 nextOffset += StackOffset; 1781 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 1782 1783 // Store second word : arguments given on stack 1784 SDValue thirdStore = 1785 DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 1786 MachinePointerInfo(SV, nextOffset), 1787 false, false, 0); 1788 nextOffset += FrameOffset; 1789 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 1790 1791 // Store third word : arguments given in registers 1792 return DAG.getStore(thirdStore, dl, FR, nextPtr, 1793 MachinePointerInfo(SV, nextOffset), 1794 false, false, 0); 1795 1796 } 1797 1798 #include "PPCGenCallingConv.inc" 1799 1800 bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 1801 CCValAssign::LocInfo &LocInfo, 1802 ISD::ArgFlagsTy &ArgFlags, 1803 CCState &State) { 1804 return true; 1805 } 1806 1807 bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, 1808 MVT &LocVT, 1809 CCValAssign::LocInfo &LocInfo, 1810 ISD::ArgFlagsTy &ArgFlags, 1811 CCState &State) { 1812 static const uint16_t ArgRegs[] = { 1813 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 1814 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 1815 }; 1816 const unsigned NumArgRegs = array_lengthof(ArgRegs); 1817 1818 unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs); 1819 1820 // Skip one register if the first unallocated register has an even register 1821 // number and there are still argument registers available which have not been 1822 // allocated yet. 
RegNum is actually an index into ArgRegs, which means we 1823 // need to skip a register if RegNum is odd. 1824 if (RegNum != NumArgRegs && RegNum % 2 == 1) { 1825 State.AllocateReg(ArgRegs[RegNum]); 1826 } 1827 1828 // Always return false here, as this function only makes sure that the first 1829 // unallocated register has an odd register number and does not actually 1830 // allocate a register for the current argument. 1831 return false; 1832 } 1833 1834 bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT, 1835 MVT &LocVT, 1836 CCValAssign::LocInfo &LocInfo, 1837 ISD::ArgFlagsTy &ArgFlags, 1838 CCState &State) { 1839 static const uint16_t ArgRegs[] = { 1840 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1841 PPC::F8 1842 }; 1843 1844 const unsigned NumArgRegs = array_lengthof(ArgRegs); 1845 1846 unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs); 1847 1848 // If there is only one Floating-point register left we need to put both f64 1849 // values of a split ppc_fp128 value on the stack. 1850 if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) { 1851 State.AllocateReg(ArgRegs[RegNum]); 1852 } 1853 1854 // Always return false here, as this function only makes sure that the two f64 1855 // values a ppc_fp128 value is split into are both passed in registers or both 1856 // passed on the stack and does not actually allocate a register for the 1857 // current argument. 1858 return false; 1859 } 1860 1861 /// GetFPR - Get the set of FP registers that should be allocated for arguments, 1862 /// on Darwin. 1863 static const uint16_t *GetFPR() { 1864 static const uint16_t FPR[] = { 1865 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1866 PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13 1867 }; 1868 1869 return FPR; 1870 } 1871 1872 /// CalculateStackSlotSize - Calculates the size reserved for this argument on 1873 /// the stack. 
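/// For example (64-bit, PtrByteSize == 8): a plain i32 reserves
/// ((4 + 7) / 8) * 8 == 8 bytes, while a 13-byte byval aggregate reserves
/// ((13 + 7) / 8) * 8 == 16 bytes.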
1874 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 1875 unsigned PtrByteSize) { 1876 unsigned ArgSize = ArgVT.getSizeInBits()/8; 1877 if (Flags.isByVal()) 1878 ArgSize = Flags.getByValSize(); 1879 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 1880 1881 return ArgSize; 1882 } 1883 1884 SDValue 1885 PPCTargetLowering::LowerFormalArguments(SDValue Chain, 1886 CallingConv::ID CallConv, bool isVarArg, 1887 const SmallVectorImpl<ISD::InputArg> 1888 &Ins, 1889 SDLoc dl, SelectionDAG &DAG, 1890 SmallVectorImpl<SDValue> &InVals) 1891 const { 1892 if (PPCSubTarget.isSVR4ABI()) { 1893 if (PPCSubTarget.isPPC64()) 1894 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, 1895 dl, DAG, InVals); 1896 else 1897 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, 1898 dl, DAG, InVals); 1899 } else { 1900 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, 1901 dl, DAG, InVals); 1902 } 1903 } 1904 1905 SDValue 1906 PPCTargetLowering::LowerFormalArguments_32SVR4( 1907 SDValue Chain, 1908 CallingConv::ID CallConv, bool isVarArg, 1909 const SmallVectorImpl<ISD::InputArg> 1910 &Ins, 1911 SDLoc dl, SelectionDAG &DAG, 1912 SmallVectorImpl<SDValue> &InVals) const { 1913 1914 // 32-bit SVR4 ABI Stack Frame Layout: 1915 // +-----------------------------------+ 1916 // +--> | Back chain | 1917 // | +-----------------------------------+ 1918 // | | Floating-point register save area | 1919 // | +-----------------------------------+ 1920 // | | General register save area | 1921 // | +-----------------------------------+ 1922 // | | CR save word | 1923 // | +-----------------------------------+ 1924 // | | VRSAVE save word | 1925 // | +-----------------------------------+ 1926 // | | Alignment padding | 1927 // | +-----------------------------------+ 1928 // | | Vector register save area | 1929 // | +-----------------------------------+ 1930 // | | Local variable space | 1931 // | +-----------------------------------+ 1932 // | | Parameter list area | 1933 // | +-----------------------------------+ 1934 // | | LR save word | 1935 // | +-----------------------------------+ 1936 // SP--> +--- | Back chain | 1937 // +-----------------------------------+ 1938 // 1939 // Specifications: 1940 // System V Application Binary Interface PowerPC Processor Supplement 1941 // AltiVec Technology Programming Interface Manual 1942 1943 MachineFunction &MF = DAG.getMachineFunction(); 1944 MachineFrameInfo *MFI = MF.getFrameInfo(); 1945 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 1946 1947 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1948 // Potential tail calls could cause overwriting of argument stack slots. 1949 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 1950 (CallConv == CallingConv::Fast)); 1951 unsigned PtrByteSize = 4; 1952 1953 // Assign locations to all of the incoming arguments. 1954 SmallVector<CCValAssign, 16> ArgLocs; 1955 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1956 getTargetMachine(), ArgLocs, *DAG.getContext()); 1957 1958 // Reserve space for the linkage area on the stack. 1959 CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize); 1960 1961 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 1962 1963 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1964 CCValAssign &VA = ArgLocs[i]; 1965 1966 // Arguments stored in registers. 
1967 if (VA.isRegLoc()) { 1968 const TargetRegisterClass *RC; 1969 EVT ValVT = VA.getValVT(); 1970 1971 switch (ValVT.getSimpleVT().SimpleTy) { 1972 default: 1973 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 1974 case MVT::i32: 1975 RC = &PPC::GPRCRegClass; 1976 break; 1977 case MVT::f32: 1978 RC = &PPC::F4RCRegClass; 1979 break; 1980 case MVT::f64: 1981 RC = &PPC::F8RCRegClass; 1982 break; 1983 case MVT::v16i8: 1984 case MVT::v8i16: 1985 case MVT::v4i32: 1986 case MVT::v4f32: 1987 RC = &PPC::VRRCRegClass; 1988 break; 1989 } 1990 1991 // Transform the arguments stored in physical registers into virtual ones. 1992 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 1993 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, ValVT); 1994 1995 InVals.push_back(ArgValue); 1996 } else { 1997 // Argument stored in memory. 1998 assert(VA.isMemLoc()); 1999 2000 unsigned ArgSize = VA.getLocVT().getSizeInBits() / 8; 2001 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), 2002 isImmutable); 2003 2004 // Create load nodes to retrieve arguments from the stack. 2005 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2006 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2007 MachinePointerInfo(), 2008 false, false, false, 0)); 2009 } 2010 } 2011 2012 // Assign locations to all of the incoming aggregate by value arguments. 2013 // Aggregates passed by value are stored in the local variable space of the 2014 // caller's stack frame, right above the parameter list area. 2015 SmallVector<CCValAssign, 16> ByValArgLocs; 2016 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 2017 getTargetMachine(), ByValArgLocs, *DAG.getContext()); 2018 2019 // Reserve stack space for the allocations in CCInfo. 2020 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 2021 2022 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 2023 2024 // Area that is at least reserved in the caller of this function. 2025 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 2026 2027 // Set the size that is at least reserved in caller of this function. Tail 2028 // call optimized function's reserved stack space needs to be aligned so that 2029 // taking the difference between two stack areas will result in an aligned 2030 // stack. 2031 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 2032 2033 MinReservedArea = 2034 std::max(MinReservedArea, 2035 PPCFrameLowering::getMinCallFrameSize(false, false)); 2036 2037 unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameLowering()-> 2038 getStackAlignment(); 2039 unsigned AlignMask = TargetAlign-1; 2040 MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask; 2041 2042 FI->setMinReservedArea(MinReservedArea); 2043 2044 SmallVector<SDValue, 8> MemOps; 2045 2046 // If the function takes variable number of arguments, make a frame index for 2047 // the start of the first vararg value... for expansion of llvm.va_start. 
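// For the 32-bit SVR4 case handled below, that register save area works out to Depth = 8 GPRs * 4 bytes + 8 FPRs * 8 bytes = 96 bytes, and the gpr/fpr indices recorded here are exactly what LowerVAARG later reads back out of the two leading bytes of the va_list.  Roughly, one va_arg(ap, int) step against that layout looks like (illustrative only):
//
//   // if (gpr < 8) { value at reg_save_area + 4 * gpr; ++gpr; }
//   // else         { value at overflow_arg_area; overflow_arg_area += 4; }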
2048 if (isVarArg) { 2049 static const uint16_t GPArgRegs[] = { 2050 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2051 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2052 }; 2053 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); 2054 2055 static const uint16_t FPArgRegs[] = { 2056 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 2057 PPC::F8 2058 }; 2059 const unsigned NumFPArgRegs = array_lengthof(FPArgRegs); 2060 2061 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs, 2062 NumGPArgRegs)); 2063 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs, 2064 NumFPArgRegs)); 2065 2066 // Make room for NumGPArgRegs and NumFPArgRegs. 2067 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + 2068 NumFPArgRegs * EVT(MVT::f64).getSizeInBits()/8; 2069 2070 FuncInfo->setVarArgsStackOffset( 2071 MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 2072 CCInfo.getNextStackOffset(), true)); 2073 2074 FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false)); 2075 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2076 2077 // The fixed integer arguments of a variadic function are stored to the 2078 // VarArgsFrameIndex on the stack so that they may be loaded by deferencing 2079 // the result of va_next. 2080 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) { 2081 // Get an existing live-in vreg, or add a new one. 2082 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]); 2083 if (!VReg) 2084 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); 2085 2086 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2087 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2088 MachinePointerInfo(), false, false, 0); 2089 MemOps.push_back(Store); 2090 // Increment the address by four for the next argument to store 2091 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); 2092 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2093 } 2094 2095 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 2096 // is set. 2097 // The double arguments are stored to the VarArgsFrameIndex 2098 // on the stack. 2099 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) { 2100 // Get an existing live-in vreg, or add a new one. 2101 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); 2102 if (!VReg) 2103 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 2104 2105 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 2106 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2107 MachinePointerInfo(), false, false, 0); 2108 MemOps.push_back(Store); 2109 // Increment the address by eight for the next argument to store 2110 SDValue PtrOff = DAG.getConstant(EVT(MVT::f64).getSizeInBits()/8, 2111 PtrVT); 2112 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2113 } 2114 } 2115 2116 if (!MemOps.empty()) 2117 Chain = DAG.getNode(ISD::TokenFactor, dl, 2118 MVT::Other, &MemOps[0], MemOps.size()); 2119 2120 return Chain; 2121 } 2122 2123 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 2124 // value to MVT::i64 and then truncate to the correct register size. 
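// Concretely: an i32 argument marked 'signext' with value -5 arrives in the full 64-bit GPR as 0xFFFFFFFFFFFFFFFB.  The helper below tags the incoming CopyFromReg with AssertSext (or AssertZext for 'zeroext') to record that the upper 32 bits are already a correct extension, so the TRUNCATE back to i32 is known to be lossless and later re-extensions of the value can fold away.  (Worked example only, not taken from this file.)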
2125 SDValue 2126 PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT, 2127 SelectionDAG &DAG, SDValue ArgVal, 2128 SDLoc dl) const { 2129 if (Flags.isSExt()) 2130 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 2131 DAG.getValueType(ObjectVT)); 2132 else if (Flags.isZExt()) 2133 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 2134 DAG.getValueType(ObjectVT)); 2135 2136 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); 2137 } 2138 2139 // Set the size that is at least reserved in caller of this function. Tail 2140 // call optimized functions' reserved stack space needs to be aligned so that 2141 // taking the difference between two stack areas will result in an aligned 2142 // stack. 2143 void 2144 PPCTargetLowering::setMinReservedArea(MachineFunction &MF, SelectionDAG &DAG, 2145 unsigned nAltivecParamsAtEnd, 2146 unsigned MinReservedArea, 2147 bool isPPC64) const { 2148 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 2149 // Add the Altivec parameters at the end, if needed. 2150 if (nAltivecParamsAtEnd) { 2151 MinReservedArea = ((MinReservedArea+15)/16)*16; 2152 MinReservedArea += 16*nAltivecParamsAtEnd; 2153 } 2154 MinReservedArea = 2155 std::max(MinReservedArea, 2156 PPCFrameLowering::getMinCallFrameSize(isPPC64, true)); 2157 unsigned TargetAlign 2158 = DAG.getMachineFunction().getTarget().getFrameLowering()-> 2159 getStackAlignment(); 2160 unsigned AlignMask = TargetAlign-1; 2161 MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask; 2162 FI->setMinReservedArea(MinReservedArea); 2163 } 2164 2165 SDValue 2166 PPCTargetLowering::LowerFormalArguments_64SVR4( 2167 SDValue Chain, 2168 CallingConv::ID CallConv, bool isVarArg, 2169 const SmallVectorImpl<ISD::InputArg> 2170 &Ins, 2171 SDLoc dl, SelectionDAG &DAG, 2172 SmallVectorImpl<SDValue> &InVals) const { 2173 // TODO: add description of PPC stack frame format, or at least some docs. 2174 // 2175 MachineFunction &MF = DAG.getMachineFunction(); 2176 MachineFrameInfo *MFI = MF.getFrameInfo(); 2177 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2178 2179 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2180 // Potential tail calls could cause overwriting of argument stack slots. 2181 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 2182 (CallConv == CallingConv::Fast)); 2183 unsigned PtrByteSize = 8; 2184 2185 unsigned ArgOffset = PPCFrameLowering::getLinkageSize(true, true); 2186 // Area that is at least reserved in caller of this function. 2187 unsigned MinReservedArea = ArgOffset; 2188 2189 static const uint16_t GPR[] = { 2190 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 2191 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 2192 }; 2193 2194 static const uint16_t *FPR = GetFPR(); 2195 2196 static const uint16_t VR[] = { 2197 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 2198 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 2199 }; 2200 2201 const unsigned Num_GPR_Regs = array_lengthof(GPR); 2202 const unsigned Num_FPR_Regs = 13; 2203 const unsigned Num_VR_Regs = array_lengthof(VR); 2204 2205 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 2206 2207 // Add DAG nodes to load the arguments or copy them out of registers. On 2208 // entry to a function on PPC, the arguments start after the linkage area, 2209 // although the first ones are often in registers. 
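// For 64-bit SVR4 the offsets below work out as follows, assuming the usual 48-byte ELFv1 linkage area returned by getLinkageSize(true, true) (illustrative example, not from this file):
//
//   void f(long a, long b, long c);
//   //  a -> X3, home slot at SP+48
//   //  b -> X4, home slot at SP+56
//   //  c -> X5, home slot at SP+64, and so on;
//
// a parameter only needs an actual load (needsLoad below) once the available GPRs/FPRs/VRs run out and its parameter-save-area slot is the only copy.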
2210 2211 SmallVector<SDValue, 8> MemOps; 2212 unsigned nAltivecParamsAtEnd = 0; 2213 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 2214 unsigned CurArgIdx = 0; 2215 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 2216 SDValue ArgVal; 2217 bool needsLoad = false; 2218 EVT ObjectVT = Ins[ArgNo].VT; 2219 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 2220 unsigned ArgSize = ObjSize; 2221 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 2222 std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx); 2223 CurArgIdx = Ins[ArgNo].OrigArgIndex; 2224 2225 unsigned CurArgOffset = ArgOffset; 2226 2227 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 2228 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 2229 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 2230 if (isVarArg) { 2231 MinReservedArea = ((MinReservedArea+15)/16)*16; 2232 MinReservedArea += CalculateStackSlotSize(ObjectVT, 2233 Flags, 2234 PtrByteSize); 2235 } else 2236 nAltivecParamsAtEnd++; 2237 } else 2238 // Calculate min reserved area. 2239 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 2240 Flags, 2241 PtrByteSize); 2242 2243 // FIXME the codegen can be much improved in some cases. 2244 // We do not have to keep everything in memory. 2245 if (Flags.isByVal()) { 2246 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 2247 ObjSize = Flags.getByValSize(); 2248 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2249 // Empty aggregate parameters do not take up registers. Examples: 2250 // struct { } a; 2251 // union { } b; 2252 // int c[0]; 2253 // etc. However, we have to provide a place-holder in InVals, so 2254 // pretend we have an 8-byte item at the current address for that 2255 // purpose. 2256 if (!ObjSize) { 2257 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 2258 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2259 InVals.push_back(FIN); 2260 continue; 2261 } 2262 // All aggregates smaller than 8 bytes must be passed right-justified. 2263 if (ObjSize < PtrByteSize) 2264 CurArgOffset = CurArgOffset + (PtrByteSize - ObjSize); 2265 // The value of the object is its address. 2266 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true); 2267 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2268 InVals.push_back(FIN); 2269 2270 if (ObjSize < 8) { 2271 if (GPR_idx != Num_GPR_Regs) { 2272 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2273 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2274 SDValue Store; 2275 2276 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 2277 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 2278 (ObjSize == 2 ? MVT::i16 : MVT::i32)); 2279 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 2280 MachinePointerInfo(FuncArg, CurArgOffset), 2281 ObjType, false, false, 0); 2282 } else { 2283 // For sizes that don't fit a truncating store (3, 5, 6, 7), 2284 // store the whole register as-is to the parameter save area 2285 // slot. The address of the parameter was already calculated 2286 // above (InVals.push_back(FIN)) to be the right-justified 2287 // offset within the slot. For this store, we need a new 2288 // frame index that points at the beginning of the slot. 
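// Worked example with illustrative numbers: a 3-byte byval aggregate whose slot starts at ArgOffset 48 is right-justified, so CurArgOffset above became 48 + (8 - 3) = 53 and the FIN pushed into InVals points at byte 53.  The full doubleword copied out of the GPR must still land at byte 48, hence the second, left-justified frame index created just below.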
2289 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 2290 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2291 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2292 MachinePointerInfo(FuncArg, ArgOffset), 2293 false, false, 0); 2294 } 2295 2296 MemOps.push_back(Store); 2297 ++GPR_idx; 2298 } 2299 // Whether we copied from a register or not, advance the offset 2300 // into the parameter save area by a full doubleword. 2301 ArgOffset += PtrByteSize; 2302 continue; 2303 } 2304 2305 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 2306 // Store whatever pieces of the object are in registers 2307 // to memory. ArgOffset will be the address of the beginning 2308 // of the object. 2309 if (GPR_idx != Num_GPR_Regs) { 2310 unsigned VReg; 2311 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2312 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 2313 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2314 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2315 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2316 MachinePointerInfo(FuncArg, ArgOffset), 2317 false, false, 0); 2318 MemOps.push_back(Store); 2319 ++GPR_idx; 2320 ArgOffset += PtrByteSize; 2321 } else { 2322 ArgOffset += ArgSize - j; 2323 break; 2324 } 2325 } 2326 continue; 2327 } 2328 2329 switch (ObjectVT.getSimpleVT().SimpleTy) { 2330 default: llvm_unreachable("Unhandled argument type!"); 2331 case MVT::i32: 2332 case MVT::i64: 2333 if (GPR_idx != Num_GPR_Regs) { 2334 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2335 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 2336 2337 if (ObjectVT == MVT::i32) 2338 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 2339 // value to MVT::i64 and then truncate to the correct register size. 2340 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 2341 2342 ++GPR_idx; 2343 } else { 2344 needsLoad = true; 2345 ArgSize = PtrByteSize; 2346 } 2347 ArgOffset += 8; 2348 break; 2349 2350 case MVT::f32: 2351 case MVT::f64: 2352 // Every 8 bytes of argument space consumes one of the GPRs available for 2353 // argument passing. 2354 if (GPR_idx != Num_GPR_Regs) { 2355 ++GPR_idx; 2356 } 2357 if (FPR_idx != Num_FPR_Regs) { 2358 unsigned VReg; 2359 2360 if (ObjectVT == MVT::f32) 2361 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 2362 else 2363 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 2364 2365 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2366 ++FPR_idx; 2367 } else { 2368 needsLoad = true; 2369 ArgSize = PtrByteSize; 2370 } 2371 2372 ArgOffset += 8; 2373 break; 2374 case MVT::v4f32: 2375 case MVT::v4i32: 2376 case MVT::v8i16: 2377 case MVT::v16i8: 2378 // Note that vector arguments in registers don't reserve stack space, 2379 // except in varargs functions. 2380 if (VR_idx != Num_VR_Regs) { 2381 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 2382 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2383 if (isVarArg) { 2384 while ((ArgOffset % 16) != 0) { 2385 ArgOffset += PtrByteSize; 2386 if (GPR_idx != Num_GPR_Regs) 2387 GPR_idx++; 2388 } 2389 ArgOffset += 16; 2390 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 2391 } 2392 ++VR_idx; 2393 } else { 2394 // Vectors are aligned. 
2395 ArgOffset = ((ArgOffset+15)/16)*16; 2396 CurArgOffset = ArgOffset; 2397 ArgOffset += 16; 2398 needsLoad = true; 2399 } 2400 break; 2401 } 2402 2403 // We need to load the argument to a virtual register if we determined 2404 // above that we ran out of physical registers of the appropriate type. 2405 if (needsLoad) { 2406 int FI = MFI->CreateFixedObject(ObjSize, 2407 CurArgOffset + (ArgSize - ObjSize), 2408 isImmutable); 2409 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2410 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(), 2411 false, false, false, 0); 2412 } 2413 2414 InVals.push_back(ArgVal); 2415 } 2416 2417 // Set the size that is at least reserved in caller of this function. Tail 2418 // call optimized functions' reserved stack space needs to be aligned so that 2419 // taking the difference between two stack areas will result in an aligned 2420 // stack. 2421 setMinReservedArea(MF, DAG, nAltivecParamsAtEnd, MinReservedArea, true); 2422 2423 // If the function takes variable number of arguments, make a frame index for 2424 // the start of the first vararg value... for expansion of llvm.va_start. 2425 if (isVarArg) { 2426 int Depth = ArgOffset; 2427 2428 FuncInfo->setVarArgsFrameIndex( 2429 MFI->CreateFixedObject(PtrByteSize, Depth, true)); 2430 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2431 2432 // If this function is vararg, store any remaining integer argument regs 2433 // to their spots on the stack so that they may be loaded by deferencing the 2434 // result of va_next. 2435 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { 2436 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2437 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2438 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2439 MachinePointerInfo(), false, false, 0); 2440 MemOps.push_back(Store); 2441 // Increment the address by four for the next argument to store 2442 SDValue PtrOff = DAG.getConstant(PtrByteSize, PtrVT); 2443 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2444 } 2445 } 2446 2447 if (!MemOps.empty()) 2448 Chain = DAG.getNode(ISD::TokenFactor, dl, 2449 MVT::Other, &MemOps[0], MemOps.size()); 2450 2451 return Chain; 2452 } 2453 2454 SDValue 2455 PPCTargetLowering::LowerFormalArguments_Darwin( 2456 SDValue Chain, 2457 CallingConv::ID CallConv, bool isVarArg, 2458 const SmallVectorImpl<ISD::InputArg> 2459 &Ins, 2460 SDLoc dl, SelectionDAG &DAG, 2461 SmallVectorImpl<SDValue> &InVals) const { 2462 // TODO: add description of PPC stack frame format, or at least some docs. 2463 // 2464 MachineFunction &MF = DAG.getMachineFunction(); 2465 MachineFrameInfo *MFI = MF.getFrameInfo(); 2466 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2467 2468 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2469 bool isPPC64 = PtrVT == MVT::i64; 2470 // Potential tail calls could cause overwriting of argument stack slots. 2471 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 2472 (CallConv == CallingConv::Fast)); 2473 unsigned PtrByteSize = isPPC64 ? 8 : 4; 2474 2475 unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true); 2476 // Area that is at least reserved in caller of this function. 2477 unsigned MinReservedArea = ArgOffset; 2478 2479 static const uint16_t GPR_32[] = { // 32-bit registers. 2480 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2481 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2482 }; 2483 static const uint16_t GPR_64[] = { // 64-bit registers. 
2484 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 2485 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 2486 }; 2487 2488 static const uint16_t *FPR = GetFPR(); 2489 2490 static const uint16_t VR[] = { 2491 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 2492 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 2493 }; 2494 2495 const unsigned Num_GPR_Regs = array_lengthof(GPR_32); 2496 const unsigned Num_FPR_Regs = 13; 2497 const unsigned Num_VR_Regs = array_lengthof( VR); 2498 2499 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 2500 2501 const uint16_t *GPR = isPPC64 ? GPR_64 : GPR_32; 2502 2503 // In 32-bit non-varargs functions, the stack space for vectors is after the 2504 // stack space for non-vectors. We do not use this space unless we have 2505 // too many vectors to fit in registers, something that only occurs in 2506 // constructed examples:), but we have to walk the arglist to figure 2507 // that out...for the pathological case, compute VecArgOffset as the 2508 // start of the vector parameter area. Computing VecArgOffset is the 2509 // entire point of the following loop. 2510 unsigned VecArgOffset = ArgOffset; 2511 if (!isVarArg && !isPPC64) { 2512 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; 2513 ++ArgNo) { 2514 EVT ObjectVT = Ins[ArgNo].VT; 2515 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 2516 2517 if (Flags.isByVal()) { 2518 // ObjSize is the true size, ArgSize rounded up to multiple of regs. 2519 unsigned ObjSize = Flags.getByValSize(); 2520 unsigned ArgSize = 2521 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2522 VecArgOffset += ArgSize; 2523 continue; 2524 } 2525 2526 switch(ObjectVT.getSimpleVT().SimpleTy) { 2527 default: llvm_unreachable("Unhandled argument type!"); 2528 case MVT::i32: 2529 case MVT::f32: 2530 VecArgOffset += 4; 2531 break; 2532 case MVT::i64: // PPC64 2533 case MVT::f64: 2534 // FIXME: We are guaranteed to be !isPPC64 at this point. 2535 // Does MVT::i64 apply? 2536 VecArgOffset += 8; 2537 break; 2538 case MVT::v4f32: 2539 case MVT::v4i32: 2540 case MVT::v8i16: 2541 case MVT::v16i8: 2542 // Nothing to do, we're only looking at Nonvector args here. 2543 break; 2544 } 2545 } 2546 } 2547 // We've found where the vector parameter area in memory is. Skip the 2548 // first 12 parameters; these don't use that memory. 2549 VecArgOffset = ((VecArgOffset+15)/16)*16; 2550 VecArgOffset += 12*16; 2551 2552 // Add DAG nodes to load the arguments or copy them out of registers. On 2553 // entry to a function on PPC, the arguments start after the linkage area, 2554 // although the first ones are often in registers. 2555 2556 SmallVector<SDValue, 8> MemOps; 2557 unsigned nAltivecParamsAtEnd = 0; 2558 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 2559 unsigned CurArgIdx = 0; 2560 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 2561 SDValue ArgVal; 2562 bool needsLoad = false; 2563 EVT ObjectVT = Ins[ArgNo].VT; 2564 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 2565 unsigned ArgSize = ObjSize; 2566 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 2567 std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx); 2568 CurArgIdx = Ins[ArgNo].OrigArgIndex; 2569 2570 unsigned CurArgOffset = ArgOffset; 2571 2572 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 
2573 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 2574 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 2575 if (isVarArg || isPPC64) { 2576 MinReservedArea = ((MinReservedArea+15)/16)*16; 2577 MinReservedArea += CalculateStackSlotSize(ObjectVT, 2578 Flags, 2579 PtrByteSize); 2580 } else nAltivecParamsAtEnd++; 2581 } else 2582 // Calculate min reserved area. 2583 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 2584 Flags, 2585 PtrByteSize); 2586 2587 // FIXME the codegen can be much improved in some cases. 2588 // We do not have to keep everything in memory. 2589 if (Flags.isByVal()) { 2590 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 2591 ObjSize = Flags.getByValSize(); 2592 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2593 // Objects of size 1 and 2 are right justified, everything else is 2594 // left justified. This means the memory address is adjusted forwards. 2595 if (ObjSize==1 || ObjSize==2) { 2596 CurArgOffset = CurArgOffset + (4 - ObjSize); 2597 } 2598 // The value of the object is its address. 2599 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true); 2600 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2601 InVals.push_back(FIN); 2602 if (ObjSize==1 || ObjSize==2) { 2603 if (GPR_idx != Num_GPR_Regs) { 2604 unsigned VReg; 2605 if (isPPC64) 2606 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2607 else 2608 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2609 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2610 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16; 2611 SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 2612 MachinePointerInfo(FuncArg, 2613 CurArgOffset), 2614 ObjType, false, false, 0); 2615 MemOps.push_back(Store); 2616 ++GPR_idx; 2617 } 2618 2619 ArgOffset += PtrByteSize; 2620 2621 continue; 2622 } 2623 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 2624 // Store whatever pieces of the object are in registers 2625 // to memory. ArgOffset will be the address of the beginning 2626 // of the object. 2627 if (GPR_idx != Num_GPR_Regs) { 2628 unsigned VReg; 2629 if (isPPC64) 2630 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2631 else 2632 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2633 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 2634 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2635 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2636 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2637 MachinePointerInfo(FuncArg, ArgOffset), 2638 false, false, 0); 2639 MemOps.push_back(Store); 2640 ++GPR_idx; 2641 ArgOffset += PtrByteSize; 2642 } else { 2643 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 2644 break; 2645 } 2646 } 2647 continue; 2648 } 2649 2650 switch (ObjectVT.getSimpleVT().SimpleTy) { 2651 default: llvm_unreachable("Unhandled argument type!"); 2652 case MVT::i32: 2653 if (!isPPC64) { 2654 if (GPR_idx != Num_GPR_Regs) { 2655 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2656 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 2657 ++GPR_idx; 2658 } else { 2659 needsLoad = true; 2660 ArgSize = PtrByteSize; 2661 } 2662 // All int arguments reserve stack space in the Darwin ABI. 
2663 ArgOffset += PtrByteSize; 2664 break; 2665 } 2666 // FALLTHROUGH 2667 case MVT::i64: // PPC64 2668 if (GPR_idx != Num_GPR_Regs) { 2669 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2670 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 2671 2672 if (ObjectVT == MVT::i32) 2673 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 2674 // value to MVT::i64 and then truncate to the correct register size. 2675 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 2676 2677 ++GPR_idx; 2678 } else { 2679 needsLoad = true; 2680 ArgSize = PtrByteSize; 2681 } 2682 // All int arguments reserve stack space in the Darwin ABI. 2683 ArgOffset += 8; 2684 break; 2685 2686 case MVT::f32: 2687 case MVT::f64: 2688 // Every 4 bytes of argument space consumes one of the GPRs available for 2689 // argument passing. 2690 if (GPR_idx != Num_GPR_Regs) { 2691 ++GPR_idx; 2692 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 2693 ++GPR_idx; 2694 } 2695 if (FPR_idx != Num_FPR_Regs) { 2696 unsigned VReg; 2697 2698 if (ObjectVT == MVT::f32) 2699 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 2700 else 2701 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 2702 2703 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2704 ++FPR_idx; 2705 } else { 2706 needsLoad = true; 2707 } 2708 2709 // All FP arguments reserve stack space in the Darwin ABI. 2710 ArgOffset += isPPC64 ? 8 : ObjSize; 2711 break; 2712 case MVT::v4f32: 2713 case MVT::v4i32: 2714 case MVT::v8i16: 2715 case MVT::v16i8: 2716 // Note that vector arguments in registers don't reserve stack space, 2717 // except in varargs functions. 2718 if (VR_idx != Num_VR_Regs) { 2719 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 2720 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2721 if (isVarArg) { 2722 while ((ArgOffset % 16) != 0) { 2723 ArgOffset += PtrByteSize; 2724 if (GPR_idx != Num_GPR_Regs) 2725 GPR_idx++; 2726 } 2727 ArgOffset += 16; 2728 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 2729 } 2730 ++VR_idx; 2731 } else { 2732 if (!isVarArg && !isPPC64) { 2733 // Vectors go after all the nonvectors. 2734 CurArgOffset = VecArgOffset; 2735 VecArgOffset += 16; 2736 } else { 2737 // Vectors are aligned. 2738 ArgOffset = ((ArgOffset+15)/16)*16; 2739 CurArgOffset = ArgOffset; 2740 ArgOffset += 16; 2741 } 2742 needsLoad = true; 2743 } 2744 break; 2745 } 2746 2747 // We need to load the argument to a virtual register if we determined above 2748 // that we ran out of physical registers of the appropriate type. 2749 if (needsLoad) { 2750 int FI = MFI->CreateFixedObject(ObjSize, 2751 CurArgOffset + (ArgSize - ObjSize), 2752 isImmutable); 2753 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2754 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(), 2755 false, false, false, 0); 2756 } 2757 2758 InVals.push_back(ArgVal); 2759 } 2760 2761 // Set the size that is at least reserved in caller of this function. Tail 2762 // call optimized functions' reserved stack space needs to be aligned so that 2763 // taking the difference between two stack areas will result in an aligned 2764 // stack. 2765 setMinReservedArea(MF, DAG, nAltivecParamsAtEnd, MinReservedArea, isPPC64); 2766 2767 // If the function takes variable number of arguments, make a frame index for 2768 // the start of the first vararg value... for expansion of llvm.va_start. 
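// On the Darwin ABI va_list is a plain pointer into this contiguous argument image, so once the remaining argument registers have been spilled to their home slots below, a va_arg step is just a pointer bump -- roughly (illustrative only):
//
//   // char *ap = ...;                          // set up by va_start
//   // long v = *(long *)ap; ap += PtrByteSize; // one pointer-sized va_arg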
2769 if (isVarArg) { 2770 int Depth = ArgOffset; 2771 2772 FuncInfo->setVarArgsFrameIndex( 2773 MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 2774 Depth, true)); 2775 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2776 2777 // If this function is vararg, store any remaining integer argument regs 2778 // to their spots on the stack so that they may be loaded by deferencing the 2779 // result of va_next. 2780 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { 2781 unsigned VReg; 2782 2783 if (isPPC64) 2784 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2785 else 2786 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2787 2788 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2789 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2790 MachinePointerInfo(), false, false, 0); 2791 MemOps.push_back(Store); 2792 // Increment the address by four for the next argument to store 2793 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); 2794 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2795 } 2796 } 2797 2798 if (!MemOps.empty()) 2799 Chain = DAG.getNode(ISD::TokenFactor, dl, 2800 MVT::Other, &MemOps[0], MemOps.size()); 2801 2802 return Chain; 2803 } 2804 2805 /// CalculateParameterAndLinkageAreaSize - Get the size of the parameter plus 2806 /// linkage area for the Darwin ABI, or the 64-bit SVR4 ABI. 2807 static unsigned 2808 CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG, 2809 bool isPPC64, 2810 bool isVarArg, 2811 unsigned CC, 2812 const SmallVectorImpl<ISD::OutputArg> 2813 &Outs, 2814 const SmallVectorImpl<SDValue> &OutVals, 2815 unsigned &nAltivecParamsAtEnd) { 2816 // Count how many bytes are to be pushed on the stack, including the linkage 2817 // area, and parameter passing area. We start with 24/48 bytes, which is 2818 // prereserved space for [SP][CR][LR][3 x unused]. 2819 unsigned NumBytes = PPCFrameLowering::getLinkageSize(isPPC64, true); 2820 unsigned NumOps = Outs.size(); 2821 unsigned PtrByteSize = isPPC64 ? 8 : 4; 2822 2823 // Add up all the space actually used. 2824 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually 2825 // they all go in registers, but we must reserve stack space for them for 2826 // possible use by the caller. In varargs or 64-bit calls, parameters are 2827 // assigned stack space in order, with padding so Altivec parameters are 2828 // 16-byte aligned. 2829 nAltivecParamsAtEnd = 0; 2830 for (unsigned i = 0; i != NumOps; ++i) { 2831 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2832 EVT ArgVT = Outs[i].VT; 2833 // Varargs Altivec parameters are padded to a 16 byte boundary. 2834 if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 || 2835 ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) { 2836 if (!isVarArg && !isPPC64) { 2837 // Non-varargs Altivec parameters go after all the non-Altivec 2838 // parameters; handle those later so we know how much padding we need. 2839 nAltivecParamsAtEnd++; 2840 continue; 2841 } 2842 // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary. 2843 NumBytes = ((NumBytes+15)/16)*16; 2844 } 2845 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 2846 } 2847 2848 // Allow for Altivec parameters at the end, if needed. 2849 if (nAltivecParamsAtEnd) { 2850 NumBytes = ((NumBytes+15)/16)*16; 2851 NumBytes += 16*nAltivecParamsAtEnd; 2852 } 2853 2854 // The prolog code of the callee may store up to 8 GPR argument registers to 2855 // the stack, allowing va_start to index over them in memory if its varargs. 
2856 // Because we cannot tell if this is needed on the caller side, we have to 2857 // conservatively assume that it is needed. As such, make sure we have at 2858 // least enough stack space for the caller to store the 8 GPRs. 2859 NumBytes = std::max(NumBytes, 2860 PPCFrameLowering::getMinCallFrameSize(isPPC64, true)); 2861 2862 // Tail call needs the stack to be aligned. 2863 if (CC == CallingConv::Fast && DAG.getTarget().Options.GuaranteedTailCallOpt){ 2864 unsigned TargetAlign = DAG.getMachineFunction().getTarget(). 2865 getFrameLowering()->getStackAlignment(); 2866 unsigned AlignMask = TargetAlign-1; 2867 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 2868 } 2869 2870 return NumBytes; 2871 } 2872 2873 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be 2874 /// adjusted to accommodate the arguments for the tailcall. 2875 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall, 2876 unsigned ParamSize) { 2877 2878 if (!isTailCall) return 0; 2879 2880 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>(); 2881 unsigned CallerMinReservedArea = FI->getMinReservedArea(); 2882 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize; 2883 // Remember only if the new adjustment is bigger. 2884 if (SPDiff < FI->getTailCallSPDelta()) 2885 FI->setTailCallSPDelta(SPDiff); 2886 2887 return SPDiff; 2888 } 2889 2890 /// IsEligibleForTailCallOptimization - Check whether the call is eligible 2891 /// for tail call optimization. Targets which want to do tail call 2892 /// optimization should implement this function. 2893 bool 2894 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 2895 CallingConv::ID CalleeCC, 2896 bool isVarArg, 2897 const SmallVectorImpl<ISD::InputArg> &Ins, 2898 SelectionDAG& DAG) const { 2899 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 2900 return false; 2901 2902 // Variable argument functions are not supported. 2903 if (isVarArg) 2904 return false; 2905 2906 MachineFunction &MF = DAG.getMachineFunction(); 2907 CallingConv::ID CallerCC = MF.getFunction()->getCallingConv(); 2908 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) { 2909 // Functions containing by val parameters are not supported. 2910 for (unsigned i = 0; i != Ins.size(); i++) { 2911 ISD::ArgFlagsTy Flags = Ins[i].Flags; 2912 if (Flags.isByVal()) return false; 2913 } 2914 2915 // Non-PIC/GOT tail calls are supported. 2916 if (getTargetMachine().getRelocationModel() != Reloc::PIC_) 2917 return true; 2918 2919 // At the moment we can only do local tail calls (in same module, hidden 2920 // or protected) if we are generating PIC. 2921 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 2922 return G->getGlobal()->hasHiddenVisibility() 2923 || G->getGlobal()->hasProtectedVisibility(); 2924 } 2925 2926 return false; 2927 } 2928 2929 /// isBLACompatibleAddress - Return the immediate to use if the specified 2930 /// 32-bit value is representable in the immediate field of a BxA instruction. 2931 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) { 2932 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 2933 if (!C) return 0; 2934 2935 int Addr = C->getZExtValue(); 2936 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero. 2937 SignExtend32<26>(Addr) != Addr) 2938 return 0; // Top 6 bits have to be sext of immediate.
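// The LI field of an absolute branch is a 24-bit word offset that the hardware shifts left by two and sign-extends, which is why the checks above require 4-byte alignment and a value unchanged by SignExtend32<26>.  Illustrative numbers: 0x01FFFFFC passes and is encoded as 0x01FFFFFC >> 2; 0x02000000 fails the sign-extension check; 0x01FFFFFE fails the alignment check.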
2939 2940 return DAG.getConstant((int)C->getZExtValue() >> 2, 2941 DAG.getTargetLoweringInfo().getPointerTy()).getNode(); 2942 } 2943 2944 namespace { 2945 2946 struct TailCallArgumentInfo { 2947 SDValue Arg; 2948 SDValue FrameIdxOp; 2949 int FrameIdx; 2950 2951 TailCallArgumentInfo() : FrameIdx(0) {} 2952 }; 2953 2954 } 2955 2956 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. 2957 static void 2958 StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, 2959 SDValue Chain, 2960 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs, 2961 SmallVectorImpl<SDValue> &MemOpChains, 2962 SDLoc dl) { 2963 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 2964 SDValue Arg = TailCallArgs[i].Arg; 2965 SDValue FIN = TailCallArgs[i].FrameIdxOp; 2966 int FI = TailCallArgs[i].FrameIdx; 2967 // Store relative to framepointer. 2968 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, FIN, 2969 MachinePointerInfo::getFixedStack(FI), 2970 false, false, 0)); 2971 } 2972 } 2973 2974 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 2975 /// the appropriate stack slot for the tail call optimized function call. 2976 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, 2977 MachineFunction &MF, 2978 SDValue Chain, 2979 SDValue OldRetAddr, 2980 SDValue OldFP, 2981 int SPDiff, 2982 bool isPPC64, 2983 bool isDarwinABI, 2984 SDLoc dl) { 2985 if (SPDiff) { 2986 // Calculate the new stack slot for the return address. 2987 int SlotSize = isPPC64 ? 8 : 4; 2988 int NewRetAddrLoc = SPDiff + PPCFrameLowering::getReturnSaveOffset(isPPC64, 2989 isDarwinABI); 2990 int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize, 2991 NewRetAddrLoc, true); 2992 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 2993 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 2994 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx, 2995 MachinePointerInfo::getFixedStack(NewRetAddr), 2996 false, false, 0); 2997 2998 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack 2999 // slot as the FP is never overwritten. 3000 if (isDarwinABI) { 3001 int NewFPLoc = 3002 SPDiff + PPCFrameLowering::getFramePointerSaveOffset(isPPC64, isDarwinABI); 3003 int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc, 3004 true); 3005 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 3006 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx, 3007 MachinePointerInfo::getFixedStack(NewFPIdx), 3008 false, false, 0); 3009 } 3010 } 3011 return Chain; 3012 } 3013 3014 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate 3015 /// the position of the argument. 3016 static void 3017 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 3018 SDValue Arg, int SPDiff, unsigned ArgOffset, 3019 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) { 3020 int Offset = ArgOffset + SPDiff; 3021 uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8; 3022 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 3023 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 3024 SDValue FIN = DAG.getFrameIndex(FI, VT); 3025 TailCallArgumentInfo Info; 3026 Info.Arg = Arg; 3027 Info.FrameIdxOp = FIN; 3028 Info.FrameIdx = FI; 3029 TailCallArguments.push_back(Info); 3030 } 3031 3032 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address 3033 /// stack slot. Returns the chain as result and the loaded frame pointers in 3034 /// LROpOut/FPOpout. Used when tail calling. 
3035 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG, 3036 int SPDiff, 3037 SDValue Chain, 3038 SDValue &LROpOut, 3039 SDValue &FPOpOut, 3040 bool isDarwinABI, 3041 SDLoc dl) const { 3042 if (SPDiff) { 3043 // Load the LR and FP stack slot for later adjusting. 3044 EVT VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32; 3045 LROpOut = getReturnAddrFrameIndex(DAG); 3046 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(), 3047 false, false, false, 0); 3048 Chain = SDValue(LROpOut.getNode(), 1); 3049 3050 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack 3051 // slot as the FP is never overwritten. 3052 if (isDarwinABI) { 3053 FPOpOut = getFramePointerFrameIndex(DAG); 3054 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(), 3055 false, false, false, 0); 3056 Chain = SDValue(FPOpOut.getNode(), 1); 3057 } 3058 } 3059 return Chain; 3060 } 3061 3062 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 3063 /// by "Src" to address "Dst" of size "Size". Alignment information is 3064 /// specified by the specific parameter attribute. The copy will be passed as 3065 /// a byval function parameter. 3066 /// Sometimes what we are copying is the end of a larger object, the part that 3067 /// does not fit in registers. 3068 static SDValue 3069 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 3070 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 3071 SDLoc dl) { 3072 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); 3073 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 3074 false, false, MachinePointerInfo(0), 3075 MachinePointerInfo(0)); 3076 } 3077 3078 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 3079 /// tail calls. 3080 static void 3081 LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, 3082 SDValue Arg, SDValue PtrOff, int SPDiff, 3083 unsigned ArgOffset, bool isPPC64, bool isTailCall, 3084 bool isVector, SmallVectorImpl<SDValue> &MemOpChains, 3085 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, 3086 SDLoc dl) { 3087 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3088 if (!isTailCall) { 3089 if (isVector) { 3090 SDValue StackPtr; 3091 if (isPPC64) 3092 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 3093 else 3094 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 3095 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 3096 DAG.getConstant(ArgOffset, PtrVT)); 3097 } 3098 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, 3099 MachinePointerInfo(), false, false, 0)); 3100 // Calculate and remember argument location. 3101 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 3102 TailCallArguments); 3103 } 3104 3105 static 3106 void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 3107 SDLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes, 3108 SDValue LROp, SDValue FPOp, bool isDarwinABI, 3109 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 3110 MachineFunction &MF = DAG.getMachineFunction(); 3111 3112 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 3113 // might overwrite each other in case of tail call optimization. 3114 SmallVector<SDValue, 8> MemOpChains2; 3115 // Do not flag preceding copytoreg stuff together with the following stuff. 
3116 InFlag = SDValue(); 3117 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 3118 MemOpChains2, dl); 3119 if (!MemOpChains2.empty()) 3120 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 3121 &MemOpChains2[0], MemOpChains2.size()); 3122 3123 // Store the return address to the appropriate stack slot. 3124 Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff, 3125 isPPC64, isDarwinABI, dl); 3126 3127 // Emit callseq_end just before tailcall node. 3128 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 3129 DAG.getIntPtrConstant(0, true), InFlag, dl); 3130 InFlag = Chain.getValue(1); 3131 } 3132 3133 static 3134 unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, 3135 SDValue &Chain, SDLoc dl, int SPDiff, bool isTailCall, 3136 SmallVectorImpl<std::pair<unsigned, SDValue> > &RegsToPass, 3137 SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys, 3138 const PPCSubtarget &PPCSubTarget) { 3139 3140 bool isPPC64 = PPCSubTarget.isPPC64(); 3141 bool isSVR4ABI = PPCSubTarget.isSVR4ABI(); 3142 3143 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3144 NodeTys.push_back(MVT::Other); // Returns a chain 3145 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. 3146 3147 unsigned CallOpc = PPCISD::CALL; 3148 3149 bool needIndirectCall = true; 3150 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { 3151 // If this is an absolute destination address, use the munged value. 3152 Callee = SDValue(Dest, 0); 3153 needIndirectCall = false; 3154 } 3155 3156 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 3157 // XXX Work around for http://llvm.org/bugs/show_bug.cgi?id=5201 3158 // Use indirect calls for ALL functions calls in JIT mode, since the 3159 // far-call stubs may be outside relocation limits for a BL instruction. 3160 if (!DAG.getTarget().getSubtarget<PPCSubtarget>().isJITCodeModel()) { 3161 unsigned OpFlags = 0; 3162 if (DAG.getTarget().getRelocationModel() != Reloc::Static && 3163 (PPCSubTarget.getTargetTriple().isMacOSX() && 3164 PPCSubTarget.getTargetTriple().isMacOSXVersionLT(10, 5)) && 3165 (G->getGlobal()->isDeclaration() || 3166 G->getGlobal()->isWeakForLinker())) { 3167 // PC-relative references to external symbols should go through $stub, 3168 // unless we're building with the leopard linker or later, which 3169 // automatically synthesizes these stubs. 3170 OpFlags = PPCII::MO_DARWIN_STUB; 3171 } 3172 3173 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, 3174 // every direct call is) turn it into a TargetGlobalAddress / 3175 // TargetExternalSymbol node so that legalize doesn't hack it. 3176 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, 3177 Callee.getValueType(), 3178 0, OpFlags); 3179 needIndirectCall = false; 3180 } 3181 } 3182 3183 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 3184 unsigned char OpFlags = 0; 3185 3186 if (DAG.getTarget().getRelocationModel() != Reloc::Static && 3187 (PPCSubTarget.getTargetTriple().isMacOSX() && 3188 PPCSubTarget.getTargetTriple().isMacOSXVersionLT(10, 5))) { 3189 // PC-relative references to external symbols should go through $stub, 3190 // unless we're building with the leopard linker or later, which 3191 // automatically synthesizes these stubs. 
3192 OpFlags = PPCII::MO_DARWIN_STUB; 3193 } 3194 3195 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(), 3196 OpFlags); 3197 needIndirectCall = false; 3198 } 3199 3200 if (needIndirectCall) { 3201 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 3202 // to do the call, we can't use PPCISD::CALL. 3203 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 3204 3205 if (isSVR4ABI && isPPC64) { 3206 // Function pointers in the 64-bit SVR4 ABI do not point to the function 3207 // entry point, but to the function descriptor (the function entry point 3208 // address is part of the function descriptor though). 3209 // The function descriptor is a three doubleword structure with the 3210 // following fields: function entry point, TOC base address and 3211 // environment pointer. 3212 // Thus for a call through a function pointer, the following actions need 3213 // to be performed: 3214 // 1. Save the TOC of the caller in the TOC save area of its stack 3215 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). 3216 // 2. Load the address of the function entry point from the function 3217 // descriptor. 3218 // 3. Load the TOC of the callee from the function descriptor into r2. 3219 // 4. Load the environment pointer from the function descriptor into 3220 // r11. 3221 // 5. Branch to the function entry point address. 3222 // 6. On return of the callee, the TOC of the caller needs to be 3223 // restored (this is done in FinishCall()). 3224 // 3225 // All those operations are flagged together to ensure that no other 3226 // operations can be scheduled in between. E.g. without flagging the 3227 // operations together, a TOC access in the caller could be scheduled 3228 // between the load of the callee TOC and the branch to the callee, which 3229 // results in the TOC access going through the TOC of the callee instead 3230 // of going through the TOC of the caller, which leads to incorrect code. 3231 3232 // Load the address of the function entry point from the function 3233 // descriptor. 3234 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other, MVT::Glue); 3235 SDValue LoadFuncPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, MTCTROps, 3236 InFlag.getNode() ? 3 : 2); 3237 Chain = LoadFuncPtr.getValue(1); 3238 InFlag = LoadFuncPtr.getValue(2); 3239 3240 // Load environment pointer into r11. 3241 // Offset of the environment pointer within the function descriptor. 3242 SDValue PtrOff = DAG.getIntPtrConstant(16); 3243 3244 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 3245 SDValue LoadEnvPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, Chain, AddPtr, 3246 InFlag); 3247 Chain = LoadEnvPtr.getValue(1); 3248 InFlag = LoadEnvPtr.getValue(2); 3249 3250 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 3251 InFlag); 3252 Chain = EnvVal.getValue(0); 3253 InFlag = EnvVal.getValue(1); 3254 3255 // Load TOC of the callee into r2. We are using a target-specific load 3256 // with r2 hard coded, because the result of a target-independent load 3257 // would never go directly into r2, since r2 is a reserved register (which 3258 // prevents the register allocator from allocating it), resulting in an 3259 // additional register being allocated and an unnecessary move instruction 3260 // being generated. 
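// As a rough sketch (the field names below are invented for illustration;
// the lowering code never declares such a struct), the descriptor that a
// 64-bit SVR4 function pointer designates can be pictured as:
//
//   struct FunctionDescriptor {
//     uint64_t EntryPoint;  // +0:  loaded above (LoadFuncPtr), later moved to CTR
//     uint64_t TOCBase;     // +8:  loaded into r2 by the LOAD_TOC node below
//     uint64_t EnvPointer;  // +16: loaded above and copied into r11
//   };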
3261 VTs = DAG.getVTList(MVT::Other, MVT::Glue); 3262 SDValue LoadTOCPtr = DAG.getNode(PPCISD::LOAD_TOC, dl, VTs, Chain, 3263 Callee, InFlag); 3264 Chain = LoadTOCPtr.getValue(0); 3265 InFlag = LoadTOCPtr.getValue(1); 3266 3267 MTCTROps[0] = Chain; 3268 MTCTROps[1] = LoadFuncPtr; 3269 MTCTROps[2] = InFlag; 3270 } 3271 3272 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, MTCTROps, 3273 2 + (InFlag.getNode() != 0)); 3274 InFlag = Chain.getValue(1); 3275 3276 NodeTys.clear(); 3277 NodeTys.push_back(MVT::Other); 3278 NodeTys.push_back(MVT::Glue); 3279 Ops.push_back(Chain); 3280 CallOpc = PPCISD::BCTRL; 3281 Callee.setNode(0); 3282 // Add use of X11 (holding environment pointer) 3283 if (isSVR4ABI && isPPC64) 3284 Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); 3285 // Add CTR register as callee so a bctr can be emitted later. 3286 if (isTailCall) 3287 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 3288 } 3289 3290 // If this is a direct call, pass the chain and the callee. 3291 if (Callee.getNode()) { 3292 Ops.push_back(Chain); 3293 Ops.push_back(Callee); 3294 } 3295 // If this is a tail call add stack pointer delta. 3296 if (isTailCall) 3297 Ops.push_back(DAG.getConstant(SPDiff, MVT::i32)); 3298 3299 // Add argument registers to the end of the list so that they are known live 3300 // into the call. 3301 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 3302 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 3303 RegsToPass[i].second.getValueType())); 3304 3305 return CallOpc; 3306 } 3307 3308 static 3309 bool isLocalCall(const SDValue &Callee) 3310 { 3311 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 3312 return !G->getGlobal()->isDeclaration() && 3313 !G->getGlobal()->isWeakForLinker(); 3314 return false; 3315 } 3316 3317 SDValue 3318 PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 3319 CallingConv::ID CallConv, bool isVarArg, 3320 const SmallVectorImpl<ISD::InputArg> &Ins, 3321 SDLoc dl, SelectionDAG &DAG, 3322 SmallVectorImpl<SDValue> &InVals) const { 3323 3324 SmallVector<CCValAssign, 16> RVLocs; 3325 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), 3326 getTargetMachine(), RVLocs, *DAG.getContext()); 3327 CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC); 3328 3329 // Copy all of the result registers out of their specified physreg. 
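// For example (a sketch of one typical case, not an exhaustive list): a small
// integer result returned zero-extended in R3 shows up here as a ZExt
// location, so the loop below wraps the CopyFromReg in an AssertZext and then
// truncates it back down to the value type expected by the caller.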
3330 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 3331 CCValAssign &VA = RVLocs[i]; 3332 assert(VA.isRegLoc() && "Can only return in registers!"); 3333 3334 SDValue Val = DAG.getCopyFromReg(Chain, dl, 3335 VA.getLocReg(), VA.getLocVT(), InFlag); 3336 Chain = Val.getValue(1); 3337 InFlag = Val.getValue(2); 3338 3339 switch (VA.getLocInfo()) { 3340 default: llvm_unreachable("Unknown loc info!"); 3341 case CCValAssign::Full: break; 3342 case CCValAssign::AExt: 3343 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 3344 break; 3345 case CCValAssign::ZExt: 3346 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val, 3347 DAG.getValueType(VA.getValVT())); 3348 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 3349 break; 3350 case CCValAssign::SExt: 3351 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val, 3352 DAG.getValueType(VA.getValVT())); 3353 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 3354 break; 3355 } 3356 3357 InVals.push_back(Val); 3358 } 3359 3360 return Chain; 3361 } 3362 3363 SDValue 3364 PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl, 3365 bool isTailCall, bool isVarArg, 3366 SelectionDAG &DAG, 3367 SmallVector<std::pair<unsigned, SDValue>, 8> 3368 &RegsToPass, 3369 SDValue InFlag, SDValue Chain, 3370 SDValue &Callee, 3371 int SPDiff, unsigned NumBytes, 3372 const SmallVectorImpl<ISD::InputArg> &Ins, 3373 SmallVectorImpl<SDValue> &InVals) const { 3374 std::vector<EVT> NodeTys; 3375 SmallVector<SDValue, 8> Ops; 3376 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, dl, SPDiff, 3377 isTailCall, RegsToPass, Ops, NodeTys, 3378 PPCSubTarget); 3379 3380 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls 3381 if (isVarArg && PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64()) 3382 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); 3383 3384 // When performing tail call optimization the callee pops its arguments off 3385 // the stack. Account for this here so these bytes can be pushed back on in 3386 // PPCFrameLowering::eliminateCallFramePseudoInstr. 3387 int BytesCalleePops = 3388 (CallConv == CallingConv::Fast && 3389 getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0; 3390 3391 // Add a register mask operand representing the call-preserved registers. 3392 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); 3393 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv); 3394 assert(Mask && "Missing call preserved mask for calling convention"); 3395 Ops.push_back(DAG.getRegisterMask(Mask)); 3396 3397 if (InFlag.getNode()) 3398 Ops.push_back(InFlag); 3399 3400 // Emit tail call. 3401 if (isTailCall) { 3402 assert(((Callee.getOpcode() == ISD::Register && 3403 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) || 3404 Callee.getOpcode() == ISD::TargetExternalSymbol || 3405 Callee.getOpcode() == ISD::TargetGlobalAddress || 3406 isa<ConstantSDNode>(Callee)) && 3407 "Expecting an global address, external symbol, absolute value or register"); 3408 3409 return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, &Ops[0], Ops.size()); 3410 } 3411 3412 // Add a NOP immediately after the branch instruction when using the 64-bit 3413 // SVR4 ABI. At link time, if caller and callee are in a different module and 3414 // thus have a different TOC, the call will be replaced with a call to a stub 3415 // function which saves the current TOC, loads the TOC of the callee and 3416 // branches to the callee. 
The NOP will be replaced with a load instruction 3417 // which restores the TOC of the caller from the TOC save slot of the current 3418 // stack frame. If caller and callee belong to the same module (and have the 3419 // same TOC), the NOP will remain unchanged. 3420 3421 bool needsTOCRestore = false; 3422 if (!isTailCall && PPCSubTarget.isSVR4ABI()&& PPCSubTarget.isPPC64()) { 3423 if (CallOpc == PPCISD::BCTRL) { 3424 // This is a call through a function pointer. 3425 // Restore the caller TOC from the save area into R2. 3426 // See PrepareCall() for more information about calls through function 3427 // pointers in the 64-bit SVR4 ABI. 3428 // We are using a target-specific load with r2 hard coded, because the 3429 // result of a target-independent load would never go directly into r2, 3430 // since r2 is a reserved register (which prevents the register allocator 3431 // from allocating it), resulting in an additional register being 3432 // allocated and an unnecessary move instruction being generated. 3433 needsTOCRestore = true; 3434 } else if ((CallOpc == PPCISD::CALL) && !isLocalCall(Callee)) { 3435 // Otherwise insert NOP for non-local calls. 3436 CallOpc = PPCISD::CALL_NOP; 3437 } 3438 } 3439 3440 Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size()); 3441 InFlag = Chain.getValue(1); 3442 3443 if (needsTOCRestore) { 3444 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 3445 Chain = DAG.getNode(PPCISD::TOC_RESTORE, dl, VTs, Chain, InFlag); 3446 InFlag = Chain.getValue(1); 3447 } 3448 3449 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 3450 DAG.getIntPtrConstant(BytesCalleePops, true), 3451 InFlag, dl); 3452 if (!Ins.empty()) 3453 InFlag = Chain.getValue(1); 3454 3455 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 3456 Ins, dl, DAG, InVals); 3457 } 3458 3459 SDValue 3460 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 3461 SmallVectorImpl<SDValue> &InVals) const { 3462 SelectionDAG &DAG = CLI.DAG; 3463 SDLoc &dl = CLI.DL; 3464 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 3465 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 3466 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 3467 SDValue Chain = CLI.Chain; 3468 SDValue Callee = CLI.Callee; 3469 bool &isTailCall = CLI.IsTailCall; 3470 CallingConv::ID CallConv = CLI.CallConv; 3471 bool isVarArg = CLI.IsVarArg; 3472 3473 if (isTailCall) 3474 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 3475 Ins, DAG); 3476 3477 if (PPCSubTarget.isSVR4ABI()) { 3478 if (PPCSubTarget.isPPC64()) 3479 return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg, 3480 isTailCall, Outs, OutVals, Ins, 3481 dl, DAG, InVals); 3482 else 3483 return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg, 3484 isTailCall, Outs, OutVals, Ins, 3485 dl, DAG, InVals); 3486 } 3487 3488 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg, 3489 isTailCall, Outs, OutVals, Ins, 3490 dl, DAG, InVals); 3491 } 3492 3493 SDValue 3494 PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee, 3495 CallingConv::ID CallConv, bool isVarArg, 3496 bool isTailCall, 3497 const SmallVectorImpl<ISD::OutputArg> &Outs, 3498 const SmallVectorImpl<SDValue> &OutVals, 3499 const SmallVectorImpl<ISD::InputArg> &Ins, 3500 SDLoc dl, SelectionDAG &DAG, 3501 SmallVectorImpl<SDValue> &InVals) const { 3502 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description 3503 // of the 32-bit SVR4 ABI stack frame layout. 
3504
3505 assert((CallConv == CallingConv::C ||
3506 CallConv == CallingConv::Fast) && "Unknown calling convention!");
3507
3508 unsigned PtrByteSize = 4;
3509
3510 MachineFunction &MF = DAG.getMachineFunction();
3511
3512 // Mark this function as potentially containing a function that contains a
3513 // tail call. As a consequence the frame pointer will be used for dynamic stack
3514 // allocation and for restoring the caller's stack pointer in this function's
3515 // epilogue. This is done because, by tail calling, the called function might
3516 // overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
3517 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
3518 CallConv == CallingConv::Fast)
3519 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
3520
3521 // Count how many bytes are to be pushed on the stack, including the linkage
3522 // area, parameter list area and the part of the local variable space which
3523 // contains copies of aggregates which are passed by value.
3524
3525 // Assign locations to all of the outgoing arguments.
3526 SmallVector<CCValAssign, 16> ArgLocs;
3527 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3528 getTargetMachine(), ArgLocs, *DAG.getContext());
3529
3530 // Reserve space for the linkage area on the stack.
3531 CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize);
3532
3533 if (isVarArg) {
3534 // Handle fixed and variable vector arguments differently.
3535 // Fixed vector arguments go into registers as long as registers are
3536 // available. Variable vector arguments always go into memory.
3537 unsigned NumArgs = Outs.size();
3538
3539 for (unsigned i = 0; i != NumArgs; ++i) {
3540 MVT ArgVT = Outs[i].VT;
3541 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
3542 bool Result;
3543
3544 if (Outs[i].IsFixed) {
3545 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
3546 CCInfo);
3547 } else {
3548 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
3549 ArgFlags, CCInfo);
3550 }
3551
3552 if (Result) {
3553 #ifndef NDEBUG
3554 errs() << "Call operand #" << i << " has unhandled type "
3555 << EVT(ArgVT).getEVTString() << "\n";
3556 #endif
3557 llvm_unreachable(0);
3558 }
3559 }
3560 } else {
3561 // All arguments are treated the same.
3562 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
3563 }
3564
3565 // Assign locations to all of the outgoing aggregate by value arguments.
3566 SmallVector<CCValAssign, 16> ByValArgLocs;
3567 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3568 getTargetMachine(), ByValArgLocs, *DAG.getContext());
3569
3570 // Reserve stack space for the allocations in CCInfo.
3571 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
3572
3573 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
3574
3575 // Size of the linkage area, parameter list area and the part of the local
3576 // variable space where copies of aggregates which are passed by value are
3577 // stored.
3578 unsigned NumBytes = CCByValInfo.getNextStackOffset();
3579
3580 // Calculate by how many bytes the stack has to be adjusted in case of tail
3581 // call optimization.
3582 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
3583
3584 // Adjust the stack pointer for the new arguments...
3585 // These operations are automatically eliminated by the prolog/epilog pass 3586 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true), 3587 dl); 3588 SDValue CallSeqStart = Chain; 3589 3590 // Load the return address and frame pointer so it can be moved somewhere else 3591 // later. 3592 SDValue LROp, FPOp; 3593 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false, 3594 dl); 3595 3596 // Set up a copy of the stack pointer for use loading and storing any 3597 // arguments that may not fit in the registers available for argument 3598 // passing. 3599 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 3600 3601 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 3602 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 3603 SmallVector<SDValue, 8> MemOpChains; 3604 3605 bool seenFloatArg = false; 3606 // Walk the register/memloc assignments, inserting copies/loads. 3607 for (unsigned i = 0, j = 0, e = ArgLocs.size(); 3608 i != e; 3609 ++i) { 3610 CCValAssign &VA = ArgLocs[i]; 3611 SDValue Arg = OutVals[i]; 3612 ISD::ArgFlagsTy Flags = Outs[i].Flags; 3613 3614 if (Flags.isByVal()) { 3615 // Argument is an aggregate which is passed by value, thus we need to 3616 // create a copy of it in the local variable space of the current stack 3617 // frame (which is the stack frame of the caller) and pass the address of 3618 // this copy to the callee. 3619 assert((j < ByValArgLocs.size()) && "Index out of bounds!"); 3620 CCValAssign &ByValVA = ByValArgLocs[j++]; 3621 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); 3622 3623 // Memory reserved in the local variable space of the callers stack frame. 3624 unsigned LocMemOffset = ByValVA.getLocMemOffset(); 3625 3626 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 3627 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 3628 3629 // Create a copy of the argument in the local area of the current 3630 // stack frame. 3631 SDValue MemcpyCall = 3632 CreateCopyOfByValArgument(Arg, PtrOff, 3633 CallSeqStart.getNode()->getOperand(0), 3634 Flags, DAG, dl); 3635 3636 // This must go outside the CALLSEQ_START..END. 3637 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 3638 CallSeqStart.getNode()->getOperand(1), 3639 SDLoc(MemcpyCall)); 3640 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 3641 NewCallSeqStart.getNode()); 3642 Chain = CallSeqStart = NewCallSeqStart; 3643 3644 // Pass the address of the aggregate copy on the stack either in a 3645 // physical register or in the parameter list area of the current stack 3646 // frame to the callee. 3647 Arg = PtrOff; 3648 } 3649 3650 if (VA.isRegLoc()) { 3651 seenFloatArg |= VA.getLocVT().isFloatingPoint(); 3652 // Put argument in a physical register. 3653 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 3654 } else { 3655 // Put argument in the parameter list area of the current stack frame. 3656 assert(VA.isMemLoc()); 3657 unsigned LocMemOffset = VA.getLocMemOffset(); 3658 3659 if (!isTailCall) { 3660 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 3661 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 3662 3663 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, 3664 MachinePointerInfo(), 3665 false, false, 0)); 3666 } else { 3667 // Calculate and remember argument location. 
3668 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset, 3669 TailCallArguments); 3670 } 3671 } 3672 } 3673 3674 if (!MemOpChains.empty()) 3675 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 3676 &MemOpChains[0], MemOpChains.size()); 3677 3678 // Build a sequence of copy-to-reg nodes chained together with token chain 3679 // and flag operands which copy the outgoing args into the appropriate regs. 3680 SDValue InFlag; 3681 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 3682 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 3683 RegsToPass[i].second, InFlag); 3684 InFlag = Chain.getValue(1); 3685 } 3686 3687 // Set CR bit 6 to true if this is a vararg call with floating args passed in 3688 // registers. 3689 if (isVarArg) { 3690 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 3691 SDValue Ops[] = { Chain, InFlag }; 3692 3693 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET, 3694 dl, VTs, Ops, InFlag.getNode() ? 2 : 1); 3695 3696 InFlag = Chain.getValue(1); 3697 } 3698 3699 if (isTailCall) 3700 PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp, 3701 false, TailCallArguments); 3702 3703 return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG, 3704 RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes, 3705 Ins, InVals); 3706 } 3707 3708 // Copy an argument into memory, being careful to do this outside the 3709 // call sequence for the call to which the argument belongs. 3710 SDValue 3711 PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff, 3712 SDValue CallSeqStart, 3713 ISD::ArgFlagsTy Flags, 3714 SelectionDAG &DAG, 3715 SDLoc dl) const { 3716 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, 3717 CallSeqStart.getNode()->getOperand(0), 3718 Flags, DAG, dl); 3719 // The MEMCPY must go outside the CALLSEQ_START..END. 3720 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 3721 CallSeqStart.getNode()->getOperand(1), 3722 SDLoc(MemcpyCall)); 3723 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 3724 NewCallSeqStart.getNode()); 3725 return NewCallSeqStart; 3726 } 3727 3728 SDValue 3729 PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee, 3730 CallingConv::ID CallConv, bool isVarArg, 3731 bool isTailCall, 3732 const SmallVectorImpl<ISD::OutputArg> &Outs, 3733 const SmallVectorImpl<SDValue> &OutVals, 3734 const SmallVectorImpl<ISD::InputArg> &Ins, 3735 SDLoc dl, SelectionDAG &DAG, 3736 SmallVectorImpl<SDValue> &InVals) const { 3737 3738 unsigned NumOps = Outs.size(); 3739 3740 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3741 unsigned PtrByteSize = 8; 3742 3743 MachineFunction &MF = DAG.getMachineFunction(); 3744 3745 // Mark this function as potentially containing a function that contains a 3746 // tail call. As a consequence the frame pointer will be used for dynamicalloc 3747 // and restoring the callers stack pointer in this functions epilog. This is 3748 // done because by tail calling the called function might overwrite the value 3749 // in this function's (MF) stack pointer stack slot 0(SP). 3750 if (getTargetMachine().Options.GuaranteedTailCallOpt && 3751 CallConv == CallingConv::Fast) 3752 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 3753 3754 unsigned nAltivecParamsAtEnd = 0; 3755 3756 // Count how many bytes are to be pushed on the stack, including the linkage 3757 // area, and parameter passing area. We start with at least 48 bytes, which 3758 // is reserved space for [SP][CR][LR][3 x unused]. 
3759 // NOTE: For PPC64, nAltivecParamsAtEnd always remains zero as a result 3760 // of this call. 3761 unsigned NumBytes = 3762 CalculateParameterAndLinkageAreaSize(DAG, true, isVarArg, CallConv, 3763 Outs, OutVals, nAltivecParamsAtEnd); 3764 3765 // Calculate by how many bytes the stack has to be adjusted in case of tail 3766 // call optimization. 3767 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 3768 3769 // To protect arguments on the stack from being clobbered in a tail call, 3770 // force all the loads to happen before doing any other lowering. 3771 if (isTailCall) 3772 Chain = DAG.getStackArgumentTokenFactor(Chain); 3773 3774 // Adjust the stack pointer for the new arguments... 3775 // These operations are automatically eliminated by the prolog/epilog pass 3776 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true), 3777 dl); 3778 SDValue CallSeqStart = Chain; 3779 3780 // Load the return address and frame pointer so it can be move somewhere else 3781 // later. 3782 SDValue LROp, FPOp; 3783 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true, 3784 dl); 3785 3786 // Set up a copy of the stack pointer for use loading and storing any 3787 // arguments that may not fit in the registers available for argument 3788 // passing. 3789 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 3790 3791 // Figure out which arguments are going to go in registers, and which in 3792 // memory. Also, if this is a vararg function, floating point operations 3793 // must be stored to our stack, and loaded into integer regs as well, if 3794 // any integer regs are available for argument passing. 3795 unsigned ArgOffset = PPCFrameLowering::getLinkageSize(true, true); 3796 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3797 3798 static const uint16_t GPR[] = { 3799 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3800 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3801 }; 3802 static const uint16_t *FPR = GetFPR(); 3803 3804 static const uint16_t VR[] = { 3805 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3806 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3807 }; 3808 const unsigned NumGPRs = array_lengthof(GPR); 3809 const unsigned NumFPRs = 13; 3810 const unsigned NumVRs = array_lengthof(VR); 3811 3812 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 3813 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 3814 3815 SmallVector<SDValue, 8> MemOpChains; 3816 for (unsigned i = 0; i != NumOps; ++i) { 3817 SDValue Arg = OutVals[i]; 3818 ISD::ArgFlagsTy Flags = Outs[i].Flags; 3819 3820 // PtrOff will be used to store the current argument to the stack if a 3821 // register cannot be found for it. 3822 SDValue PtrOff; 3823 3824 PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); 3825 3826 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 3827 3828 // Promote integers to 64-bit values. 3829 if (Arg.getValueType() == MVT::i32) { 3830 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 3831 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 3832 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 3833 } 3834 3835 // FIXME memcpy is used way more than necessary. Correctness first. 3836 // Note: "by value" is code for passing a structure by value, not 3837 // basic types. 3838 if (Flags.isByVal()) { 3839 // Note: Size includes alignment padding, so 3840 // struct x { short a; char b; } 3841 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 
3842 // These are the proper values we need for right-justifying the 3843 // aggregate in a parameter register. 3844 unsigned Size = Flags.getByValSize(); 3845 3846 // An empty aggregate parameter takes up no storage and no 3847 // registers. 3848 if (Size == 0) 3849 continue; 3850 3851 // All aggregates smaller than 8 bytes must be passed right-justified. 3852 if (Size==1 || Size==2 || Size==4) { 3853 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 3854 if (GPR_idx != NumGPRs) { 3855 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 3856 MachinePointerInfo(), VT, 3857 false, false, 0); 3858 MemOpChains.push_back(Load.getValue(1)); 3859 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3860 3861 ArgOffset += PtrByteSize; 3862 continue; 3863 } 3864 } 3865 3866 if (GPR_idx == NumGPRs && Size < 8) { 3867 SDValue Const = DAG.getConstant(PtrByteSize - Size, 3868 PtrOff.getValueType()); 3869 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 3870 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 3871 CallSeqStart, 3872 Flags, DAG, dl); 3873 ArgOffset += PtrByteSize; 3874 continue; 3875 } 3876 // Copy entire object into memory. There are cases where gcc-generated 3877 // code assumes it is there, even if it could be put entirely into 3878 // registers. (This is not what the doc says.) 3879 3880 // FIXME: The above statement is likely due to a misunderstanding of the 3881 // documents. All arguments must be copied into the parameter area BY 3882 // THE CALLEE in the event that the callee takes the address of any 3883 // formal argument. That has not yet been implemented. However, it is 3884 // reasonable to use the stack area as a staging area for the register 3885 // load. 3886 3887 // Skip this for small aggregates, as we will use the same slot for a 3888 // right-justified copy, below. 3889 if (Size >= 8) 3890 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 3891 CallSeqStart, 3892 Flags, DAG, dl); 3893 3894 // When a register is available, pass a small aggregate right-justified. 3895 if (Size < 8 && GPR_idx != NumGPRs) { 3896 // The easiest way to get this right-justified in a register 3897 // is to copy the structure into the rightmost portion of a 3898 // local variable slot, then load the whole slot into the 3899 // register. 3900 // FIXME: The memcpy seems to produce pretty awful code for 3901 // small aggregates, particularly for packed ones. 3902 // FIXME: It would be preferable to use the slot in the 3903 // parameter save area instead of a new local variable. 3904 SDValue Const = DAG.getConstant(8 - Size, PtrOff.getValueType()); 3905 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 3906 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 3907 CallSeqStart, 3908 Flags, DAG, dl); 3909 3910 // Load the slot into the register. 3911 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, PtrOff, 3912 MachinePointerInfo(), 3913 false, false, false, 0); 3914 MemOpChains.push_back(Load.getValue(1)); 3915 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3916 3917 // Done with this argument. 3918 ArgOffset += PtrByteSize; 3919 continue; 3920 } 3921 3922 // For aggregates larger than PtrByteSize, copy the pieces of the 3923 // object that fit into registers from the parameter save area. 
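// As a worked example (arbitrary sizes, purely for illustration): a 20-byte
// aggregate with three GPRs still free gets doublewords at offsets 0, 8 and
// 16 loaded into those GPRs, with ArgOffset advancing by 8 for each piece.
// If instead only one GPR were free, the piece at offset 0 would use it and
// the remaining 12 bytes would round up to 16, which is added to ArgOffset
// before the loop below breaks out.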
3924 for (unsigned j=0; j<Size; j+=PtrByteSize) { 3925 SDValue Const = DAG.getConstant(j, PtrOff.getValueType()); 3926 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 3927 if (GPR_idx != NumGPRs) { 3928 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 3929 MachinePointerInfo(), 3930 false, false, false, 0); 3931 MemOpChains.push_back(Load.getValue(1)); 3932 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3933 ArgOffset += PtrByteSize; 3934 } else { 3935 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 3936 break; 3937 } 3938 } 3939 continue; 3940 } 3941 3942 switch (Arg.getValueType().getSimpleVT().SimpleTy) { 3943 default: llvm_unreachable("Unexpected ValueType for argument!"); 3944 case MVT::i32: 3945 case MVT::i64: 3946 if (GPR_idx != NumGPRs) { 3947 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 3948 } else { 3949 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 3950 true, isTailCall, false, MemOpChains, 3951 TailCallArguments, dl); 3952 } 3953 ArgOffset += PtrByteSize; 3954 break; 3955 case MVT::f32: 3956 case MVT::f64: 3957 if (FPR_idx != NumFPRs) { 3958 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 3959 3960 if (isVarArg) { 3961 // A single float or an aggregate containing only a single float 3962 // must be passed right-justified in the stack doubleword, and 3963 // in the GPR, if one is available. 3964 SDValue StoreOff; 3965 if (Arg.getValueType().getSimpleVT().SimpleTy == MVT::f32) { 3966 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 3967 StoreOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 3968 } else 3969 StoreOff = PtrOff; 3970 3971 SDValue Store = DAG.getStore(Chain, dl, Arg, StoreOff, 3972 MachinePointerInfo(), false, false, 0); 3973 MemOpChains.push_back(Store); 3974 3975 // Float varargs are always shadowed in available integer registers 3976 if (GPR_idx != NumGPRs) { 3977 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 3978 MachinePointerInfo(), false, false, 3979 false, 0); 3980 MemOpChains.push_back(Load.getValue(1)); 3981 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3982 } 3983 } else if (GPR_idx != NumGPRs) 3984 // If we have any FPRs remaining, we may also have GPRs remaining. 3985 ++GPR_idx; 3986 } else { 3987 // Single-precision floating-point values are mapped to the 3988 // second (rightmost) word of the stack doubleword. 3989 if (Arg.getValueType() == MVT::f32) { 3990 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 3991 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 3992 } 3993 3994 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 3995 true, isTailCall, false, MemOpChains, 3996 TailCallArguments, dl); 3997 } 3998 ArgOffset += 8; 3999 break; 4000 case MVT::v4f32: 4001 case MVT::v4i32: 4002 case MVT::v8i16: 4003 case MVT::v16i8: 4004 if (isVarArg) { 4005 // These go aligned on the stack, or in the corresponding R registers 4006 // when within range. The Darwin PPC ABI doc claims they also go in 4007 // V registers; in fact gcc does this only for arguments that are 4008 // prototyped, not for those that match the ... We do it for all 4009 // arguments, seems to work. 4010 while (ArgOffset % 16 !=0) { 4011 ArgOffset += PtrByteSize; 4012 if (GPR_idx != NumGPRs) 4013 GPR_idx++; 4014 } 4015 // We could elide this store in the case where the object fits 4016 // entirely in R registers. Maybe later. 
4017 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4018 DAG.getConstant(ArgOffset, PtrVT)); 4019 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 4020 MachinePointerInfo(), false, false, 0); 4021 MemOpChains.push_back(Store); 4022 if (VR_idx != NumVRs) { 4023 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 4024 MachinePointerInfo(), 4025 false, false, false, 0); 4026 MemOpChains.push_back(Load.getValue(1)); 4027 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 4028 } 4029 ArgOffset += 16; 4030 for (unsigned i=0; i<16; i+=PtrByteSize) { 4031 if (GPR_idx == NumGPRs) 4032 break; 4033 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 4034 DAG.getConstant(i, PtrVT)); 4035 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 4036 false, false, false, 0); 4037 MemOpChains.push_back(Load.getValue(1)); 4038 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4039 } 4040 break; 4041 } 4042 4043 // Non-varargs Altivec params generally go in registers, but have 4044 // stack space allocated at the end. 4045 if (VR_idx != NumVRs) { 4046 // Doesn't have GPR space allocated. 4047 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 4048 } else { 4049 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4050 true, isTailCall, true, MemOpChains, 4051 TailCallArguments, dl); 4052 ArgOffset += 16; 4053 } 4054 break; 4055 } 4056 } 4057 4058 if (!MemOpChains.empty()) 4059 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 4060 &MemOpChains[0], MemOpChains.size()); 4061 4062 // Check if this is an indirect call (MTCTR/BCTRL). 4063 // See PrepareCall() for more information about calls through function 4064 // pointers in the 64-bit SVR4 ABI. 4065 if (!isTailCall && 4066 !dyn_cast<GlobalAddressSDNode>(Callee) && 4067 !dyn_cast<ExternalSymbolSDNode>(Callee) && 4068 !isBLACompatibleAddress(Callee, DAG)) { 4069 // Load r2 into a virtual register and store it to the TOC save area. 4070 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64); 4071 // TOC save area offset. 4072 SDValue PtrOff = DAG.getIntPtrConstant(40); 4073 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 4074 Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr, MachinePointerInfo(), 4075 false, false, 0); 4076 // R12 must contain the address of an indirect callee. This does not 4077 // mean the MTCTR instruction must use R12; it's easier to model this 4078 // as an extra parameter, so do that. 4079 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee)); 4080 } 4081 4082 // Build a sequence of copy-to-reg nodes chained together with token chain 4083 // and flag operands which copy the outgoing args into the appropriate regs. 
4084 SDValue InFlag; 4085 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 4086 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 4087 RegsToPass[i].second, InFlag); 4088 InFlag = Chain.getValue(1); 4089 } 4090 4091 if (isTailCall) 4092 PrepareTailCall(DAG, InFlag, Chain, dl, true, SPDiff, NumBytes, LROp, 4093 FPOp, true, TailCallArguments); 4094 4095 return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG, 4096 RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes, 4097 Ins, InVals); 4098 } 4099 4100 SDValue 4101 PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee, 4102 CallingConv::ID CallConv, bool isVarArg, 4103 bool isTailCall, 4104 const SmallVectorImpl<ISD::OutputArg> &Outs, 4105 const SmallVectorImpl<SDValue> &OutVals, 4106 const SmallVectorImpl<ISD::InputArg> &Ins, 4107 SDLoc dl, SelectionDAG &DAG, 4108 SmallVectorImpl<SDValue> &InVals) const { 4109 4110 unsigned NumOps = Outs.size(); 4111 4112 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 4113 bool isPPC64 = PtrVT == MVT::i64; 4114 unsigned PtrByteSize = isPPC64 ? 8 : 4; 4115 4116 MachineFunction &MF = DAG.getMachineFunction(); 4117 4118 // Mark this function as potentially containing a function that contains a 4119 // tail call. As a consequence the frame pointer will be used for dynamicalloc 4120 // and restoring the callers stack pointer in this functions epilog. This is 4121 // done because by tail calling the called function might overwrite the value 4122 // in this function's (MF) stack pointer stack slot 0(SP). 4123 if (getTargetMachine().Options.GuaranteedTailCallOpt && 4124 CallConv == CallingConv::Fast) 4125 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 4126 4127 unsigned nAltivecParamsAtEnd = 0; 4128 4129 // Count how many bytes are to be pushed on the stack, including the linkage 4130 // area, and parameter passing area. We start with 24/48 bytes, which is 4131 // prereserved space for [SP][CR][LR][3 x unused]. 4132 unsigned NumBytes = 4133 CalculateParameterAndLinkageAreaSize(DAG, isPPC64, isVarArg, CallConv, 4134 Outs, OutVals, 4135 nAltivecParamsAtEnd); 4136 4137 // Calculate by how many bytes the stack has to be adjusted in case of tail 4138 // call optimization. 4139 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 4140 4141 // To protect arguments on the stack from being clobbered in a tail call, 4142 // force all the loads to happen before doing any other lowering. 4143 if (isTailCall) 4144 Chain = DAG.getStackArgumentTokenFactor(Chain); 4145 4146 // Adjust the stack pointer for the new arguments... 4147 // These operations are automatically eliminated by the prolog/epilog pass 4148 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true), 4149 dl); 4150 SDValue CallSeqStart = Chain; 4151 4152 // Load the return address and frame pointer so it can be move somewhere else 4153 // later. 4154 SDValue LROp, FPOp; 4155 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true, 4156 dl); 4157 4158 // Set up a copy of the stack pointer for use loading and storing any 4159 // arguments that may not fit in the registers available for argument 4160 // passing. 4161 SDValue StackPtr; 4162 if (isPPC64) 4163 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 4164 else 4165 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 4166 4167 // Figure out which arguments are going to go in registers, and which in 4168 // memory. 
Also, if this is a vararg function, floating point operations 4169 // must be stored to our stack, and loaded into integer regs as well, if 4170 // any integer regs are available for argument passing. 4171 unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true); 4172 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 4173 4174 static const uint16_t GPR_32[] = { // 32-bit registers. 4175 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 4176 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 4177 }; 4178 static const uint16_t GPR_64[] = { // 64-bit registers. 4179 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 4180 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 4181 }; 4182 static const uint16_t *FPR = GetFPR(); 4183 4184 static const uint16_t VR[] = { 4185 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 4186 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 4187 }; 4188 const unsigned NumGPRs = array_lengthof(GPR_32); 4189 const unsigned NumFPRs = 13; 4190 const unsigned NumVRs = array_lengthof(VR); 4191 4192 const uint16_t *GPR = isPPC64 ? GPR_64 : GPR_32; 4193 4194 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 4195 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 4196 4197 SmallVector<SDValue, 8> MemOpChains; 4198 for (unsigned i = 0; i != NumOps; ++i) { 4199 SDValue Arg = OutVals[i]; 4200 ISD::ArgFlagsTy Flags = Outs[i].Flags; 4201 4202 // PtrOff will be used to store the current argument to the stack if a 4203 // register cannot be found for it. 4204 SDValue PtrOff; 4205 4206 PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); 4207 4208 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 4209 4210 // On PPC64, promote integers to 64-bit values. 4211 if (isPPC64 && Arg.getValueType() == MVT::i32) { 4212 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 4213 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 4214 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 4215 } 4216 4217 // FIXME memcpy is used way more than necessary. Correctness first. 4218 // Note: "by value" is code for passing a structure by value, not 4219 // basic types. 4220 if (Flags.isByVal()) { 4221 unsigned Size = Flags.getByValSize(); 4222 // Very small objects are passed right-justified. Everything else is 4223 // passed left-justified. 4224 if (Size==1 || Size==2) { 4225 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 4226 if (GPR_idx != NumGPRs) { 4227 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 4228 MachinePointerInfo(), VT, 4229 false, false, 0); 4230 MemOpChains.push_back(Load.getValue(1)); 4231 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4232 4233 ArgOffset += PtrByteSize; 4234 } else { 4235 SDValue Const = DAG.getConstant(PtrByteSize - Size, 4236 PtrOff.getValueType()); 4237 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 4238 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 4239 CallSeqStart, 4240 Flags, DAG, dl); 4241 ArgOffset += PtrByteSize; 4242 } 4243 continue; 4244 } 4245 // Copy entire object into memory. There are cases where gcc-generated 4246 // code assumes it is there, even if it could be put entirely into 4247 // registers. (This is not what the doc says.) 4248 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 4249 CallSeqStart, 4250 Flags, DAG, dl); 4251 4252 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 4253 // copy the pieces of the object that fit into registers from the 4254 // parameter save area. 
4255 for (unsigned j=0; j<Size; j+=PtrByteSize) { 4256 SDValue Const = DAG.getConstant(j, PtrOff.getValueType()); 4257 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 4258 if (GPR_idx != NumGPRs) { 4259 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 4260 MachinePointerInfo(), 4261 false, false, false, 0); 4262 MemOpChains.push_back(Load.getValue(1)); 4263 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4264 ArgOffset += PtrByteSize; 4265 } else { 4266 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 4267 break; 4268 } 4269 } 4270 continue; 4271 } 4272 4273 switch (Arg.getValueType().getSimpleVT().SimpleTy) { 4274 default: llvm_unreachable("Unexpected ValueType for argument!"); 4275 case MVT::i32: 4276 case MVT::i64: 4277 if (GPR_idx != NumGPRs) { 4278 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 4279 } else { 4280 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4281 isPPC64, isTailCall, false, MemOpChains, 4282 TailCallArguments, dl); 4283 } 4284 ArgOffset += PtrByteSize; 4285 break; 4286 case MVT::f32: 4287 case MVT::f64: 4288 if (FPR_idx != NumFPRs) { 4289 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 4290 4291 if (isVarArg) { 4292 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 4293 MachinePointerInfo(), false, false, 0); 4294 MemOpChains.push_back(Store); 4295 4296 // Float varargs are always shadowed in available integer registers 4297 if (GPR_idx != NumGPRs) { 4298 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 4299 MachinePointerInfo(), false, false, 4300 false, 0); 4301 MemOpChains.push_back(Load.getValue(1)); 4302 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4303 } 4304 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 4305 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 4306 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 4307 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 4308 MachinePointerInfo(), 4309 false, false, false, 0); 4310 MemOpChains.push_back(Load.getValue(1)); 4311 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4312 } 4313 } else { 4314 // If we have any FPRs remaining, we may also have GPRs remaining. 4315 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 4316 // GPRs. 4317 if (GPR_idx != NumGPRs) 4318 ++GPR_idx; 4319 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 4320 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 4321 ++GPR_idx; 4322 } 4323 } else 4324 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4325 isPPC64, isTailCall, false, MemOpChains, 4326 TailCallArguments, dl); 4327 if (isPPC64) 4328 ArgOffset += 8; 4329 else 4330 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 4331 break; 4332 case MVT::v4f32: 4333 case MVT::v4i32: 4334 case MVT::v8i16: 4335 case MVT::v16i8: 4336 if (isVarArg) { 4337 // These go aligned on the stack, or in the corresponding R registers 4338 // when within range. The Darwin PPC ABI doc claims they also go in 4339 // V registers; in fact gcc does this only for arguments that are 4340 // prototyped, not for those that match the ... We do it for all 4341 // arguments, seems to work. 4342 while (ArgOffset % 16 !=0) { 4343 ArgOffset += PtrByteSize; 4344 if (GPR_idx != NumGPRs) 4345 GPR_idx++; 4346 } 4347 // We could elide this store in the case where the object fits 4348 // entirely in R registers. Maybe later. 
4349 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4350 DAG.getConstant(ArgOffset, PtrVT)); 4351 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 4352 MachinePointerInfo(), false, false, 0); 4353 MemOpChains.push_back(Store); 4354 if (VR_idx != NumVRs) { 4355 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 4356 MachinePointerInfo(), 4357 false, false, false, 0); 4358 MemOpChains.push_back(Load.getValue(1)); 4359 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 4360 } 4361 ArgOffset += 16; 4362 for (unsigned i=0; i<16; i+=PtrByteSize) { 4363 if (GPR_idx == NumGPRs) 4364 break; 4365 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 4366 DAG.getConstant(i, PtrVT)); 4367 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 4368 false, false, false, 0); 4369 MemOpChains.push_back(Load.getValue(1)); 4370 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4371 } 4372 break; 4373 } 4374 4375 // Non-varargs Altivec params generally go in registers, but have 4376 // stack space allocated at the end. 4377 if (VR_idx != NumVRs) { 4378 // Doesn't have GPR space allocated. 4379 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 4380 } else if (nAltivecParamsAtEnd==0) { 4381 // We are emitting Altivec params in order. 4382 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4383 isPPC64, isTailCall, true, MemOpChains, 4384 TailCallArguments, dl); 4385 ArgOffset += 16; 4386 } 4387 break; 4388 } 4389 } 4390 // If all Altivec parameters fit in registers, as they usually do, 4391 // they get stack space following the non-Altivec parameters. We 4392 // don't track this here because nobody below needs it. 4393 // If there are more Altivec parameters than fit in registers emit 4394 // the stores here. 4395 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 4396 unsigned j = 0; 4397 // Offset is aligned; skip 1st 12 params which go in V registers. 4398 ArgOffset = ((ArgOffset+15)/16)*16; 4399 ArgOffset += 12*16; 4400 for (unsigned i = 0; i != NumOps; ++i) { 4401 SDValue Arg = OutVals[i]; 4402 EVT ArgType = Outs[i].VT; 4403 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 4404 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 4405 if (++j > NumVRs) { 4406 SDValue PtrOff; 4407 // We are emitting Altivec params in order. 4408 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4409 isPPC64, isTailCall, true, MemOpChains, 4410 TailCallArguments, dl); 4411 ArgOffset += 16; 4412 } 4413 } 4414 } 4415 } 4416 4417 if (!MemOpChains.empty()) 4418 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 4419 &MemOpChains[0], MemOpChains.size()); 4420 4421 // On Darwin, R12 must contain the address of an indirect callee. This does 4422 // not mean the MTCTR instruction must use R12; it's easier to model this as 4423 // an extra parameter, so do that. 4424 if (!isTailCall && 4425 !dyn_cast<GlobalAddressSDNode>(Callee) && 4426 !dyn_cast<ExternalSymbolSDNode>(Callee) && 4427 !isBLACompatibleAddress(Callee, DAG)) 4428 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 : 4429 PPC::R12), Callee)); 4430 4431 // Build a sequence of copy-to-reg nodes chained together with token chain 4432 // and flag operands which copy the outgoing args into the appropriate regs. 
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (isTailCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp,
                    FPOp, true, TailCallArguments);

  return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
                    RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
                    Ins, InVals);
}

bool
PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                  MachineFunction &MF, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
                 RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_PPC);
}

SDValue
PPCTargetLowering::LowerReturn(SDValue Chain,
                               CallingConv::ID CallConv, bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               SDLoc dl, SelectionDAG &DAG) const {

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC_PPC);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[i];

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other,
                     &RetOps[0], RetOps.size());
}

SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
                                   const PPCSubtarget &Subtarget) const {
  // When we pop the dynamic allocation we need to restore the SP link.
  SDLoc dl(Op);

  // Get the correct type for pointers.
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  // Construct the stack pointer operand.
  bool isPPC64 = Subtarget.isPPC64();
  unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
  SDValue StackPtr = DAG.getRegister(SP, PtrVT);

  // Get the operands for the STACKRESTORE.
  SDValue Chain = Op.getOperand(0);
  SDValue SaveSP = Op.getOperand(1);

  // Load the old link SP.
  SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr,
                                   MachinePointerInfo(),
                                   false, false, false, 0);

  // Restore the stack pointer.
  Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);

  // Store the old link SP.
  return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo(),
                      false, false, 0);
}


SDValue
PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = PPCSubTarget.isPPC64();
  bool isDarwinABI = PPCSubTarget.isDarwinABI();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  // Get the current return address save index. The users of this index will
  // be primarily the RETURNADDR lowering.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int RASI = FI->getReturnAddrSaveIndex();

  // If the return address save index hasn't been defined yet.
  if (!RASI) {
    // Find out what the fixed offset of the return address save area is.
    int LROffset = PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI);
    // Allocate the frame index for the return address save area.
    RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, true);
    // Save the result.
    FI->setReturnAddrSaveIndex(RASI);
  }
  return DAG.getFrameIndex(RASI, PtrVT);
}

SDValue
PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = PPCSubTarget.isPPC64();
  bool isDarwinABI = PPCSubTarget.isDarwinABI();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  // Get the current frame pointer save index. The users of this index will be
  // primarily DYNALLOC instructions.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int FPSI = FI->getFramePointerSaveIndex();

  // If the frame pointer save index hasn't been defined yet.
  if (!FPSI) {
    // Find out what the fixed offset of the frame pointer save area is.
    int FPOffset = PPCFrameLowering::getFramePointerSaveOffset(isPPC64,
                                                               isDarwinABI);

    // Allocate the frame index for the frame pointer save area.
    FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
    // Save the result.
    FI->setFramePointerSaveIndex(FPSI);
  }
  return DAG.getFrameIndex(FPSI, PtrVT);
}

SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                         SelectionDAG &DAG,
                                         const PPCSubtarget &Subtarget) const {
  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue Size  = Op.getOperand(1);
  SDLoc dl(Op);

  // Get the correct type for pointers.
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  // Negate the size.
  SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
                                DAG.getConstant(0, PtrVT), Size);
  // Construct a node for the frame pointer save index.
  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
  // Build a DYNALLOC node.
4609 SDValue Ops[3] = { Chain, NegSize, FPSIdx }; 4610 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other); 4611 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops, 3); 4612 } 4613 4614 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, 4615 SelectionDAG &DAG) const { 4616 SDLoc DL(Op); 4617 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL, 4618 DAG.getVTList(MVT::i32, MVT::Other), 4619 Op.getOperand(0), Op.getOperand(1)); 4620 } 4621 4622 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, 4623 SelectionDAG &DAG) const { 4624 SDLoc DL(Op); 4625 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other, 4626 Op.getOperand(0), Op.getOperand(1)); 4627 } 4628 4629 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when 4630 /// possible. 4631 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 4632 // Not FP? Not a fsel. 4633 if (!Op.getOperand(0).getValueType().isFloatingPoint() || 4634 !Op.getOperand(2).getValueType().isFloatingPoint()) 4635 return Op; 4636 4637 // We might be able to do better than this under some circumstances, but in 4638 // general, fsel-based lowering of select is a finite-math-only optimization. 4639 // For more information, see section F.3 of the 2.06 ISA specification. 4640 if (!DAG.getTarget().Options.NoInfsFPMath || 4641 !DAG.getTarget().Options.NoNaNsFPMath) 4642 return Op; 4643 4644 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 4645 4646 EVT ResVT = Op.getValueType(); 4647 EVT CmpVT = Op.getOperand(0).getValueType(); 4648 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 4649 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3); 4650 SDLoc dl(Op); 4651 4652 // If the RHS of the comparison is a 0.0, we don't need to do the 4653 // subtraction at all. 4654 SDValue Sel1; 4655 if (isFloatingPointZero(RHS)) 4656 switch (CC) { 4657 default: break; // SETUO etc aren't handled by fsel. 4658 case ISD::SETNE: 4659 std::swap(TV, FV); 4660 case ISD::SETEQ: 4661 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 4662 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 4663 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 4664 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 4665 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 4666 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 4667 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV); 4668 case ISD::SETULT: 4669 case ISD::SETLT: 4670 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 4671 case ISD::SETOGE: 4672 case ISD::SETGE: 4673 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 4674 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 4675 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 4676 case ISD::SETUGT: 4677 case ISD::SETGT: 4678 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 4679 case ISD::SETOLE: 4680 case ISD::SETLE: 4681 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 4682 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 4683 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 4684 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV); 4685 } 4686 4687 SDValue Cmp; 4688 switch (CC) { 4689 default: break; // SETUO etc aren't handled by fsel. 
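  // fsel natively computes "A >= 0.0 ? B : C", so each case below rewrites
  // the comparison as a subtraction whose sign drives the select: e.g.
  // "LHS >= RHS ? TV : FV" becomes fsel(LHS - RHS, TV, FV), and the
  // less-than/greater-than forms swap either the subtraction operands or the
  // select operands. Equality needs two fsels: Sel1 = fsel(Cmp, TV, FV)
  // picks TV when Cmp >= 0, and fsel(-Cmp, Sel1, FV) keeps it only if
  // Cmp <= 0 as well (SETNE just swaps TV/FV and falls through to SETEQ).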
4690 case ISD::SETNE: 4691 std::swap(TV, FV); 4692 case ISD::SETEQ: 4693 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 4694 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4695 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4696 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 4697 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 4698 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 4699 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 4700 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 4701 case ISD::SETULT: 4702 case ISD::SETLT: 4703 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 4704 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4705 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4706 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 4707 case ISD::SETOGE: 4708 case ISD::SETGE: 4709 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 4710 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4711 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4712 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 4713 case ISD::SETUGT: 4714 case ISD::SETGT: 4715 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 4716 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4717 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4718 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 4719 case ISD::SETOLE: 4720 case ISD::SETLE: 4721 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 4722 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4723 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4724 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 4725 } 4726 return Op; 4727 } 4728 4729 // FIXME: Split this code up when LegalizeDAGTypes lands. 4730 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 4731 SDLoc dl) const { 4732 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 4733 SDValue Src = Op.getOperand(0); 4734 if (Src.getValueType() == MVT::f32) 4735 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 4736 4737 SDValue Tmp; 4738 switch (Op.getValueType().getSimpleVT().SimpleTy) { 4739 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 4740 case MVT::i32: 4741 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIWZ : 4742 (PPCSubTarget.hasFPCVT() ? PPCISD::FCTIWUZ : 4743 PPCISD::FCTIDZ), 4744 dl, MVT::f64, Src); 4745 break; 4746 case MVT::i64: 4747 assert((Op.getOpcode() == ISD::FP_TO_SINT || PPCSubTarget.hasFPCVT()) && 4748 "i64 FP_TO_UINT is supported only with FPCVT"); 4749 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 4750 PPCISD::FCTIDUZ, 4751 dl, MVT::f64, Src); 4752 break; 4753 } 4754 4755 // Convert the FP value to an int value through memory. 4756 bool i32Stack = Op.getValueType() == MVT::i32 && PPCSubTarget.hasSTFIWX() && 4757 (Op.getOpcode() == ISD::FP_TO_SINT || PPCSubTarget.hasFPCVT()); 4758 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 4759 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 4760 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(FI); 4761 4762 // Emit a store to the stack slot. 
4763 SDValue Chain; 4764 if (i32Stack) { 4765 MachineFunction &MF = DAG.getMachineFunction(); 4766 MachineMemOperand *MMO = 4767 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 4768 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 4769 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 4770 DAG.getVTList(MVT::Other), Ops, array_lengthof(Ops), 4771 MVT::i32, MMO); 4772 } else 4773 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, 4774 MPI, false, false, 0); 4775 4776 // Result is a load from the stack slot. If loading 4 bytes, make sure to 4777 // add in a bias. 4778 if (Op.getValueType() == MVT::i32 && !i32Stack) { 4779 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 4780 DAG.getConstant(4, FIPtr.getValueType())); 4781 MPI = MachinePointerInfo(); 4782 } 4783 4784 return DAG.getLoad(Op.getValueType(), dl, Chain, FIPtr, MPI, 4785 false, false, false, 0); 4786 } 4787 4788 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, 4789 SelectionDAG &DAG) const { 4790 SDLoc dl(Op); 4791 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 4792 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 4793 return SDValue(); 4794 4795 assert((Op.getOpcode() == ISD::SINT_TO_FP || PPCSubTarget.hasFPCVT()) && 4796 "UINT_TO_FP is supported only with FPCVT"); 4797 4798 // If we have FCFIDS, then use it when converting to single-precision. 4799 // Otherwise, convert to double-precision and then round. 4800 unsigned FCFOp = (PPCSubTarget.hasFPCVT() && Op.getValueType() == MVT::f32) ? 4801 (Op.getOpcode() == ISD::UINT_TO_FP ? 4802 PPCISD::FCFIDUS : PPCISD::FCFIDS) : 4803 (Op.getOpcode() == ISD::UINT_TO_FP ? 4804 PPCISD::FCFIDU : PPCISD::FCFID); 4805 MVT FCFTy = (PPCSubTarget.hasFPCVT() && Op.getValueType() == MVT::f32) ? 4806 MVT::f32 : MVT::f64; 4807 4808 if (Op.getOperand(0).getValueType() == MVT::i64) { 4809 SDValue SINT = Op.getOperand(0); 4810 // When converting to single-precision, we actually need to convert 4811 // to double-precision first and then round to single-precision. 4812 // To avoid double-rounding effects during that operation, we have 4813 // to prepare the input operand. Bits that might be truncated when 4814 // converting to double-precision are replaced by a bit that won't 4815 // be lost at this stage, but is below the single-precision rounding 4816 // position. 4817 // 4818 // However, if -enable-unsafe-fp-math is in effect, accept double 4819 // rounding to avoid the extra overhead. 4820 if (Op.getValueType() == MVT::f32 && 4821 !PPCSubTarget.hasFPCVT() && 4822 !DAG.getTarget().Options.UnsafeFPMath) { 4823 4824 // Twiddle input to make sure the low 11 bits are zero. (If this 4825 // is the case, we are guaranteed the value will fit into the 53 bit 4826 // mantissa of an IEEE double-precision value without rounding.) 4827 // If any of those low 11 bits were not zero originally, make sure 4828 // bit 12 (value 2048) is set instead, so that the final rounding 4829 // to single-precision gets the correct result. 
4830 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 4831 SINT, DAG.getConstant(2047, MVT::i64)); 4832 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 4833 Round, DAG.getConstant(2047, MVT::i64)); 4834 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 4835 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 4836 Round, DAG.getConstant(-2048, MVT::i64)); 4837 4838 // However, we cannot use that value unconditionally: if the magnitude 4839 // of the input value is small, the bit-twiddling we did above might 4840 // end up visibly changing the output. Fortunately, in that case, we 4841 // don't need to twiddle bits since the original input will convert 4842 // exactly to double-precision floating-point already. Therefore, 4843 // construct a conditional to use the original value if the top 11 4844 // bits are all sign-bit copies, and use the rounded value computed 4845 // above otherwise. 4846 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 4847 SINT, DAG.getConstant(53, MVT::i32)); 4848 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 4849 Cond, DAG.getConstant(1, MVT::i64)); 4850 Cond = DAG.getSetCC(dl, MVT::i32, 4851 Cond, DAG.getConstant(1, MVT::i64), ISD::SETUGT); 4852 4853 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 4854 } 4855 4856 SDValue Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 4857 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 4858 4859 if (Op.getValueType() == MVT::f32 && !PPCSubTarget.hasFPCVT()) 4860 FP = DAG.getNode(ISD::FP_ROUND, dl, 4861 MVT::f32, FP, DAG.getIntPtrConstant(0)); 4862 return FP; 4863 } 4864 4865 assert(Op.getOperand(0).getValueType() == MVT::i32 && 4866 "Unhandled INT_TO_FP type in custom expander!"); 4867 // Since we only generate this in 64-bit mode, we can take advantage of 4868 // 64-bit registers. In particular, sign extend the input value into the 4869 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 4870 // then lfd it and fcfid it. 4871 MachineFunction &MF = DAG.getMachineFunction(); 4872 MachineFrameInfo *FrameInfo = MF.getFrameInfo(); 4873 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 4874 4875 SDValue Ld; 4876 if (PPCSubTarget.hasLFIWAX() || PPCSubTarget.hasFPCVT()) { 4877 int FrameIdx = FrameInfo->CreateStackObject(4, 4, false); 4878 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 4879 4880 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 4881 MachinePointerInfo::getFixedStack(FrameIdx), 4882 false, false, 0); 4883 4884 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 4885 "Expected an i32 store"); 4886 MachineMemOperand *MMO = 4887 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx), 4888 MachineMemOperand::MOLoad, 4, 4); 4889 SDValue Ops[] = { Store, FIdx }; 4890 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 4891 PPCISD::LFIWZX : PPCISD::LFIWAX, 4892 dl, DAG.getVTList(MVT::f64, MVT::Other), 4893 Ops, 2, MVT::i32, MMO); 4894 } else { 4895 assert(PPCSubTarget.isPPC64() && 4896 "i32->FP without LFIWAX supported only on PPC64"); 4897 4898 int FrameIdx = FrameInfo->CreateStackObject(8, 8, false); 4899 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 4900 4901 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, 4902 Op.getOperand(0)); 4903 4904 // STD the extended value into the stack slot. 4905 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Ext64, FIdx, 4906 MachinePointerInfo::getFixedStack(FrameIdx), 4907 false, false, 0); 4908 4909 // Load the value as a double. 
4910 Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx, 4911 MachinePointerInfo::getFixedStack(FrameIdx), 4912 false, false, false, 0); 4913 } 4914 4915 // FCFID it and return it. 4916 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld); 4917 if (Op.getValueType() == MVT::f32 && !PPCSubTarget.hasFPCVT()) 4918 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, DAG.getIntPtrConstant(0)); 4919 return FP; 4920 } 4921 4922 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 4923 SelectionDAG &DAG) const { 4924 SDLoc dl(Op); 4925 /* 4926 The rounding mode is in bits 30:31 of FPSR, and has the following 4927 settings: 4928 00 Round to nearest 4929 01 Round to 0 4930 10 Round to +inf 4931 11 Round to -inf 4932 4933 FLT_ROUNDS, on the other hand, expects the following: 4934 -1 Undefined 4935 0 Round to 0 4936 1 Round to nearest 4937 2 Round to +inf 4938 3 Round to -inf 4939 4940 To perform the conversion, we do: 4941 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1)) 4942 */ 4943 4944 MachineFunction &MF = DAG.getMachineFunction(); 4945 EVT VT = Op.getValueType(); 4946 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 4947 SDValue MFFSreg, InFlag; 4948 4949 // Save FP Control Word to register 4950 EVT NodeTys[] = { 4951 MVT::f64, // return register 4952 MVT::Glue // unused in this context 4953 }; 4954 SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0); 4955 4956 // Save FP register to stack slot 4957 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false); 4958 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT); 4959 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, 4960 StackSlot, MachinePointerInfo(), false, false,0); 4961 4962 // Load FP Control Word from low 32 bits of stack slot. 4963 SDValue Four = DAG.getConstant(4, PtrVT); 4964 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four); 4965 SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(), 4966 false, false, false, 0); 4967 4968 // Transform as necessary 4969 SDValue CWD1 = 4970 DAG.getNode(ISD::AND, dl, MVT::i32, 4971 CWD, DAG.getConstant(3, MVT::i32)); 4972 SDValue CWD2 = 4973 DAG.getNode(ISD::SRL, dl, MVT::i32, 4974 DAG.getNode(ISD::AND, dl, MVT::i32, 4975 DAG.getNode(ISD::XOR, dl, MVT::i32, 4976 CWD, DAG.getConstant(3, MVT::i32)), 4977 DAG.getConstant(3, MVT::i32)), 4978 DAG.getConstant(1, MVT::i32)); 4979 4980 SDValue RetVal = 4981 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2); 4982 4983 return DAG.getNode((VT.getSizeInBits() < 16 ? 4984 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal); 4985 } 4986 4987 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const { 4988 EVT VT = Op.getValueType(); 4989 unsigned BitWidth = VT.getSizeInBits(); 4990 SDLoc dl(Op); 4991 assert(Op.getNumOperands() == 3 && 4992 VT == Op.getOperand(1).getValueType() && 4993 "Unexpected SHL!"); 4994 4995 // Expand into a bunch of logical ops. Note that these ops 4996 // depend on the PPC behavior for oversized shift amounts. 
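  // For part width BW this produces
  //   OutLo = Lo << Amt
  //   OutHi = (Hi << Amt) | (Lo >> (BW - Amt)) | (Lo << (Amt - BW))
  // which is correct for any Amt in [0, 2*BW) because the PPC shifts modelled
  // by PPCISD::SHL/SRL produce zero (rather than undefined results) for
  // amounts in [BW, 2*BW).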
4997 SDValue Lo = Op.getOperand(0); 4998 SDValue Hi = Op.getOperand(1); 4999 SDValue Amt = Op.getOperand(2); 5000 EVT AmtVT = Amt.getValueType(); 5001 5002 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 5003 DAG.getConstant(BitWidth, AmtVT), Amt); 5004 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 5005 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 5006 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 5007 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 5008 DAG.getConstant(-BitWidth, AmtVT)); 5009 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 5010 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 5011 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 5012 SDValue OutOps[] = { OutLo, OutHi }; 5013 return DAG.getMergeValues(OutOps, 2, dl); 5014 } 5015 5016 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 5017 EVT VT = Op.getValueType(); 5018 SDLoc dl(Op); 5019 unsigned BitWidth = VT.getSizeInBits(); 5020 assert(Op.getNumOperands() == 3 && 5021 VT == Op.getOperand(1).getValueType() && 5022 "Unexpected SRL!"); 5023 5024 // Expand into a bunch of logical ops. Note that these ops 5025 // depend on the PPC behavior for oversized shift amounts. 5026 SDValue Lo = Op.getOperand(0); 5027 SDValue Hi = Op.getOperand(1); 5028 SDValue Amt = Op.getOperand(2); 5029 EVT AmtVT = Amt.getValueType(); 5030 5031 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 5032 DAG.getConstant(BitWidth, AmtVT), Amt); 5033 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 5034 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 5035 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 5036 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 5037 DAG.getConstant(-BitWidth, AmtVT)); 5038 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 5039 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 5040 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 5041 SDValue OutOps[] = { OutLo, OutHi }; 5042 return DAG.getMergeValues(OutOps, 2, dl); 5043 } 5044 5045 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 5046 SDLoc dl(Op); 5047 EVT VT = Op.getValueType(); 5048 unsigned BitWidth = VT.getSizeInBits(); 5049 assert(Op.getNumOperands() == 3 && 5050 VT == Op.getOperand(1).getValueType() && 5051 "Unexpected SRA!"); 5052 5053 // Expand into a bunch of logical ops, followed by a select_cc. 5054 SDValue Lo = Op.getOperand(0); 5055 SDValue Hi = Op.getOperand(1); 5056 SDValue Amt = Op.getOperand(2); 5057 EVT AmtVT = Amt.getValueType(); 5058 5059 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 5060 DAG.getConstant(BitWidth, AmtVT), Amt); 5061 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 5062 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 5063 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 5064 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 5065 DAG.getConstant(-BitWidth, AmtVT)); 5066 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 5067 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 5068 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, AmtVT), 5069 Tmp4, Tmp6, ISD::SETLE); 5070 SDValue OutOps[] = { OutLo, OutHi }; 5071 return DAG.getMergeValues(OutOps, 2, dl); 5072 } 5073 5074 //===----------------------------------------------------------------------===// 5075 // Vector related lowering. 5076 // 5077 5078 /// BuildSplatI - Build a canonical splati of Val with an element size of 5079 /// SplatSize. 
Cast the result to VT. 5080 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 5081 SelectionDAG &DAG, SDLoc dl) { 5082 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 5083 5084 static const EVT VTys[] = { // canonical VT to use for each size. 5085 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 5086 }; 5087 5088 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 5089 5090 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 5091 if (Val == -1) 5092 SplatSize = 1; 5093 5094 EVT CanonicalVT = VTys[SplatSize-1]; 5095 5096 // Build a canonical splat for this value. 5097 SDValue Elt = DAG.getConstant(Val, MVT::i32); 5098 SmallVector<SDValue, 8> Ops; 5099 Ops.assign(CanonicalVT.getVectorNumElements(), Elt); 5100 SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, 5101 &Ops[0], Ops.size()); 5102 return DAG.getNode(ISD::BITCAST, dl, ReqVT, Res); 5103 } 5104 5105 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 5106 /// specified intrinsic ID. 5107 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, 5108 SelectionDAG &DAG, SDLoc dl, 5109 EVT DestVT = MVT::Other) { 5110 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 5111 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 5112 DAG.getConstant(IID, MVT::i32), Op); 5113 } 5114 5115 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 5116 /// specified intrinsic ID. 5117 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 5118 SelectionDAG &DAG, SDLoc dl, 5119 EVT DestVT = MVT::Other) { 5120 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 5121 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 5122 DAG.getConstant(IID, MVT::i32), LHS, RHS); 5123 } 5124 5125 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 5126 /// specified intrinsic ID. 5127 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 5128 SDValue Op2, SelectionDAG &DAG, 5129 SDLoc dl, EVT DestVT = MVT::Other) { 5130 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 5131 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 5132 DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2); 5133 } 5134 5135 5136 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 5137 /// amount. The result has the specified value type. 5138 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, 5139 EVT VT, SelectionDAG &DAG, SDLoc dl) { 5140 // Force LHS/RHS to be the right type. 5141 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 5142 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 5143 5144 int Ops[16]; 5145 for (unsigned i = 0; i != 16; ++i) 5146 Ops[i] = i + Amt; 5147 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 5148 return DAG.getNode(ISD::BITCAST, dl, VT, T); 5149 } 5150 5151 // If this is a case we can't handle, return null and let the default 5152 // expansion code take care of it. If we CAN select this case, and if it 5153 // selects to a single instruction, return Op. Otherwise, if we can codegen 5154 // this case more efficiently than a constant pool load, lower it to the 5155 // sequence of ops that should be used. 5156 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 5157 SelectionDAG &DAG) const { 5158 SDLoc dl(Op); 5159 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 5160 assert(BVN != 0 && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 5161 5162 // Check if this is a splat of a constant value. 
5163 APInt APSplatBits, APSplatUndef; 5164 unsigned SplatBitSize; 5165 bool HasAnyUndefs; 5166 if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize, 5167 HasAnyUndefs, 0, true) || SplatBitSize > 32) 5168 return SDValue(); 5169 5170 unsigned SplatBits = APSplatBits.getZExtValue(); 5171 unsigned SplatUndef = APSplatUndef.getZExtValue(); 5172 unsigned SplatSize = SplatBitSize / 8; 5173 5174 // First, handle single instruction cases. 5175 5176 // All zeros? 5177 if (SplatBits == 0) { 5178 // Canonicalize all zero vectors to be v4i32. 5179 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 5180 SDValue Z = DAG.getConstant(0, MVT::i32); 5181 Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z); 5182 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z); 5183 } 5184 return Op; 5185 } 5186 5187 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 5188 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >> 5189 (32-SplatBitSize)); 5190 if (SextVal >= -16 && SextVal <= 15) 5191 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl); 5192 5193 5194 // Two instruction sequences. 5195 5196 // If this value is in the range [-32,30] and is even, use: 5197 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2) 5198 // If this value is in the range [17,31] and is odd, use: 5199 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16) 5200 // If this value is in the range [-31,-17] and is odd, use: 5201 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16) 5202 // Note the last two are three-instruction sequences. 5203 if (SextVal >= -32 && SextVal <= 31) { 5204 // To avoid having these optimizations undone by constant folding, 5205 // we convert to a pseudo that will be expanded later into one of 5206 // the above forms. 5207 SDValue Elt = DAG.getConstant(SextVal, MVT::i32); 5208 EVT VT = Op.getValueType(); 5209 int Size = VT == MVT::v16i8 ? 1 : (VT == MVT::v8i16 ? 2 : 4); 5210 SDValue EltSize = DAG.getConstant(Size, MVT::i32); 5211 return DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize); 5212 } 5213 5214 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is 5215 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important 5216 // for fneg/fabs. 5217 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) { 5218 // Make -1 and vspltisw -1: 5219 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl); 5220 5221 // Make the VSLW intrinsic, computing 0x8000_0000. 5222 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV, 5223 OnesV, DAG, dl); 5224 5225 // xor by OnesV to invert it. 5226 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV); 5227 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 5228 } 5229 5230 // Check to see if this is a wide variety of vsplti*, binop self cases. 5231 static const signed char SplatCsts[] = { 5232 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7, 5233 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16 5234 }; 5235 5236 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) { 5237 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for 5238 // cases which are ambiguous (e.g. formation of 0x8000_0000). 'vsplti -1' 5239 int i = SplatCsts[idx]; 5240 5241 // Figure out what shift amount will be used by altivec if shifted by i in 5242 // this splat size. 5243 unsigned TypeShiftAmt = i & (SplatBitSize-1); 5244 5245 // vsplti + shl self. 
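    // Example: a halfword splat of 2 is built as vspltish 1 followed by a
    // vslh of the result by itself, since each element (1) is then shifted
    // left by its own low-order bits (1).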
5246 if (SextVal == (int)((unsigned)i << TypeShiftAmt)) { 5247 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 5248 static const unsigned IIDs[] = { // Intrinsic to use for each size. 5249 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0, 5250 Intrinsic::ppc_altivec_vslw 5251 }; 5252 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 5253 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 5254 } 5255 5256 // vsplti + srl self. 5257 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 5258 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 5259 static const unsigned IIDs[] = { // Intrinsic to use for each size. 5260 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0, 5261 Intrinsic::ppc_altivec_vsrw 5262 }; 5263 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 5264 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 5265 } 5266 5267 // vsplti + sra self. 5268 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 5269 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 5270 static const unsigned IIDs[] = { // Intrinsic to use for each size. 5271 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0, 5272 Intrinsic::ppc_altivec_vsraw 5273 }; 5274 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 5275 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 5276 } 5277 5278 // vsplti + rol self. 5279 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) | 5280 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) { 5281 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 5282 static const unsigned IIDs[] = { // Intrinsic to use for each size. 5283 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 5284 Intrinsic::ppc_altivec_vrlw 5285 }; 5286 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 5287 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 5288 } 5289 5290 // t = vsplti c, result = vsldoi t, t, 1 5291 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) { 5292 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 5293 return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG, dl); 5294 } 5295 // t = vsplti c, result = vsldoi t, t, 2 5296 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) { 5297 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 5298 return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG, dl); 5299 } 5300 // t = vsplti c, result = vsldoi t, t, 3 5301 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) { 5302 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 5303 return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl); 5304 } 5305 } 5306 5307 return SDValue(); 5308 } 5309 5310 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 5311 /// the specified operations to build the shuffle. 
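/// Each table entry packs a cost in bits 30-31, an operation number in bits
/// 26-29, and 13-bit left/right operand IDs; an ID is the base-9 encoding of
/// the four result elements it represents and doubles as an index back into
/// the table for the recursive calls below.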
5312 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 5313 SDValue RHS, SelectionDAG &DAG, 5314 SDLoc dl) { 5315 unsigned OpNum = (PFEntry >> 26) & 0x0F; 5316 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 5317 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 5318 5319 enum { 5320 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 5321 OP_VMRGHW, 5322 OP_VMRGLW, 5323 OP_VSPLTISW0, 5324 OP_VSPLTISW1, 5325 OP_VSPLTISW2, 5326 OP_VSPLTISW3, 5327 OP_VSLDOI4, 5328 OP_VSLDOI8, 5329 OP_VSLDOI12 5330 }; 5331 5332 if (OpNum == OP_COPY) { 5333 if (LHSID == (1*9+2)*9+3) return LHS; 5334 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 5335 return RHS; 5336 } 5337 5338 SDValue OpLHS, OpRHS; 5339 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 5340 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 5341 5342 int ShufIdxs[16]; 5343 switch (OpNum) { 5344 default: llvm_unreachable("Unknown i32 permute!"); 5345 case OP_VMRGHW: 5346 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 5347 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 5348 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 5349 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 5350 break; 5351 case OP_VMRGLW: 5352 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 5353 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 5354 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 5355 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 5356 break; 5357 case OP_VSPLTISW0: 5358 for (unsigned i = 0; i != 16; ++i) 5359 ShufIdxs[i] = (i&3)+0; 5360 break; 5361 case OP_VSPLTISW1: 5362 for (unsigned i = 0; i != 16; ++i) 5363 ShufIdxs[i] = (i&3)+4; 5364 break; 5365 case OP_VSPLTISW2: 5366 for (unsigned i = 0; i != 16; ++i) 5367 ShufIdxs[i] = (i&3)+8; 5368 break; 5369 case OP_VSPLTISW3: 5370 for (unsigned i = 0; i != 16; ++i) 5371 ShufIdxs[i] = (i&3)+12; 5372 break; 5373 case OP_VSLDOI4: 5374 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl); 5375 case OP_VSLDOI8: 5376 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl); 5377 case OP_VSLDOI12: 5378 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl); 5379 } 5380 EVT VT = OpLHS.getValueType(); 5381 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS); 5382 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS); 5383 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs); 5384 return DAG.getNode(ISD::BITCAST, dl, VT, T); 5385 } 5386 5387 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 5388 /// is a shuffle we can handle in a single instruction, return it. Otherwise, 5389 /// return the code it can be lowered into. Worst case, it can always be 5390 /// lowered into a vperm. 5391 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 5392 SelectionDAG &DAG) const { 5393 SDLoc dl(Op); 5394 SDValue V1 = Op.getOperand(0); 5395 SDValue V2 = Op.getOperand(1); 5396 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5397 EVT VT = Op.getValueType(); 5398 5399 // Cases that are handled by instructions that take permute immediates 5400 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 5401 // selected by the instruction selector. 
5402 if (V2.getOpcode() == ISD::UNDEF) { 5403 if (PPC::isSplatShuffleMask(SVOp, 1) || 5404 PPC::isSplatShuffleMask(SVOp, 2) || 5405 PPC::isSplatShuffleMask(SVOp, 4) || 5406 PPC::isVPKUWUMShuffleMask(SVOp, true) || 5407 PPC::isVPKUHUMShuffleMask(SVOp, true) || 5408 PPC::isVSLDOIShuffleMask(SVOp, true) != -1 || 5409 PPC::isVMRGLShuffleMask(SVOp, 1, true) || 5410 PPC::isVMRGLShuffleMask(SVOp, 2, true) || 5411 PPC::isVMRGLShuffleMask(SVOp, 4, true) || 5412 PPC::isVMRGHShuffleMask(SVOp, 1, true) || 5413 PPC::isVMRGHShuffleMask(SVOp, 2, true) || 5414 PPC::isVMRGHShuffleMask(SVOp, 4, true)) { 5415 return Op; 5416 } 5417 } 5418 5419 // Altivec has a variety of "shuffle immediates" that take two vector inputs 5420 // and produce a fixed permutation. If any of these match, do not lower to 5421 // VPERM. 5422 if (PPC::isVPKUWUMShuffleMask(SVOp, false) || 5423 PPC::isVPKUHUMShuffleMask(SVOp, false) || 5424 PPC::isVSLDOIShuffleMask(SVOp, false) != -1 || 5425 PPC::isVMRGLShuffleMask(SVOp, 1, false) || 5426 PPC::isVMRGLShuffleMask(SVOp, 2, false) || 5427 PPC::isVMRGLShuffleMask(SVOp, 4, false) || 5428 PPC::isVMRGHShuffleMask(SVOp, 1, false) || 5429 PPC::isVMRGHShuffleMask(SVOp, 2, false) || 5430 PPC::isVMRGHShuffleMask(SVOp, 4, false)) 5431 return Op; 5432 5433 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 5434 // perfect shuffle table to emit an optimal matching sequence. 5435 ArrayRef<int> PermMask = SVOp->getMask(); 5436 5437 unsigned PFIndexes[4]; 5438 bool isFourElementShuffle = true; 5439 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 5440 unsigned EltNo = 8; // Start out undef. 5441 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 5442 if (PermMask[i*4+j] < 0) 5443 continue; // Undef, ignore it. 5444 5445 unsigned ByteSource = PermMask[i*4+j]; 5446 if ((ByteSource & 3) != j) { 5447 isFourElementShuffle = false; 5448 break; 5449 } 5450 5451 if (EltNo == 8) { 5452 EltNo = ByteSource/4; 5453 } else if (EltNo != ByteSource/4) { 5454 isFourElementShuffle = false; 5455 break; 5456 } 5457 } 5458 PFIndexes[i] = EltNo; 5459 } 5460 5461 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 5462 // perfect shuffle vector to determine if it is cost effective to do this as 5463 // discrete instructions, or whether we should use a vperm. 5464 if (isFourElementShuffle) { 5465 // Compute the index in the perfect shuffle table. 5466 unsigned PFTableIndex = 5467 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 5468 5469 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 5470 unsigned Cost = (PFEntry >> 30); 5471 5472 // Determining when to avoid vperm is tricky. Many things affect the cost 5473 // of vperm, particularly how many times the perm mask needs to be computed. 5474 // For example, if the perm mask can be hoisted out of a loop or is already 5475 // used (perhaps because there are multiple permutes with the same shuffle 5476 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 5477 // the loop requires an extra register. 5478 // 5479 // As a compromise, we only emit discrete instructions if the shuffle can be 5480 // generated in 3 or fewer operations. When we have loop information 5481 // available, if this block is within a loop, we should avoid using vperm 5482 // for 3-operation perms and use a constant pool load instead. 
5483 if (Cost < 3) 5484 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 5485 } 5486 5487 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 5488 // vector that will get spilled to the constant pool. 5489 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 5490 5491 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 5492 // that it is in input element units, not in bytes. Convert now. 5493 EVT EltVT = V1.getValueType().getVectorElementType(); 5494 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 5495 5496 SmallVector<SDValue, 16> ResultMask; 5497 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 5498 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i]; 5499 5500 for (unsigned j = 0; j != BytesPerElement; ++j) 5501 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j, 5502 MVT::i32)); 5503 } 5504 5505 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, 5506 &ResultMask[0], ResultMask.size()); 5507 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), V1, V2, VPermMask); 5508 } 5509 5510 /// getAltivecCompareInfo - Given an intrinsic, return false if it is not an 5511 /// altivec comparison. If it is, return true and fill in Opc/isDot with 5512 /// information about the intrinsic. 5513 static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, 5514 bool &isDot) { 5515 unsigned IntrinsicID = 5516 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 5517 CompareOpc = -1; 5518 isDot = false; 5519 switch (IntrinsicID) { 5520 default: return false; 5521 // Comparison predicates. 5522 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; 5523 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; 5524 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; 5525 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; 5526 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; 5527 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; 5528 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; 5529 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; 5530 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; 5531 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; 5532 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; 5533 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; 5534 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; 5535 5536 // Normal Comparisons. 
5537 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 5538 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 5539 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 5540 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 5541 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 5542 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 5543 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 5544 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 5545 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 5546 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 5547 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 5548 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 5549 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 5550 } 5551 return true; 5552 } 5553 5554 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 5555 /// lower, do it, otherwise return null. 5556 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 5557 SelectionDAG &DAG) const { 5558 // If this is a lowered altivec predicate compare, CompareOpc is set to the 5559 // opcode number of the comparison. 5560 SDLoc dl(Op); 5561 int CompareOpc; 5562 bool isDot; 5563 if (!getAltivecCompareInfo(Op, CompareOpc, isDot)) 5564 return SDValue(); // Don't custom lower most intrinsics. 5565 5566 // If this is a non-dot comparison, make the VCMP node and we are done. 5567 if (!isDot) { 5568 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 5569 Op.getOperand(1), Op.getOperand(2), 5570 DAG.getConstant(CompareOpc, MVT::i32)); 5571 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 5572 } 5573 5574 // Create the PPCISD altivec 'dot' comparison node. 5575 SDValue Ops[] = { 5576 Op.getOperand(2), // LHS 5577 Op.getOperand(3), // RHS 5578 DAG.getConstant(CompareOpc, MVT::i32) 5579 }; 5580 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue }; 5581 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3); 5582 5583 // Now that we have the comparison, emit a copy from the CR to a GPR. 5584 // This is flagged to the above dot comparison. 5585 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32, 5586 DAG.getRegister(PPC::CR6, MVT::i32), 5587 CompNode.getValue(1)); 5588 5589 // Unpack the result based on how the target uses it. 5590 unsigned BitNo; // Bit # of CR6. 5591 bool InvertBit; // Invert result? 5592 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 5593 default: // Can't happen, don't crash on invalid number though. 5594 case 0: // Return the value of the EQ bit of CR6. 5595 BitNo = 0; InvertBit = false; 5596 break; 5597 case 1: // Return the inverted value of the EQ bit of CR6. 5598 BitNo = 0; InvertBit = true; 5599 break; 5600 case 2: // Return the value of the LT bit of CR6. 5601 BitNo = 2; InvertBit = false; 5602 break; 5603 case 3: // Return the inverted value of the LT bit of CR6. 5604 BitNo = 2; InvertBit = true; 5605 break; 5606 } 5607 5608 // Shift the bit into the low position. 5609 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags, 5610 DAG.getConstant(8-(3-BitNo), MVT::i32)); 5611 // Isolate the bit. 
5612 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags, 5613 DAG.getConstant(1, MVT::i32)); 5614 5615 // If we are supposed to, toggle the bit. 5616 if (InvertBit) 5617 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags, 5618 DAG.getConstant(1, MVT::i32)); 5619 return Flags; 5620 } 5621 5622 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, 5623 SelectionDAG &DAG) const { 5624 SDLoc dl(Op); 5625 // Create a stack slot that is 16-byte aligned. 5626 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 5627 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 5628 EVT PtrVT = getPointerTy(); 5629 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 5630 5631 // Store the input value into Value#0 of the stack slot. 5632 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, 5633 Op.getOperand(0), FIdx, MachinePointerInfo(), 5634 false, false, 0); 5635 // Load it out. 5636 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo(), 5637 false, false, false, 0); 5638 } 5639 5640 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 5641 SDLoc dl(Op); 5642 if (Op.getValueType() == MVT::v4i32) { 5643 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 5644 5645 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl); 5646 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt. 5647 5648 SDValue RHSSwap = // = vrlw RHS, 16 5649 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 5650 5651 // Shrinkify inputs to v8i16. 5652 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS); 5653 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS); 5654 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap); 5655 5656 // Low parts multiplied together, generating 32-bit results (we ignore the 5657 // top parts). 5658 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 5659 LHS, RHS, DAG, dl, MVT::v4i32); 5660 5661 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 5662 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 5663 // Shift the high parts up 16 bits. 5664 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 5665 Neg16, DAG, dl); 5666 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 5667 } else if (Op.getValueType() == MVT::v8i16) { 5668 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 5669 5670 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl); 5671 5672 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 5673 LHS, RHS, Zero, DAG, dl); 5674 } else if (Op.getValueType() == MVT::v16i8) { 5675 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 5676 5677 // Multiply the even 8-bit parts, producing 16-bit sums. 5678 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 5679 LHS, RHS, DAG, dl, MVT::v8i16); 5680 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts); 5681 5682 // Multiply the odd 8-bit parts, producing 16-bit sums. 5683 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 5684 LHS, RHS, DAG, dl, MVT::v8i16); 5685 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts); 5686 5687 // Merge the results together. 5688 int Ops[16]; 5689 for (unsigned i = 0; i != 8; ++i) { 5690 Ops[i*2 ] = 2*i+1; 5691 Ops[i*2+1] = 2*i+1+16; 5692 } 5693 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 5694 } else { 5695 llvm_unreachable("Unknown mul to lower!"); 5696 } 5697 } 5698 5699 /// LowerOperation - Provide custom lowering hooks for some operations. 
5700 /// 5701 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 5702 switch (Op.getOpcode()) { 5703 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 5704 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 5705 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 5706 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 5707 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 5708 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 5709 case ISD::SETCC: return LowerSETCC(Op, DAG); 5710 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 5711 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 5712 case ISD::VASTART: 5713 return LowerVASTART(Op, DAG, PPCSubTarget); 5714 5715 case ISD::VAARG: 5716 return LowerVAARG(Op, DAG, PPCSubTarget); 5717 5718 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget); 5719 case ISD::DYNAMIC_STACKALLOC: 5720 return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget); 5721 5722 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 5723 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 5724 5725 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 5726 case ISD::FP_TO_UINT: 5727 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, 5728 SDLoc(Op)); 5729 case ISD::UINT_TO_FP: 5730 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 5731 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 5732 5733 // Lower 64-bit shifts. 5734 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 5735 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 5736 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 5737 5738 // Vector-related lowering. 5739 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 5740 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 5741 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 5742 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 5743 case ISD::MUL: return LowerMUL(Op, DAG); 5744 5745 // For counter-based loop handling. 5746 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 5747 5748 // Frame & Return address. 
5749 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 5750 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 5751 } 5752 } 5753 5754 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 5755 SmallVectorImpl<SDValue>&Results, 5756 SelectionDAG &DAG) const { 5757 const TargetMachine &TM = getTargetMachine(); 5758 SDLoc dl(N); 5759 switch (N->getOpcode()) { 5760 default: 5761 llvm_unreachable("Do not know how to custom type legalize this operation!"); 5762 case ISD::INTRINSIC_W_CHAIN: { 5763 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 5764 Intrinsic::ppc_is_decremented_ctr_nonzero) 5765 break; 5766 5767 assert(N->getValueType(0) == MVT::i1 && 5768 "Unexpected result type for CTR decrement intrinsic"); 5769 EVT SVT = getSetCCResultType(*DAG.getContext(), N->getValueType(0)); 5770 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 5771 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 5772 N->getOperand(1)); 5773 5774 Results.push_back(NewInt); 5775 Results.push_back(NewInt.getValue(1)); 5776 break; 5777 } 5778 case ISD::VAARG: { 5779 if (!TM.getSubtarget<PPCSubtarget>().isSVR4ABI() 5780 || TM.getSubtarget<PPCSubtarget>().isPPC64()) 5781 return; 5782 5783 EVT VT = N->getValueType(0); 5784 5785 if (VT == MVT::i64) { 5786 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG, PPCSubTarget); 5787 5788 Results.push_back(NewNode); 5789 Results.push_back(NewNode.getValue(1)); 5790 } 5791 return; 5792 } 5793 case ISD::FP_ROUND_INREG: { 5794 assert(N->getValueType(0) == MVT::ppcf128); 5795 assert(N->getOperand(0).getValueType() == MVT::ppcf128); 5796 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 5797 MVT::f64, N->getOperand(0), 5798 DAG.getIntPtrConstant(0)); 5799 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 5800 MVT::f64, N->getOperand(0), 5801 DAG.getIntPtrConstant(1)); 5802 5803 // Add the two halves of the long double in round-to-zero mode. 5804 SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 5805 5806 // We know the low half is about to be thrown away, so just use something 5807 // convenient. 5808 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128, 5809 FPreg, FPreg)); 5810 return; 5811 } 5812 case ISD::FP_TO_SINT: 5813 // LowerFP_TO_INT() can only handle f32 and f64. 5814 if (N->getOperand(0).getValueType() == MVT::ppcf128) 5815 return; 5816 Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl)); 5817 return; 5818 } 5819 } 5820 5821 5822 //===----------------------------------------------------------------------===// 5823 // Other Lowering Code 5824 //===----------------------------------------------------------------------===// 5825 5826 MachineBasicBlock * 5827 PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 5828 bool is64bit, unsigned BinOpcode) const { 5829 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 
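  // The expansion is the usual load-reserved/store-conditional retry loop
  // (sketched in the block comments below). For ATOMIC_SWAP (BinOpcode == 0)
  // the incoming value is stored back unmodified; otherwise the value stored
  // is BinOpcode applied to the loaded value and the increment.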
5830 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5831 5832 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5833 MachineFunction *F = BB->getParent(); 5834 MachineFunction::iterator It = BB; 5835 ++It; 5836 5837 unsigned dest = MI->getOperand(0).getReg(); 5838 unsigned ptrA = MI->getOperand(1).getReg(); 5839 unsigned ptrB = MI->getOperand(2).getReg(); 5840 unsigned incr = MI->getOperand(3).getReg(); 5841 DebugLoc dl = MI->getDebugLoc(); 5842 5843 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 5844 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 5845 F->insert(It, loopMBB); 5846 F->insert(It, exitMBB); 5847 exitMBB->splice(exitMBB->begin(), BB, 5848 llvm::next(MachineBasicBlock::iterator(MI)), 5849 BB->end()); 5850 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5851 5852 MachineRegisterInfo &RegInfo = F->getRegInfo(); 5853 unsigned TmpReg = (!BinOpcode) ? incr : 5854 RegInfo.createVirtualRegister( 5855 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass : 5856 (const TargetRegisterClass *) &PPC::GPRCRegClass); 5857 5858 // thisMBB: 5859 // ... 5860 // fallthrough --> loopMBB 5861 BB->addSuccessor(loopMBB); 5862 5863 // loopMBB: 5864 // l[wd]arx dest, ptr 5865 // add r0, dest, incr 5866 // st[wd]cx. r0, ptr 5867 // bne- loopMBB 5868 // fallthrough --> exitMBB 5869 BB = loopMBB; 5870 BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest) 5871 .addReg(ptrA).addReg(ptrB); 5872 if (BinOpcode) 5873 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 5874 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 5875 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 5876 BuildMI(BB, dl, TII->get(PPC::BCC)) 5877 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 5878 BB->addSuccessor(loopMBB); 5879 BB->addSuccessor(exitMBB); 5880 5881 // exitMBB: 5882 // ... 5883 BB = exitMBB; 5884 return BB; 5885 } 5886 5887 MachineBasicBlock * 5888 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI, 5889 MachineBasicBlock *BB, 5890 bool is8bit, // operation 5891 unsigned BinOpcode) const { 5892 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 5893 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5894 // In 64 bit mode we have to use 64 bits for addresses, even though the 5895 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 5896 // registers without caring whether they're 32 or 64, but here we're 5897 // doing actual arithmetic on the addresses. 5898 bool is64bit = PPCSubTarget.isPPC64(); 5899 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 5900 5901 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5902 MachineFunction *F = BB->getParent(); 5903 MachineFunction::iterator It = BB; 5904 ++It; 5905 5906 unsigned dest = MI->getOperand(0).getReg(); 5907 unsigned ptrA = MI->getOperand(1).getReg(); 5908 unsigned ptrB = MI->getOperand(2).getReg(); 5909 unsigned incr = MI->getOperand(3).getReg(); 5910 DebugLoc dl = MI->getDebugLoc(); 5911 5912 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 5913 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 5914 F->insert(It, loopMBB); 5915 F->insert(It, exitMBB); 5916 exitMBB->splice(exitMBB->begin(), BB, 5917 llvm::next(MachineBasicBlock::iterator(MI)), 5918 BB->end()); 5919 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5920 5921 MachineRegisterInfo &RegInfo = F->getRegInfo(); 5922 const TargetRegisterClass *RC = 5923 is64bit ? 
(const TargetRegisterClass *) &PPC::G8RCRegClass : 5924 (const TargetRegisterClass *) &PPC::GPRCRegClass; 5925 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 5926 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 5927 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 5928 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC); 5929 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 5930 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 5931 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 5932 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 5933 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC); 5934 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 5935 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 5936 unsigned Ptr1Reg; 5937 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC); 5938 5939 // thisMBB: 5940 // ... 5941 // fallthrough --> loopMBB 5942 BB->addSuccessor(loopMBB); 5943 5944 // The 4-byte load must be aligned, while a char or short may be 5945 // anywhere in the word. Hence all this nasty bookkeeping code. 5946 // add ptr1, ptrA, ptrB [copy if ptrA==0] 5947 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 5948 // xori shift, shift1, 24 [16] 5949 // rlwinm ptr, ptr1, 0, 0, 29 5950 // slw incr2, incr, shift 5951 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 5952 // slw mask, mask2, shift 5953 // loopMBB: 5954 // lwarx tmpDest, ptr 5955 // add tmp, tmpDest, incr2 5956 // andc tmp2, tmpDest, mask 5957 // and tmp3, tmp, mask 5958 // or tmp4, tmp3, tmp2 5959 // stwcx. tmp4, ptr 5960 // bne- loopMBB 5961 // fallthrough --> exitMBB 5962 // srw dest, tmpDest, shift 5963 if (ptrA != ZeroReg) { 5964 Ptr1Reg = RegInfo.createVirtualRegister(RC); 5965 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 5966 .addReg(ptrA).addReg(ptrB); 5967 } else { 5968 Ptr1Reg = ptrB; 5969 } 5970 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 5971 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 5972 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 5973 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 5974 if (is64bit) 5975 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 5976 .addReg(Ptr1Reg).addImm(0).addImm(61); 5977 else 5978 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 5979 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 5980 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg) 5981 .addReg(incr).addReg(ShiftReg); 5982 if (is8bit) 5983 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 5984 else { 5985 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 5986 BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535); 5987 } 5988 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 5989 .addReg(Mask2Reg).addReg(ShiftReg); 5990 5991 BB = loopMBB; 5992 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 5993 .addReg(ZeroReg).addReg(PtrReg); 5994 if (BinOpcode) 5995 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 5996 .addReg(Incr2Reg).addReg(TmpDestReg); 5997 BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg) 5998 .addReg(TmpDestReg).addReg(MaskReg); 5999 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg) 6000 .addReg(TmpReg).addReg(MaskReg); 6001 BuildMI(BB, dl, TII->get(is64bit ? 
PPC::OR8 : PPC::OR), Tmp4Reg) 6002 .addReg(Tmp3Reg).addReg(Tmp2Reg); 6003 BuildMI(BB, dl, TII->get(PPC::STWCX)) 6004 .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg); 6005 BuildMI(BB, dl, TII->get(PPC::BCC)) 6006 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 6007 BB->addSuccessor(loopMBB); 6008 BB->addSuccessor(exitMBB); 6009 6010 // exitMBB: 6011 // ... 6012 BB = exitMBB; 6013 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg) 6014 .addReg(ShiftReg); 6015 return BB; 6016 } 6017 6018 llvm::MachineBasicBlock* 6019 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI, 6020 MachineBasicBlock *MBB) const { 6021 DebugLoc DL = MI->getDebugLoc(); 6022 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6023 6024 MachineFunction *MF = MBB->getParent(); 6025 MachineRegisterInfo &MRI = MF->getRegInfo(); 6026 6027 const BasicBlock *BB = MBB->getBasicBlock(); 6028 MachineFunction::iterator I = MBB; 6029 ++I; 6030 6031 // Memory Reference 6032 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 6033 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 6034 6035 unsigned DstReg = MI->getOperand(0).getReg(); 6036 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 6037 assert(RC->hasType(MVT::i32) && "Invalid destination!"); 6038 unsigned mainDstReg = MRI.createVirtualRegister(RC); 6039 unsigned restoreDstReg = MRI.createVirtualRegister(RC); 6040 6041 MVT PVT = getPointerTy(); 6042 assert((PVT == MVT::i64 || PVT == MVT::i32) && 6043 "Invalid Pointer Size!"); 6044 // For v = setjmp(buf), we generate 6045 // 6046 // thisMBB: 6047 // SjLjSetup mainMBB 6048 // bl mainMBB 6049 // v_restore = 1 6050 // b sinkMBB 6051 // 6052 // mainMBB: 6053 // buf[LabelOffset] = LR 6054 // v_main = 0 6055 // 6056 // sinkMBB: 6057 // v = phi(main, restore) 6058 // 6059 6060 MachineBasicBlock *thisMBB = MBB; 6061 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 6062 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 6063 MF->insert(I, mainMBB); 6064 MF->insert(I, sinkMBB); 6065 6066 MachineInstrBuilder MIB; 6067 6068 // Transfer the remainder of BB and its successor edges to sinkMBB. 6069 sinkMBB->splice(sinkMBB->begin(), MBB, 6070 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 6071 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 6072 6073 // Note that the structure of the jmp_buf used here is not compatible 6074 // with that used by libc, and is not designed to be. Specifically, it 6075 // stores only those 'reserved' registers that LLVM does not otherwise 6076 // understand how to spill. Also, by convention, by the time this 6077 // intrinsic is called, Clang has already stored the frame address in the 6078 // first slot of the buffer and stack address in the third. Following the 6079 // X86 target code, we'll store the jump address in the second slot. We also 6080 // need to save the TOC pointer (R2) to handle jumps between shared 6081 // libraries, and that will be stored in the fourth slot. The thread 6082 // identifier (R13) is not affected. 6083 6084 // thisMBB: 6085 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 6086 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 6087 6088 // Prepare IP either in reg. 
6089 const TargetRegisterClass *PtrRC = getRegClassFor(PVT); 6090 unsigned LabelReg = MRI.createVirtualRegister(PtrRC); 6091 unsigned BufReg = MI->getOperand(1).getReg(); 6092 6093 if (PPCSubTarget.isPPC64() && PPCSubTarget.isSVR4ABI()) { 6094 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD)) 6095 .addReg(PPC::X2) 6096 .addImm(TOCOffset) 6097 .addReg(BufReg); 6098 6099 MIB.setMemRefs(MMOBegin, MMOEnd); 6100 } 6101 6102 // Setup 6103 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB); 6104 const PPCRegisterInfo *TRI = 6105 static_cast<const PPCRegisterInfo*>(getTargetMachine().getRegisterInfo()); 6106 MIB.addRegMask(TRI->getNoPreservedMask()); 6107 6108 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1); 6109 6110 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup)) 6111 .addMBB(mainMBB); 6112 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB); 6113 6114 thisMBB->addSuccessor(mainMBB, /* weight */ 0); 6115 thisMBB->addSuccessor(sinkMBB, /* weight */ 1); 6116 6117 // mainMBB: 6118 // mainDstReg = 0 6119 MIB = BuildMI(mainMBB, DL, 6120 TII->get(PPCSubTarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg); 6121 6122 // Store IP 6123 if (PPCSubTarget.isPPC64()) { 6124 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD)) 6125 .addReg(LabelReg) 6126 .addImm(LabelOffset) 6127 .addReg(BufReg); 6128 } else { 6129 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW)) 6130 .addReg(LabelReg) 6131 .addImm(LabelOffset) 6132 .addReg(BufReg); 6133 } 6134 6135 MIB.setMemRefs(MMOBegin, MMOEnd); 6136 6137 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0); 6138 mainMBB->addSuccessor(sinkMBB); 6139 6140 // sinkMBB: 6141 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 6142 TII->get(PPC::PHI), DstReg) 6143 .addReg(mainDstReg).addMBB(mainMBB) 6144 .addReg(restoreDstReg).addMBB(thisMBB); 6145 6146 MI->eraseFromParent(); 6147 return sinkMBB; 6148 } 6149 6150 MachineBasicBlock * 6151 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI, 6152 MachineBasicBlock *MBB) const { 6153 DebugLoc DL = MI->getDebugLoc(); 6154 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6155 6156 MachineFunction *MF = MBB->getParent(); 6157 MachineRegisterInfo &MRI = MF->getRegInfo(); 6158 6159 // Memory Reference 6160 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 6161 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 6162 6163 MVT PVT = getPointerTy(); 6164 assert((PVT == MVT::i64 || PVT == MVT::i32) && 6165 "Invalid Pointer Size!"); 6166 6167 const TargetRegisterClass *RC = 6168 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 6169 unsigned Tmp = MRI.createVirtualRegister(RC); 6170 // Since FP is only updated here but NOT referenced, it's treated as GPR. 6171 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; 6172 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; 6173 6174 MachineInstrBuilder MIB; 6175 6176 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 6177 const int64_t SPOffset = 2 * PVT.getStoreSize(); 6178 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 6179 6180 unsigned BufReg = MI->getOperand(0).getReg(); 6181 6182 // Reload FP (the jumped-to function may not have had a 6183 // frame pointer, and if so, then its r31 will be restored 6184 // as necessary). 
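// The offsets used below match the buffer layout written on the setjmp
// side (see emitEHSjLjSetJmp): slot 0 holds the frame pointer, slot 1 the
// jump address, slot 2 the stack pointer, and slot 3 the TOC pointer, with
// each slot PVT.getStoreSize() bytes wide.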
6185 if (PVT == MVT::i64) { 6186 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP) 6187 .addImm(0) 6188 .addReg(BufReg); 6189 } else { 6190 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP) 6191 .addImm(0) 6192 .addReg(BufReg); 6193 } 6194 MIB.setMemRefs(MMOBegin, MMOEnd); 6195 6196 // Reload IP 6197 if (PVT == MVT::i64) { 6198 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp) 6199 .addImm(LabelOffset) 6200 .addReg(BufReg); 6201 } else { 6202 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp) 6203 .addImm(LabelOffset) 6204 .addReg(BufReg); 6205 } 6206 MIB.setMemRefs(MMOBegin, MMOEnd); 6207 6208 // Reload SP 6209 if (PVT == MVT::i64) { 6210 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP) 6211 .addImm(SPOffset) 6212 .addReg(BufReg); 6213 } else { 6214 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP) 6215 .addImm(SPOffset) 6216 .addReg(BufReg); 6217 } 6218 MIB.setMemRefs(MMOBegin, MMOEnd); 6219 6220 // FIXME: When we also support base pointers, that register must also be 6221 // restored here. 6222 6223 // Reload TOC 6224 if (PVT == MVT::i64 && PPCSubTarget.isSVR4ABI()) { 6225 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) 6226 .addImm(TOCOffset) 6227 .addReg(BufReg); 6228 6229 MIB.setMemRefs(MMOBegin, MMOEnd); 6230 } 6231 6232 // Jump 6233 BuildMI(*MBB, MI, DL, 6234 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp); 6235 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR)); 6236 6237 MI->eraseFromParent(); 6238 return MBB; 6239 } 6240 6241 MachineBasicBlock * 6242 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 6243 MachineBasicBlock *BB) const { 6244 if (MI->getOpcode() == PPC::EH_SjLj_SetJmp32 || 6245 MI->getOpcode() == PPC::EH_SjLj_SetJmp64) { 6246 return emitEHSjLjSetJmp(MI, BB); 6247 } else if (MI->getOpcode() == PPC::EH_SjLj_LongJmp32 || 6248 MI->getOpcode() == PPC::EH_SjLj_LongJmp64) { 6249 return emitEHSjLjLongJmp(MI, BB); 6250 } 6251 6252 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6253 6254 // To "insert" these instructions we actually have to insert their 6255 // control-flow patterns. 6256 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6257 MachineFunction::iterator It = BB; 6258 ++It; 6259 6260 MachineFunction *F = BB->getParent(); 6261 6262 if (PPCSubTarget.hasISEL() && (MI->getOpcode() == PPC::SELECT_CC_I4 || 6263 MI->getOpcode() == PPC::SELECT_CC_I8)) { 6264 SmallVector<MachineOperand, 2> Cond; 6265 Cond.push_back(MI->getOperand(4)); 6266 Cond.push_back(MI->getOperand(1)); 6267 6268 DebugLoc dl = MI->getDebugLoc(); 6269 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6270 TII->insertSelect(*BB, MI, dl, MI->getOperand(0).getReg(), 6271 Cond, MI->getOperand(2).getReg(), 6272 MI->getOperand(3).getReg()); 6273 } else if (MI->getOpcode() == PPC::SELECT_CC_I4 || 6274 MI->getOpcode() == PPC::SELECT_CC_I8 || 6275 MI->getOpcode() == PPC::SELECT_CC_F4 || 6276 MI->getOpcode() == PPC::SELECT_CC_F8 || 6277 MI->getOpcode() == PPC::SELECT_CC_VRRC) { 6278 6279 6280 // The incoming instruction knows the destination vreg to set, the 6281 // condition code register to branch on, the true/false values to 6282 // select between, and a branch opcode to use. 6283 6284 // thisMBB: 6285 // ... 6286 // TrueVal = ... 
6287 // cmpTY ccX, r1, r2 6288 // bCC copy1MBB 6289 // fallthrough --> copy0MBB 6290 MachineBasicBlock *thisMBB = BB; 6291 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 6292 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 6293 unsigned SelectPred = MI->getOperand(4).getImm(); 6294 DebugLoc dl = MI->getDebugLoc(); 6295 F->insert(It, copy0MBB); 6296 F->insert(It, sinkMBB); 6297 6298 // Transfer the remainder of BB and its successor edges to sinkMBB. 6299 sinkMBB->splice(sinkMBB->begin(), BB, 6300 llvm::next(MachineBasicBlock::iterator(MI)), 6301 BB->end()); 6302 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 6303 6304 // Next, add the true and fallthrough blocks as its successors. 6305 BB->addSuccessor(copy0MBB); 6306 BB->addSuccessor(sinkMBB); 6307 6308 BuildMI(BB, dl, TII->get(PPC::BCC)) 6309 .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 6310 6311 // copy0MBB: 6312 // %FalseValue = ... 6313 // # fallthrough to sinkMBB 6314 BB = copy0MBB; 6315 6316 // Update machine-CFG edges 6317 BB->addSuccessor(sinkMBB); 6318 6319 // sinkMBB: 6320 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 6321 // ... 6322 BB = sinkMBB; 6323 BuildMI(*BB, BB->begin(), dl, 6324 TII->get(PPC::PHI), MI->getOperand(0).getReg()) 6325 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB) 6326 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 6327 } 6328 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 6329 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 6330 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 6331 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 6332 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 6333 BB = EmitAtomicBinary(MI, BB, false, PPC::ADD4); 6334 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 6335 BB = EmitAtomicBinary(MI, BB, true, PPC::ADD8); 6336 6337 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 6338 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 6339 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 6340 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 6341 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 6342 BB = EmitAtomicBinary(MI, BB, false, PPC::AND); 6343 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 6344 BB = EmitAtomicBinary(MI, BB, true, PPC::AND8); 6345 6346 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 6347 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 6348 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 6349 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 6350 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 6351 BB = EmitAtomicBinary(MI, BB, false, PPC::OR); 6352 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 6353 BB = EmitAtomicBinary(MI, BB, true, PPC::OR8); 6354 6355 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 6356 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 6357 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 6358 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 6359 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 6360 BB = EmitAtomicBinary(MI, BB, false, PPC::XOR); 6361 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 6362 BB = EmitAtomicBinary(MI, BB, true, PPC::XOR8); 6363 6364 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 6365 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ANDC); 6366 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 6367 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ANDC); 6368 else if 
(MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 6369 BB = EmitAtomicBinary(MI, BB, false, PPC::ANDC); 6370 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 6371 BB = EmitAtomicBinary(MI, BB, true, PPC::ANDC8); 6372 6373 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 6374 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 6375 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 6376 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 6377 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32) 6378 BB = EmitAtomicBinary(MI, BB, false, PPC::SUBF); 6379 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 6380 BB = EmitAtomicBinary(MI, BB, true, PPC::SUBF8); 6381 6382 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8) 6383 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 6384 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16) 6385 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 6386 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32) 6387 BB = EmitAtomicBinary(MI, BB, false, 0); 6388 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64) 6389 BB = EmitAtomicBinary(MI, BB, true, 0); 6390 6391 else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 6392 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64) { 6393 bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64; 6394 6395 unsigned dest = MI->getOperand(0).getReg(); 6396 unsigned ptrA = MI->getOperand(1).getReg(); 6397 unsigned ptrB = MI->getOperand(2).getReg(); 6398 unsigned oldval = MI->getOperand(3).getReg(); 6399 unsigned newval = MI->getOperand(4).getReg(); 6400 DebugLoc dl = MI->getDebugLoc(); 6401 6402 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 6403 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 6404 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 6405 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 6406 F->insert(It, loop1MBB); 6407 F->insert(It, loop2MBB); 6408 F->insert(It, midMBB); 6409 F->insert(It, exitMBB); 6410 exitMBB->splice(exitMBB->begin(), BB, 6411 llvm::next(MachineBasicBlock::iterator(MI)), 6412 BB->end()); 6413 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 6414 6415 // thisMBB: 6416 // ... 6417 // fallthrough --> loopMBB 6418 BB->addSuccessor(loop1MBB); 6419 6420 // loop1MBB: 6421 // l[wd]arx dest, ptr 6422 // cmp[wd] dest, oldval 6423 // bne- midMBB 6424 // loop2MBB: 6425 // st[wd]cx. newval, ptr 6426 // bne- loopMBB 6427 // b exitBB 6428 // midMBB: 6429 // st[wd]cx. dest, ptr 6430 // exitBB: 6431 BB = loop1MBB; 6432 BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest) 6433 .addReg(ptrA).addReg(ptrB); 6434 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0) 6435 .addReg(oldval).addReg(dest); 6436 BuildMI(BB, dl, TII->get(PPC::BCC)) 6437 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 6438 BB->addSuccessor(loop2MBB); 6439 BB->addSuccessor(midMBB); 6440 6441 BB = loop2MBB; 6442 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 6443 .addReg(newval).addReg(ptrA).addReg(ptrB); 6444 BuildMI(BB, dl, TII->get(PPC::BCC)) 6445 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 6446 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 6447 BB->addSuccessor(loop1MBB); 6448 BB->addSuccessor(exitMBB); 6449 6450 BB = midMBB; 6451 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 6452 .addReg(dest).addReg(ptrA).addReg(ptrB); 6453 BB->addSuccessor(exitMBB); 6454 6455 // exitMBB: 6456 // ... 
6457 BB = exitMBB; 6458 } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 6459 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 6460 // We must use 64-bit registers for addresses when targeting 64-bit, 6461 // since we're actually doing arithmetic on them. Other registers 6462 // can be 32-bit. 6463 bool is64bit = PPCSubTarget.isPPC64(); 6464 bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 6465 6466 unsigned dest = MI->getOperand(0).getReg(); 6467 unsigned ptrA = MI->getOperand(1).getReg(); 6468 unsigned ptrB = MI->getOperand(2).getReg(); 6469 unsigned oldval = MI->getOperand(3).getReg(); 6470 unsigned newval = MI->getOperand(4).getReg(); 6471 DebugLoc dl = MI->getDebugLoc(); 6472 6473 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 6474 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 6475 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 6476 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 6477 F->insert(It, loop1MBB); 6478 F->insert(It, loop2MBB); 6479 F->insert(It, midMBB); 6480 F->insert(It, exitMBB); 6481 exitMBB->splice(exitMBB->begin(), BB, 6482 llvm::next(MachineBasicBlock::iterator(MI)), 6483 BB->end()); 6484 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 6485 6486 MachineRegisterInfo &RegInfo = F->getRegInfo(); 6487 const TargetRegisterClass *RC = 6488 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass : 6489 (const TargetRegisterClass *) &PPC::GPRCRegClass; 6490 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 6491 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 6492 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 6493 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC); 6494 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC); 6495 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC); 6496 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC); 6497 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 6498 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 6499 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 6500 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 6501 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 6502 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 6503 unsigned Ptr1Reg; 6504 unsigned TmpReg = RegInfo.createVirtualRegister(RC); 6505 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 6506 // thisMBB: 6507 // ... 6508 // fallthrough --> loopMBB 6509 BB->addSuccessor(loop1MBB); 6510 6511 // The 4-byte load must be aligned, while a char or short may be 6512 // anywhere in the word. Hence all this nasty bookkeeping code. 6513 // add ptr1, ptrA, ptrB [copy if ptrA==0] 6514 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 6515 // xori shift, shift1, 24 [16] 6516 // rlwinm ptr, ptr1, 0, 0, 29 6517 // slw newval2, newval, shift 6518 // slw oldval2, oldval,shift 6519 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 6520 // slw mask, mask2, shift 6521 // and newval3, newval2, mask 6522 // and oldval3, oldval2, mask 6523 // loop1MBB: 6524 // lwarx tmpDest, ptr 6525 // and tmp, tmpDest, mask 6526 // cmpw tmp, oldval3 6527 // bne- midMBB 6528 // loop2MBB: 6529 // andc tmp2, tmpDest, mask 6530 // or tmp4, tmp2, newval3 6531 // stwcx. tmp4, ptr 6532 // bne- loop1MBB 6533 // b exitBB 6534 // midMBB: 6535 // stwcx. tmpDest, ptr 6536 // exitBB: 6537 // srw dest, tmpDest, shift 6538 if (ptrA != ZeroReg) { 6539 Ptr1Reg = RegInfo.createVirtualRegister(RC); 6540 BuildMI(BB, dl, TII->get(is64bit ? 
PPC::ADD8 : PPC::ADD4), Ptr1Reg) 6541 .addReg(ptrA).addReg(ptrB); 6542 } else { 6543 Ptr1Reg = ptrB; 6544 } 6545 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 6546 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 6547 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 6548 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 6549 if (is64bit) 6550 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 6551 .addReg(Ptr1Reg).addImm(0).addImm(61); 6552 else 6553 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 6554 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 6555 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 6556 .addReg(newval).addReg(ShiftReg); 6557 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 6558 .addReg(oldval).addReg(ShiftReg); 6559 if (is8bit) 6560 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 6561 else { 6562 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 6563 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 6564 .addReg(Mask3Reg).addImm(65535); 6565 } 6566 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 6567 .addReg(Mask2Reg).addReg(ShiftReg); 6568 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 6569 .addReg(NewVal2Reg).addReg(MaskReg); 6570 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 6571 .addReg(OldVal2Reg).addReg(MaskReg); 6572 6573 BB = loop1MBB; 6574 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 6575 .addReg(ZeroReg).addReg(PtrReg); 6576 BuildMI(BB, dl, TII->get(PPC::AND),TmpReg) 6577 .addReg(TmpDestReg).addReg(MaskReg); 6578 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 6579 .addReg(TmpReg).addReg(OldVal3Reg); 6580 BuildMI(BB, dl, TII->get(PPC::BCC)) 6581 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 6582 BB->addSuccessor(loop2MBB); 6583 BB->addSuccessor(midMBB); 6584 6585 BB = loop2MBB; 6586 BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg) 6587 .addReg(TmpDestReg).addReg(MaskReg); 6588 BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg) 6589 .addReg(Tmp2Reg).addReg(NewVal3Reg); 6590 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg) 6591 .addReg(ZeroReg).addReg(PtrReg); 6592 BuildMI(BB, dl, TII->get(PPC::BCC)) 6593 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 6594 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 6595 BB->addSuccessor(loop1MBB); 6596 BB->addSuccessor(exitMBB); 6597 6598 BB = midMBB; 6599 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg) 6600 .addReg(ZeroReg).addReg(PtrReg); 6601 BB->addSuccessor(exitMBB); 6602 6603 // exitMBB: 6604 // ... 6605 BB = exitMBB; 6606 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg) 6607 .addReg(ShiftReg); 6608 } else if (MI->getOpcode() == PPC::FADDrtz) { 6609 // This pseudo performs an FADD with rounding mode temporarily forced 6610 // to round-to-zero. We emit this via custom inserter since the FPSCR 6611 // is not modeled at the SelectionDAG level. 6612 unsigned Dest = MI->getOperand(0).getReg(); 6613 unsigned Src1 = MI->getOperand(1).getReg(); 6614 unsigned Src2 = MI->getOperand(2).getReg(); 6615 DebugLoc dl = MI->getDebugLoc(); 6616 6617 MachineRegisterInfo &RegInfo = F->getRegInfo(); 6618 unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 6619 6620 // Save FPSCR value. 6621 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg); 6622 6623 // Set rounding mode to round-to-zero. 6624 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31); 6625 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30); 6626 6627 // Perform addition. 
6628 BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2); 6629 6630 // Restore FPSCR value. 6631 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF)).addImm(1).addReg(MFFSReg); 6632 } else if (MI->getOpcode() == PPC::FRINDrint || 6633 MI->getOpcode() == PPC::FRINSrint) { 6634 bool isf32 = MI->getOpcode() == PPC::FRINSrint; 6635 unsigned Dest = MI->getOperand(0).getReg(); 6636 unsigned Src = MI->getOperand(1).getReg(); 6637 DebugLoc dl = MI->getDebugLoc(); 6638 6639 MachineRegisterInfo &RegInfo = F->getRegInfo(); 6640 unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 6641 6642 // Perform the rounding. 6643 BuildMI(*BB, MI, dl, TII->get(isf32 ? PPC::FRINS : PPC::FRIND), Dest) 6644 .addReg(Src); 6645 6646 // Compare the results. 6647 BuildMI(*BB, MI, dl, TII->get(isf32 ? PPC::FCMPUS : PPC::FCMPUD), CRReg) 6648 .addReg(Dest).addReg(Src); 6649 6650 // If the results were not equal, then set the FPSCR XX bit. 6651 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 6652 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 6653 F->insert(It, midMBB); 6654 F->insert(It, exitMBB); 6655 exitMBB->splice(exitMBB->begin(), BB, 6656 llvm::next(MachineBasicBlock::iterator(MI)), 6657 BB->end()); 6658 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 6659 6660 BuildMI(*BB, MI, dl, TII->get(PPC::BCC)) 6661 .addImm(PPC::PRED_EQ).addReg(CRReg).addMBB(exitMBB); 6662 6663 BB->addSuccessor(midMBB); 6664 BB->addSuccessor(exitMBB); 6665 6666 BB = midMBB; 6667 6668 // Set the FPSCR XX bit (FE_INEXACT). Note that we cannot just set 6669 // the FI bit here because that will not automatically set XX also, 6670 // and XX is what libm interprets as the FE_INEXACT flag. 6671 BuildMI(BB, dl, TII->get(PPC::MTFSB1)).addImm(/* 38 - 32 = */ 6); 6672 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 6673 6674 BB->addSuccessor(exitMBB); 6675 6676 BB = exitMBB; 6677 } else { 6678 llvm_unreachable("Unexpected instr type to insert"); 6679 } 6680 6681 MI->eraseFromParent(); // The pseudo instruction is gone now. 6682 return BB; 6683 } 6684 6685 //===----------------------------------------------------------------------===// 6686 // Target Optimization Hooks 6687 //===----------------------------------------------------------------------===// 6688 6689 SDValue PPCTargetLowering::DAGCombineFastRecip(SDValue Op, 6690 DAGCombinerInfo &DCI) const { 6691 if (DCI.isAfterLegalizeVectorOps()) 6692 return SDValue(); 6693 6694 EVT VT = Op.getValueType(); 6695 6696 if ((VT == MVT::f32 && PPCSubTarget.hasFRES()) || 6697 (VT == MVT::f64 && PPCSubTarget.hasFRE()) || 6698 (VT == MVT::v4f32 && PPCSubTarget.hasAltivec())) { 6699 6700 // Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i) 6701 // For the reciprocal, we need to find the zero of the function: 6702 // F(X) = A X - 1 [which has a zero at X = 1/A] 6703 // => 6704 // X_{i+1} = X_i (2 - A X_i) = X_i + X_i (1 - A X_i) [this second form 6705 // does not require additional intermediate precision] 6706 6707 // Convergence is quadratic, so we essentially double the number of digits 6708 // correct after every iteration. The minimum architected relative 6709 // accuracy is 2^-5. When hasRecipPrec(), this is 2^-14. IEEE float has 6710 // 23 digits and double has 52 digits. 6711 int Iterations = PPCSubTarget.hasRecipPrec() ? 
1 : 3; 6712 if (VT.getScalarType() == MVT::f64) 6713 ++Iterations; 6714 6715 SelectionDAG &DAG = DCI.DAG; 6716 SDLoc dl(Op); 6717 6718 SDValue FPOne = 6719 DAG.getConstantFP(1.0, VT.getScalarType()); 6720 if (VT.isVector()) { 6721 assert(VT.getVectorNumElements() == 4 && 6722 "Unknown vector type"); 6723 FPOne = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, 6724 FPOne, FPOne, FPOne, FPOne); 6725 } 6726 6727 SDValue Est = DAG.getNode(PPCISD::FRE, dl, VT, Op); 6728 DCI.AddToWorklist(Est.getNode()); 6729 6730 // Newton iterations: Est = Est + Est (1 - Arg * Est) 6731 for (int i = 0; i < Iterations; ++i) { 6732 SDValue NewEst = DAG.getNode(ISD::FMUL, dl, VT, Op, Est); 6733 DCI.AddToWorklist(NewEst.getNode()); 6734 6735 NewEst = DAG.getNode(ISD::FSUB, dl, VT, FPOne, NewEst); 6736 DCI.AddToWorklist(NewEst.getNode()); 6737 6738 NewEst = DAG.getNode(ISD::FMUL, dl, VT, Est, NewEst); 6739 DCI.AddToWorklist(NewEst.getNode()); 6740 6741 Est = DAG.getNode(ISD::FADD, dl, VT, Est, NewEst); 6742 DCI.AddToWorklist(Est.getNode()); 6743 } 6744 6745 return Est; 6746 } 6747 6748 return SDValue(); 6749 } 6750 6751 SDValue PPCTargetLowering::DAGCombineFastRecipFSQRT(SDValue Op, 6752 DAGCombinerInfo &DCI) const { 6753 if (DCI.isAfterLegalizeVectorOps()) 6754 return SDValue(); 6755 6756 EVT VT = Op.getValueType(); 6757 6758 if ((VT == MVT::f32 && PPCSubTarget.hasFRSQRTES()) || 6759 (VT == MVT::f64 && PPCSubTarget.hasFRSQRTE()) || 6760 (VT == MVT::v4f32 && PPCSubTarget.hasAltivec())) { 6761 6762 // Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i) 6763 // For the reciprocal sqrt, we need to find the zero of the function: 6764 // F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)] 6765 // => 6766 // X_{i+1} = X_i (1.5 - A X_i^2 / 2) 6767 // As a result, we precompute A/2 prior to the iteration loop. 6768 6769 // Convergence is quadratic, so we essentially double the number of digits 6770 // correct after every iteration. The minimum architected relative 6771 // accuracy is 2^-5. When hasRecipPrec(), this is 2^-14. IEEE float has 6772 // 23 digits and double has 52 digits. 6773 int Iterations = PPCSubTarget.hasRecipPrec() ? 1 : 3; 6774 if (VT.getScalarType() == MVT::f64) 6775 ++Iterations; 6776 6777 SelectionDAG &DAG = DCI.DAG; 6778 SDLoc dl(Op); 6779 6780 SDValue FPThreeHalves = 6781 DAG.getConstantFP(1.5, VT.getScalarType()); 6782 if (VT.isVector()) { 6783 assert(VT.getVectorNumElements() == 4 && 6784 "Unknown vector type"); 6785 FPThreeHalves = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, 6786 FPThreeHalves, FPThreeHalves, 6787 FPThreeHalves, FPThreeHalves); 6788 } 6789 6790 SDValue Est = DAG.getNode(PPCISD::FRSQRTE, dl, VT, Op); 6791 DCI.AddToWorklist(Est.getNode()); 6792 6793 // We now need 0.5*Arg which we can write as (1.5*Arg - Arg) so that 6794 // this entire sequence requires only one FP constant. 
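// As a hypothetical worked example: for A = 2.0, HalfArg = 1.0, and
// starting from an estimate of 0.6875 (within the architected 2^-5
// accuracy), one iteration gives
//   0.6875 * (1.5 - 1.0 * 0.6875 * 0.6875) ~= 0.70630,
// versus 1/sqrt(2) ~= 0.70711; each pass roughly doubles the number of
// correct digits.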
6795 SDValue HalfArg = DAG.getNode(ISD::FMUL, dl, VT, FPThreeHalves, Op);
6796 DCI.AddToWorklist(HalfArg.getNode());
6797
6798 HalfArg = DAG.getNode(ISD::FSUB, dl, VT, HalfArg, Op);
6799 DCI.AddToWorklist(HalfArg.getNode());
6800
6801 // Newton iterations: Est = Est * (1.5 - HalfArg * Est * Est)
6802 for (int i = 0; i < Iterations; ++i) {
6803 SDValue NewEst = DAG.getNode(ISD::FMUL, dl, VT, Est, Est);
6804 DCI.AddToWorklist(NewEst.getNode());
6805
6806 NewEst = DAG.getNode(ISD::FMUL, dl, VT, HalfArg, NewEst);
6807 DCI.AddToWorklist(NewEst.getNode());
6808
6809 NewEst = DAG.getNode(ISD::FSUB, dl, VT, FPThreeHalves, NewEst);
6810 DCI.AddToWorklist(NewEst.getNode());
6811
6812 Est = DAG.getNode(ISD::FMUL, dl, VT, Est, NewEst);
6813 DCI.AddToWorklist(Est.getNode());
6814 }
6815
6816 return Est;
6817 }
6818
6819 return SDValue();
6820 }
6821
6822 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
6823 // not enforce equality of the chain operands.
6824 static bool isConsecutiveLS(LSBaseSDNode *LS, LSBaseSDNode *Base,
6825 unsigned Bytes, int Dist,
6826 SelectionDAG &DAG) {
6827 EVT VT = LS->getMemoryVT();
6828 if (VT.getSizeInBits() / 8 != Bytes)
6829 return false;
6830
6831 SDValue Loc = LS->getBasePtr();
6832 SDValue BaseLoc = Base->getBasePtr();
6833 if (Loc.getOpcode() == ISD::FrameIndex) {
6834 if (BaseLoc.getOpcode() != ISD::FrameIndex)
6835 return false;
6836 const MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
6837 int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
6838 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
6839 int FS = MFI->getObjectSize(FI);
6840 int BFS = MFI->getObjectSize(BFI);
6841 if (FS != BFS || FS != (int)Bytes) return false;
6842 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
6843 }
6844
6845 // Handle X+C
6846 if (DAG.isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc &&
6847 cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes)
6848 return true;
6849
6850 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6851 const GlobalValue *GV1 = NULL;
6852 const GlobalValue *GV2 = NULL;
6853 int64_t Offset1 = 0;
6854 int64_t Offset2 = 0;
6855 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
6856 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
6857 if (isGA1 && isGA2 && GV1 == GV2)
6858 return Offset1 == (Offset2 + Dist*Bytes);
6859 return false;
6860 }
6861
6862 // Return true if there is a nearby consecutive load to the one provided
6863 // (regardless of alignment). We search up and down the chain, looking through
6864 // token factors and other loads (but nothing else). As a result, a true
6865 // result indicates that it is safe to create a new consecutive load adjacent
6866 // to the load provided.
6867 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
6868 SDValue Chain = LD->getChain();
6869 EVT VT = LD->getMemoryVT();
6870
6871 SmallSet<SDNode *, 16> LoadRoots;
6872 SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
6873 SmallSet<SDNode *, 16> Visited;
6874
6875 // First, search up the chain, branching to follow all token-factor operands.
6876 // If we find a consecutive load, then we're done; otherwise, record all
6877 // nodes just above the top-level loads and token factors.
6878 while (!Queue.empty()) {
6879 SDNode *ChainNext = Queue.pop_back_val();
6880 if (!Visited.insert(ChainNext))
6881 continue;
6882
6883 if (LoadSDNode *ChainLD = dyn_cast<LoadSDNode>(ChainNext)) {
6884 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
6885 return true;
6886
6887 if (!Visited.count(ChainLD->getChain().getNode()))
6888 Queue.push_back(ChainLD->getChain().getNode());
6889 } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
6890 for (SDNode::op_iterator O = ChainNext->op_begin(),
6891 OE = ChainNext->op_end(); O != OE; ++O)
6892 if (!Visited.count(O->getNode()))
6893 Queue.push_back(O->getNode());
6894 } else
6895 LoadRoots.insert(ChainNext);
6896 }
6897
6898 // Second, search down the chain, starting from the top-level nodes recorded
6899 // in the first phase. These top-level nodes are the nodes just above all
6900 // loads and token factors. Starting with their uses, recursively look through
6901 // all loads (just the chain uses) and token factors to find a consecutive
6902 // load.
6903 Visited.clear();
6904 Queue.clear();
6905
6906 for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
6907 IE = LoadRoots.end(); I != IE; ++I) {
6908 Queue.push_back(*I);
6909
6910 while (!Queue.empty()) {
6911 SDNode *LoadRoot = Queue.pop_back_val();
6912 if (!Visited.insert(LoadRoot))
6913 continue;
6914
6915 if (LoadSDNode *ChainLD = dyn_cast<LoadSDNode>(LoadRoot))
6916 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
6917 return true;
6918
6919 for (SDNode::use_iterator UI = LoadRoot->use_begin(),
6920 UE = LoadRoot->use_end(); UI != UE; ++UI)
6921 if (((isa<LoadSDNode>(*UI) &&
6922 cast<LoadSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
6923 UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
6924 Queue.push_back(*UI);
6925 }
6926 }
6927
6928 return false;
6929 }
6930
6931 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
6932 DAGCombinerInfo &DCI) const {
6933 const TargetMachine &TM = getTargetMachine();
6934 SelectionDAG &DAG = DCI.DAG;
6935 SDLoc dl(N);
6936 switch (N->getOpcode()) {
6937 default: break;
6938 case PPCISD::SHL:
6939 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
6940 if (C->isNullValue()) // 0 << V -> 0.
6941 return N->getOperand(0);
6942 }
6943 break;
6944 case PPCISD::SRL:
6945 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
6946 if (C->isNullValue()) // 0 >>u V -> 0.
6947 return N->getOperand(0);
6948 }
6949 break;
6950 case PPCISD::SRA:
6951 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
6952 if (C->isNullValue() || // 0 >>s V -> 0.
6953 C->isAllOnesValue()) // -1 >>s V -> -1.
6954 return N->getOperand(0); 6955 } 6956 break; 6957 case ISD::FDIV: { 6958 assert(TM.Options.UnsafeFPMath && 6959 "Reciprocal estimates require UnsafeFPMath"); 6960 6961 if (N->getOperand(1).getOpcode() == ISD::FSQRT) { 6962 SDValue RV = 6963 DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0), DCI); 6964 if (RV.getNode() != 0) { 6965 DCI.AddToWorklist(RV.getNode()); 6966 return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), 6967 N->getOperand(0), RV); 6968 } 6969 } else if (N->getOperand(1).getOpcode() == ISD::FP_EXTEND && 6970 N->getOperand(1).getOperand(0).getOpcode() == ISD::FSQRT) { 6971 SDValue RV = 6972 DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0).getOperand(0), 6973 DCI); 6974 if (RV.getNode() != 0) { 6975 DCI.AddToWorklist(RV.getNode()); 6976 RV = DAG.getNode(ISD::FP_EXTEND, SDLoc(N->getOperand(1)), 6977 N->getValueType(0), RV); 6978 DCI.AddToWorklist(RV.getNode()); 6979 return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), 6980 N->getOperand(0), RV); 6981 } 6982 } else if (N->getOperand(1).getOpcode() == ISD::FP_ROUND && 6983 N->getOperand(1).getOperand(0).getOpcode() == ISD::FSQRT) { 6984 SDValue RV = 6985 DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0).getOperand(0), 6986 DCI); 6987 if (RV.getNode() != 0) { 6988 DCI.AddToWorklist(RV.getNode()); 6989 RV = DAG.getNode(ISD::FP_ROUND, SDLoc(N->getOperand(1)), 6990 N->getValueType(0), RV, 6991 N->getOperand(1).getOperand(1)); 6992 DCI.AddToWorklist(RV.getNode()); 6993 return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), 6994 N->getOperand(0), RV); 6995 } 6996 } 6997 6998 SDValue RV = DAGCombineFastRecip(N->getOperand(1), DCI); 6999 if (RV.getNode() != 0) { 7000 DCI.AddToWorklist(RV.getNode()); 7001 return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), 7002 N->getOperand(0), RV); 7003 } 7004 7005 } 7006 break; 7007 case ISD::FSQRT: { 7008 assert(TM.Options.UnsafeFPMath && 7009 "Reciprocal estimates require UnsafeFPMath"); 7010 7011 // Compute this as 1/(1/sqrt(X)), which is the reciprocal of the 7012 // reciprocal sqrt. 7013 SDValue RV = DAGCombineFastRecipFSQRT(N->getOperand(0), DCI); 7014 if (RV.getNode() != 0) { 7015 DCI.AddToWorklist(RV.getNode()); 7016 RV = DAGCombineFastRecip(RV, DCI); 7017 if (RV.getNode() != 0) 7018 return RV; 7019 } 7020 7021 } 7022 break; 7023 case ISD::SINT_TO_FP: 7024 if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) { 7025 if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) { 7026 // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores. 7027 // We allow the src/dst to be either f32/f64, but the intermediate 7028 // type must be i64. 7029 if (N->getOperand(0).getValueType() == MVT::i64 && 7030 N->getOperand(0).getOperand(0).getValueType() != MVT::ppcf128) { 7031 SDValue Val = N->getOperand(0).getOperand(0); 7032 if (Val.getValueType() == MVT::f32) { 7033 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 7034 DCI.AddToWorklist(Val.getNode()); 7035 } 7036 7037 Val = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Val); 7038 DCI.AddToWorklist(Val.getNode()); 7039 Val = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Val); 7040 DCI.AddToWorklist(Val.getNode()); 7041 if (N->getValueType(0) == MVT::f32) { 7042 Val = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Val, 7043 DAG.getIntPtrConstant(0)); 7044 DCI.AddToWorklist(Val.getNode()); 7045 } 7046 return Val; 7047 } else if (N->getOperand(0).getValueType() == MVT::i32) { 7048 // If the intermediate type is i32, we can avoid the load/store here 7049 // too. 
7050 } 7051 } 7052 } 7053 break; 7054 case ISD::STORE: 7055 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)). 7056 if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() && 7057 !cast<StoreSDNode>(N)->isTruncatingStore() && 7058 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT && 7059 N->getOperand(1).getValueType() == MVT::i32 && 7060 N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) { 7061 SDValue Val = N->getOperand(1).getOperand(0); 7062 if (Val.getValueType() == MVT::f32) { 7063 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 7064 DCI.AddToWorklist(Val.getNode()); 7065 } 7066 Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val); 7067 DCI.AddToWorklist(Val.getNode()); 7068 7069 SDValue Ops[] = { 7070 N->getOperand(0), Val, N->getOperand(2), 7071 DAG.getValueType(N->getOperand(1).getValueType()) 7072 }; 7073 7074 Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 7075 DAG.getVTList(MVT::Other), Ops, array_lengthof(Ops), 7076 cast<StoreSDNode>(N)->getMemoryVT(), 7077 cast<StoreSDNode>(N)->getMemOperand()); 7078 DCI.AddToWorklist(Val.getNode()); 7079 return Val; 7080 } 7081 7082 // Turn STORE (BSWAP) -> sthbrx/stwbrx. 7083 if (cast<StoreSDNode>(N)->isUnindexed() && 7084 N->getOperand(1).getOpcode() == ISD::BSWAP && 7085 N->getOperand(1).getNode()->hasOneUse() && 7086 (N->getOperand(1).getValueType() == MVT::i32 || 7087 N->getOperand(1).getValueType() == MVT::i16 || 7088 (TM.getSubtarget<PPCSubtarget>().hasLDBRX() && 7089 TM.getSubtarget<PPCSubtarget>().isPPC64() && 7090 N->getOperand(1).getValueType() == MVT::i64))) { 7091 SDValue BSwapOp = N->getOperand(1).getOperand(0); 7092 // Do an any-extend to 32-bits if this is a half-word input. 7093 if (BSwapOp.getValueType() == MVT::i16) 7094 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp); 7095 7096 SDValue Ops[] = { 7097 N->getOperand(0), BSwapOp, N->getOperand(2), 7098 DAG.getValueType(N->getOperand(1).getValueType()) 7099 }; 7100 return 7101 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other), 7102 Ops, array_lengthof(Ops), 7103 cast<StoreSDNode>(N)->getMemoryVT(), 7104 cast<StoreSDNode>(N)->getMemOperand()); 7105 } 7106 break; 7107 case ISD::LOAD: { 7108 LoadSDNode *LD = cast<LoadSDNode>(N); 7109 EVT VT = LD->getValueType(0); 7110 Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); 7111 unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(Ty); 7112 if (ISD::isNON_EXTLoad(N) && VT.isVector() && 7113 TM.getSubtarget<PPCSubtarget>().hasAltivec() && 7114 DCI.getDAGCombineLevel() == AfterLegalizeTypes && 7115 LD->getAlignment() < ABIAlignment) { 7116 // This is a type-legal unaligned Altivec load. 7117 SDValue Chain = LD->getChain(); 7118 SDValue Ptr = LD->getBasePtr(); 7119 7120 // This implements the loading of unaligned vectors as described in 7121 // the venerable Apple Velocity Engine overview. Specifically: 7122 // https://developer.apple.com/hardwaredrivers/ve/alignment.html 7123 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html 7124 // 7125 // The general idea is to expand a sequence of one or more unaligned 7126 // loads into a alignment-based permutation-control instruction (lvsl), 7127 // a series of regular vector loads (which always truncate their 7128 // input address to an aligned address), and a series of permutations. 7129 // The results of these permutations are the requested loaded values. 
7130 // The trick is that the last "extra" load is not taken from the address 7131 // you might suspect (sizeof(vector) bytes after the last requested 7132 // load), but rather sizeof(vector) - 1 bytes after the last 7133 // requested vector. The point of this is to avoid a page fault if the 7134 // base address happend to be aligned. This works because if the base 7135 // address is aligned, then adding less than a full vector length will 7136 // cause the last vector in the sequence to be (re)loaded. Otherwise, 7137 // the next vector will be fetched as you might suspect was necessary. 7138 7139 // We might be able to reuse the permutation generation from 7140 // a different base address offset from this one by an aligned amount. 7141 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this 7142 // optimization later. 7143 SDValue PermCntl = BuildIntrinsicOp(Intrinsic::ppc_altivec_lvsl, Ptr, 7144 DAG, dl, MVT::v16i8); 7145 7146 // Refine the alignment of the original load (a "new" load created here 7147 // which was identical to the first except for the alignment would be 7148 // merged with the existing node regardless). 7149 MachineFunction &MF = DAG.getMachineFunction(); 7150 MachineMemOperand *MMO = 7151 MF.getMachineMemOperand(LD->getPointerInfo(), 7152 LD->getMemOperand()->getFlags(), 7153 LD->getMemoryVT().getStoreSize(), 7154 ABIAlignment); 7155 LD->refineAlignment(MMO); 7156 SDValue BaseLoad = SDValue(LD, 0); 7157 7158 // Note that the value of IncOffset (which is provided to the next 7159 // load's pointer info offset value, and thus used to calculate the 7160 // alignment), and the value of IncValue (which is actually used to 7161 // increment the pointer value) are different! This is because we 7162 // require the next load to appear to be aligned, even though it 7163 // is actually offset from the base pointer by a lesser amount. 7164 int IncOffset = VT.getSizeInBits() / 8; 7165 int IncValue = IncOffset; 7166 7167 // Walk (both up and down) the chain looking for another load at the real 7168 // (aligned) offset (the alignment of the other load does not matter in 7169 // this case). If found, then do not use the offset reduction trick, as 7170 // that will prevent the loads from being later combined (as they would 7171 // otherwise be duplicates). 7172 if (!findConsecutiveLoad(LD, DAG)) 7173 --IncValue; 7174 7175 SDValue Increment = DAG.getConstant(IncValue, getPointerTy()); 7176 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 7177 7178 SDValue ExtraLoad = 7179 DAG.getLoad(VT, dl, Chain, Ptr, 7180 LD->getPointerInfo().getWithOffset(IncOffset), 7181 LD->isVolatile(), LD->isNonTemporal(), 7182 LD->isInvariant(), ABIAlignment); 7183 7184 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 7185 BaseLoad.getValue(1), ExtraLoad.getValue(1)); 7186 7187 if (BaseLoad.getValueType() != MVT::v4i32) 7188 BaseLoad = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, BaseLoad); 7189 7190 if (ExtraLoad.getValueType() != MVT::v4i32) 7191 ExtraLoad = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, ExtraLoad); 7192 7193 SDValue Perm = BuildIntrinsicOp(Intrinsic::ppc_altivec_vperm, 7194 BaseLoad, ExtraLoad, PermCntl, DAG, dl); 7195 7196 if (VT != MVT::v4i32) 7197 Perm = DAG.getNode(ISD::BITCAST, dl, VT, Perm); 7198 7199 // Now we need to be really careful about how we update the users of the 7200 // original load. 
We cannot just call DCI.CombineTo (or
7201 // DAG.ReplaceAllUsesWith for that matter), because the load still has
7202 // uses created here (the permutation for example) that need to stay.
7203 SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
7204 while (UI != UE) {
7205 SDUse &Use = UI.getUse();
7206 SDNode *User = *UI;
7207 // Note: BaseLoad is checked here because it might not be N, but a
7208 // bitcast of N.
7209 if (User == Perm.getNode() || User == BaseLoad.getNode() ||
7210 User == TF.getNode() || Use.getResNo() > 1) {
7211 ++UI;
7212 continue;
7213 }
7214
7215 SDValue To = Use.getResNo() ? TF : Perm;
7216 ++UI;
7217
7218 SmallVector<SDValue, 8> Ops;
7219 for (SDNode::op_iterator O = User->op_begin(),
7220 OE = User->op_end(); O != OE; ++O) {
7221 if (*O == Use)
7222 Ops.push_back(To);
7223 else
7224 Ops.push_back(*O);
7225 }
7226
7227 DAG.UpdateNodeOperands(User, Ops.data(), Ops.size());
7228 }
7229
7230 return SDValue(N, 0);
7231 }
7232 }
7233 break;
7234 case ISD::INTRINSIC_WO_CHAIN:
7235 if (cast<ConstantSDNode>(N->getOperand(0))->getZExtValue() ==
7236 Intrinsic::ppc_altivec_lvsl &&
7237 N->getOperand(1)->getOpcode() == ISD::ADD) {
7238 SDValue Add = N->getOperand(1);
7239
7240 if (DAG.MaskedValueIsZero(Add->getOperand(1),
7241 APInt::getAllOnesValue(4 /* 16 byte alignment */).zext(
7242 Add.getValueType().getScalarType().getSizeInBits()))) {
7243 SDNode *BasePtr = Add->getOperand(0).getNode();
7244 for (SDNode::use_iterator UI = BasePtr->use_begin(),
7245 UE = BasePtr->use_end(); UI != UE; ++UI) {
7246 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
7247 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() ==
7248 Intrinsic::ppc_altivec_lvsl) {
7249 // We've found another LVSL, and this address is an aligned
7250 // multiple of that one. The results will be the same, so use the
7251 // one we've just found instead.
7252
7253 return SDValue(*UI, 0);
7254 }
7255 }
7256 }
7257 }
break;
7258 case ISD::BSWAP:
7259 // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
7260 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
7261 N->getOperand(0).hasOneUse() &&
7262 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
7263 (TM.getSubtarget<PPCSubtarget>().hasLDBRX() &&
7264 TM.getSubtarget<PPCSubtarget>().isPPC64() &&
7265 N->getValueType(0) == MVT::i64))) {
7266 SDValue Load = N->getOperand(0);
7267 LoadSDNode *LD = cast<LoadSDNode>(Load);
7268 // Create the byte-swapping load.
7269 SDValue Ops[] = {
7270 LD->getChain(), // Chain
7271 LD->getBasePtr(), // Ptr
7272 DAG.getValueType(N->getValueType(0)) // VT
7273 };
7274 SDValue BSLoad =
7275 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
7276 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
7277 MVT::i64 : MVT::i32, MVT::Other),
7278 Ops, 3, LD->getMemoryVT(), LD->getMemOperand());
7279
7280 // If this is an i16 load, insert the truncate.
7281 SDValue ResVal = BSLoad;
7282 if (N->getValueType(0) == MVT::i16)
7283 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
7284
7285 // First, combine the bswap away. This makes the value produced by the
7286 // load dead.
7287 DCI.CombineTo(N, ResVal);
7288
7289 // Next, combine the load away; we give it a bogus result value but a real
7290 // chain result. The result value is dead because the bswap is dead.
7291 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
7292
7293 // Return N so it doesn't get rechecked!
7294 return SDValue(N, 0); 7295 } 7296 7297 break; 7298 case PPCISD::VCMP: { 7299 // If a VCMPo node already exists with exactly the same operands as this 7300 // node, use its result instead of this node (VCMPo computes both a CR6 and 7301 // a normal output). 7302 // 7303 if (!N->getOperand(0).hasOneUse() && 7304 !N->getOperand(1).hasOneUse() && 7305 !N->getOperand(2).hasOneUse()) { 7306 7307 // Scan all of the users of the LHS, looking for VCMPo's that match. 7308 SDNode *VCMPoNode = 0; 7309 7310 SDNode *LHSN = N->getOperand(0).getNode(); 7311 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 7312 UI != E; ++UI) 7313 if (UI->getOpcode() == PPCISD::VCMPo && 7314 UI->getOperand(1) == N->getOperand(1) && 7315 UI->getOperand(2) == N->getOperand(2) && 7316 UI->getOperand(0) == N->getOperand(0)) { 7317 VCMPoNode = *UI; 7318 break; 7319 } 7320 7321 // If there is no VCMPo node, or if the flag value has a single use, don't 7322 // transform this. 7323 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 7324 break; 7325 7326 // Look at the (necessarily single) use of the flag value. If it has a 7327 // chain, this transformation is more complex. Note that multiple things 7328 // could use the value result, which we should ignore. 7329 SDNode *FlagUser = 0; 7330 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 7331 FlagUser == 0; ++UI) { 7332 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 7333 SDNode *User = *UI; 7334 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 7335 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) { 7336 FlagUser = User; 7337 break; 7338 } 7339 } 7340 } 7341 7342 // If the user is a MFOCRF instruction, we know this is safe. 7343 // Otherwise we give up for right now. 7344 if (FlagUser->getOpcode() == PPCISD::MFOCRF) 7345 return SDValue(VCMPoNode, 0); 7346 } 7347 break; 7348 } 7349 case ISD::BR_CC: { 7350 // If this is a branch on an altivec predicate comparison, lower this so 7351 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This 7352 // lowering is done pre-legalize, because the legalizer lowers the predicate 7353 // compare down to code that is difficult to reassemble. 7354 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 7355 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 7356 7357 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero 7358 // value. If so, pass-through the AND to get to the intrinsic. 7359 if (LHS.getOpcode() == ISD::AND && 7360 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN && 7361 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() == 7362 Intrinsic::ppc_is_decremented_ctr_nonzero && 7363 isa<ConstantSDNode>(LHS.getOperand(1)) && 7364 !cast<ConstantSDNode>(LHS.getOperand(1))->getConstantIntValue()-> 7365 isZero()) 7366 LHS = LHS.getOperand(0); 7367 7368 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN && 7369 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 7370 Intrinsic::ppc_is_decremented_ctr_nonzero && 7371 isa<ConstantSDNode>(RHS)) { 7372 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && 7373 "Counter decrement comparison is not EQ or NE"); 7374 7375 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 7376 bool isBDNZ = (CC == ISD::SETEQ && Val) || 7377 (CC == ISD::SETNE && !Val); 7378 7379 // We now need to make the intrinsic dead (it cannot be instruction 7380 // selected). 
      DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
      assert(LHS.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
                         N->getOperand(0), N->getOperand(4));
    }

    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        getAltivecCompareInfo(LHS, CompareOpc, isDot)) {
      assert(isDot && "Can't compare against a vector result!");

      // If this is a comparison against something other than 0/1, then we know
      // that the condition is never/always true.
      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ)      // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, dl, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

      // Create the PPCISD altivec 'dot' comparison node.
      SDValue Ops[] = {
        LHS.getOperand(2),  // LHS of compare
        LHS.getOperand(3),  // RHS of compare
        DAG.getConstant(CompareOpc, MVT::i32)
      };
      EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
      SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3);

      // Unpack the result based on how the target uses it.
      PPC::Predicate CompOpc;
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
                         DAG.getConstant(CompOpc, MVT::i32),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  }

  return SDValue();
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                       APInt &KnownZero,
                                                       APInt &KnownOne,
                                                       const SelectionDAG &DAG,
                                                       unsigned Depth) const {
  KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
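    // Operand 2 of the LBRX node is a VTSDNode recording the type of the
    // original (pre-byte-swap) load, as built in the BSWAP combine above; for
    // an i16 load the upper 16 bits of the i32 result are therefore zero.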
    if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
      KnownZero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
      KnownZero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}


/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'v':
    case 'y':
      return C_RegisterClass;
    case 'Z':
      // FIXME: While Z does indicate a memory constraint, it specifically
      // indicates an r+r address (used in conjunction with the 'y' modifier
      // in the replacement string). Currently, we're forcing the base
      // register to be r0 in the asm printer (which is interpreted as zero)
      // and forming the complete address in the second register. This is
      // suboptimal.
      return C_Memory;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
PPCTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
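  // Most register constraints are weighted as CW_Register only when the
  // operand's IR type is something the named register class can hold ('y',
  // the CR-register constraint, is accepted unconditionally); 'Z' is a
  // memory constraint and is weighted as CW_Memory.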
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'b':
    if (type->isIntegerTy())
      weight = CW_Register;
    break;
  case 'f':
    if (type->isFloatTy())
      weight = CW_Register;
    break;
  case 'd':
    if (type->isDoubleTy())
      weight = CW_Register;
    break;
  case 'v':
    if (type->isVectorTy())
      weight = CW_Register;
    break;
  case 'y':
    weight = CW_Register;
    break;
  case 'Z':
    weight = CW_Memory;
    break;
  }
  return weight;
}

std::pair<unsigned, const TargetRegisterClass*>
PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
      if (VT == MVT::i64 && PPCSubTarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
      return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
    case 'r':   // R0-R31
      if (VT == MVT::i64 && PPCSubTarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RCRegClass);
      return std::make_pair(0U, &PPC::GPRCRegClass);
    case 'f':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &PPC::F4RCRegClass);
      if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &PPC::F8RCRegClass);
      break;
    case 'v':
      return std::make_pair(0U, &PPC::VRRCRegClass);
    case 'y':   // crrc
      return std::make_pair(0U, &PPC::CRRCRegClass);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}


/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0, 0);

  // Only support length 1 constraints.
  if (Constraint.length() > 1) return;

  char Letter = Constraint[0];
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
    if (!CST) return; // Must be an immediate to match.
    unsigned Value = CST->getZExtValue();
    switch (Letter) {
    default: llvm_unreachable("Unknown constraint letter!");
    case 'I':  // "I" is a signed 16-bit constant.
      if ((short)Value == (int)Value)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
      if ((short)Value == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
      if ((Value >> 16) == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'M':  // "M" is a constant that is greater than 31.
      if (Value > 31)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'N':  // "N" is a positive constant that is an exact power of two.
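      // (The cast to int rejects values with the sign bit set, so 2^31 does
      // not count as "positive" here.)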
      if ((int)Value > 0 && isPowerOf2_32(Value))
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'O':  // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      if ((short)-Value == (int)-Value)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              Type *Ty) const {
  // FIXME: PPC does not allow r+i addressing modes for vectors!

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Beyond that, PPC only supports r+i and r+r addressing:
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setReturnAddressIsTaken(true);

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = PPCSubTarget.isPPC64();
  bool isDarwinABI = PPCSubTarget.isDarwinABI();

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
      DAG.getConstant(PPCFrameLowering::getReturnSaveOffset(isPPC64,
                                                            isDarwinABI),
                      isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, getPointerTy(),
                                   FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

  // Just load the return address off the stack.
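  // Depth == 0: getReturnAddrFrameIndex gives us a frame index for the link
  // register save slot of the current frame, which the setLRStoreRequired()
  // call above ensures is actually written.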
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
                     RetAddrFI, MachinePointerInfo(), false, false, false, 0);
}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setFrameAddressIsTaken(true);

  // Naked functions never have a frame pointer, and so we use r1. For all
  // other functions, this decision must be delayed until PEI.
  unsigned FrameReg;
  if (MF.getFunction()->getAttributes().hasAttribute(
        AttributeSet::FunctionIndex, Attribute::Naked))
    FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
  else
    FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;

  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
                                         PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo(), false, false,
                            false, 0);
  return FrameAddr;
}

bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
  return false;
}

/// getOptimalMemOpType - Returns the target-specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove lowering.
/// If DstAlign is zero, that means the destination alignment can satisfy any
/// constraint. Similarly, if SrcAlign is zero, there is no need to check it
/// against the alignment requirement, probably because the source does not
/// need to be loaded. If 'IsMemset' is true, that means it's expanding a
/// memset. If 'ZeroMemset' is true, that means it's a memset of zero.
/// 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
/// not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
                                           unsigned DstAlign, unsigned SrcAlign,
                                           bool IsMemset, bool ZeroMemset,
                                           bool MemcpyStrSrc,
                                           MachineFunction &MF) const {
  if (this->PPCSubTarget.isPPC64()) {
    return MVT::i64;
  } else {
    return MVT::i32;
  }
}

bool PPCTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
                                                      bool *Fast) const {
  if (DisablePPCUnaligned)
    return false;

  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual expansion,
  // and generally only traps to software emulation when crossing page
  // boundaries.
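  // The checks below reject the cases the statement above does not cover:
  // non-simple (extended) types, vector types (Altivec loads and stores
  // simply ignore the low-order address bits rather than performing a true
  // unaligned access), and ppcf128.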
  if (!VT.isSimple())
    return false;

  if (VT.getSimpleVT().isVector())
    return false;

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  default:
    break;
  }

  return false;
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref)
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}