//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCTargetMachine.h"
#include "PPCTargetObjectFile.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
    cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
    cl::desc("disable setting the node scheduling preference to ILP on PPC"),
    cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
    cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

static TargetLoweringObjectFile *createTLOF(const Triple &TT) {
  // If it isn't a Mach-O file then it's going to be a linux ELF
  // object file.
  if (TT.isOSDarwin())
    return new TargetLoweringObjectFileMachO();

  return new PPC64LinuxTargetObjectFile();
}

PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
    : TargetLowering(TM, createTLOF(Triple(TM.getTargetTriple()))),
      Subtarget(*TM.getSubtargetImpl()) {
  setPow2DivIsCheap();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
  addRegisterClass(MVT::f64, &PPC::F8RCRegClass);

  // PowerPC has an i16 SEXTLOAD, but no i8 (or i1) SEXTLOAD.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-increment load and store forms.
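  // For example, lwzu loads a word and writes the updated effective address
  // back into the base register in a single instruction:
  //   lwzu r3, 4(r4)   ; r3 = *(r4 + 4), then r4 = r4 + 4
  // Marking ISD::PRE_INC indexed loads/stores Legal below lets the DAG
  // combiner form these.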
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
    setTruncStoreAction(MVT::i64, MVT::i1, Expand);
    setTruncStoreAction(MVT::i32, MVT::i1, Expand);
    setTruncStoreAction(MVT::i16, MVT::i1, Expand);
    setTruncStoreAction(MVT::i8, MVT::i1, Expand);

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // This is used in the ppcf128->int sequence. Note it has different semantics
  // from FP_ROUND: that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
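  // With those combined forms marked Expand, legalization lowers an i32 srem
  // as the plain three-instruction sequence instead:
  //   t = sdiv a, b;  t2 = mul t, b;  rem = sub a, t2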
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN,    MVT::f64, Expand);
  setOperationAction(ISD::FCOS,    MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM,    MVT::f64, Expand);
  setOperationAction(ISD::FPOW,    MVT::f64, Expand);
  setOperationAction(ISD::FMA,     MVT::f64, Legal);
  setOperationAction(ISD::FSIN,    MVT::f32, Expand);
  setOperationAction(ISD::FCOS,    MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM,    MVT::f32, Expand);
  setOperationAction(ISD::FPOW,    MVT::f32, Expand);
  setOperationAction(ISD::FMA,     MVT::f32, Legal);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath &&
        Subtarget.hasFRSQRTE() && Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath &&
        Subtarget.hasFRSQRTES() && Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, CTPOP or CTTZ.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ,  MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ,  MVT::i64, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);

  if (Subtarget.hasPOPCNTD()) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have SELECT.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::f64, Expand);

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP are NOT intended to support SjLj exception
  // handling; they are a lightweight setjmp/longjmp replacement used to
  // support continuations, user-level threading, and so on. As a result, no
  // other SjLj exception interfaces are implemented, so please don't build
  // your own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
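      // (An i8 va_arg, for instance, is read as a full i64 slot whose low
      // bits are then truncated to the requested width.)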
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  if (Subtarget.isSVR4ABI() && !isPPC64)
    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  // Comparisons that require checking two conditions.
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
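  // (FPCVT corresponds to the Power ISA 2.06 conversion instructions such as
  // fctiwuz and fcfids.)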
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
      MVT::SimpleValueType VT = (MVT::SimpleValueType)i;

      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FPOWI, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTLZ, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

      for (unsigned j = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
           j <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++j) {
        MVT::SimpleValueType InnerVT = (MVT::SimpleValueType)j;
        setTruncStoreAction(VT, InnerVT, Expand);
      }
      setLoadExtAction(ISD::SEXTLOAD, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUGT, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUGE, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETULT, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETULE, MVT::v4f32, Expand);

    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
      setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
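      // (As with v4f32 above, the unordered predicates have no single
      // hardware compare and must be expanded into ordered compares.)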
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUGT, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUGE, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETULT, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETULE, MVT::v2f64, Expand);

      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      // VSX v2i64 only supports non-arithmetic operations.
      setOperationAction(ISD::ADD, MVT::v2i64, Expand);
      setOperationAction(ISD::SUB, MVT::v2i64, Expand);

      setOperationAction(ISD::SHL, MVT::v2i64, Expand);
      setOperationAction(ISD::SRA, MVT::v2i64, Expand);
      setOperationAction(ISD::SRL, MVT::v2i64, Expand);

      setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Vector operation legalization checks the result type of
      // SIGN_EXTEND_INREG, overall legalization checks the inner type.
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }
  }

  if (Subtarget.has64BitSupport()) {
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  }

  setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);

  setBooleanContents(ZeroOrOneBooleanContent);
  // Altivec instructions set fields to all zeros or all ones.
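  // (vcmpequw, for example, writes 0xFFFFFFFF into each lane that compares
  // equal, i.e. -1 rather than +1.)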
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (isPPC64) {
    setStackPointerRegisterToSaveRestore(PPC::X1);
    setExceptionPointerRegister(PPC::X3);
    setExceptionSelectorRegister(PPC::X4);
  } else {
    setStackPointerRegisterToSaveRestore(PPC::R1);
    setExceptionPointerRegister(PPC::R3);
    setExceptionSelectorRegister(PPC::R4);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits())
    setHasMultipleConditionRegisters();

  setMinFunctionAlignment(2);
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(4);

  if (isPPC64 && Subtarget.isJITCodeModel())
    // Temporary workaround for the inability of PPC64 JIT to handle jump
    // tables.
    setSupportJumpTables(false);

  setInsertFencesForAtomic(true);

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties();

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;

    setPrefFunctionAlignment(4);
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
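/// For example, a struct containing a <4 x i32> member yields a MaxAlign of
/// 16 when Altivec is available, since 128-bit vectors want 16-byte alignment.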
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty) const {
  // Darwin passes everything on 4 byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary.
  // The rest is on an 8-byte boundary on PPC64 and a 4-byte boundary on PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
  return Align;
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return nullptr;
  case PPCISD::FSEL:            return "PPCISD::FSEL";
  case PPCISD::FCFID:           return "PPCISD::FCFID";
  case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
  case PPCISD::FRE:             return "PPCISD::FRE";
  case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX:          return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:         return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:        return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:           return "PPCISD::VPERM";
  case PPCISD::Hi:              return "PPCISD::Hi";
  case PPCISD::Lo:              return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
  case PPCISD::LOAD:            return "PPCISD::LOAD";
  case PPCISD::LOAD_TOC:        return "PPCISD::LOAD_TOC";
  case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
  case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:             return "PPCISD::SRL";
  case PPCISD::SRA:             return "PPCISD::SRA";
  case PPCISD::SHL:             return "PPCISD::SHL";
  case PPCISD::CALL:            return "PPCISD::CALL";
  case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR:           return "PPCISD::MTCTR";
  case PPCISD::BCTRL:           return "PPCISD::BCTRL";
  case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
  case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
  case PPCISD::VCMP:            return "PPCISD::VCMP";
  case PPCISD::VCMPo:           return "PPCISD::VCMPo";
  case PPCISD::LBRX:            return "PPCISD::LBRX";
  case PPCISD::STBRX:           return "PPCISD::STBRX";
  case PPCISD::LARX:            return "PPCISD::LARX";
  case PPCISD::STCX:            return "PPCISD::STCX";
  case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ:            return "PPCISD::BDNZ";
  case PPCISD::BDZ:             return "PPCISD::BDZ";
  case PPCISD::MFFS:            return "PPCISD::MFFS";
  case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET:          return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
  case PPCISD::ADDIS_TOC_HA:    return "PPCISD::ADDIS_TOC_HA";
  case PPCISD::LD_TOC_L:        return "PPCISD::LD_TOC_L";
  case PPCISD::ADDI_TOC_L:      return "PPCISD::ADDI_TOC_L";
  case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
  case PPCISD::SC:              return "PPCISD::SC";
  }
}

EVT PPCTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary,
                               SelectionDAG &DAG) {
  unsigned j = DAG.getTarget().getDataLayout()->isLittleEndian() ? 0 : 1;
  if (!isUnary) {
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+j))
        return false;
  } else {
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
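/// For example, in the big-endian binary form the byte mask
/// {2,3, 6,7, 10,11, 14,15, 18,19, 22,23, 26,27, 30,31} selects the low
/// halfword of each word across both input vectors.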
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary,
                               SelectionDAG &DAG) {
  unsigned j, k;
  if (DAG.getTarget().getDataLayout()->isLittleEndian()) {
    j = 0;
    k = 1;
  } else {
    j = 2;
    k = 3;
  }
  if (!isUnary) {
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+k))
        return false;
  } else {
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+k) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+k))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             bool isUnary, SelectionDAG &DAG) {
  if (DAG.getTarget().getDataLayout()->isLittleEndian()) {
    if (!isUnary)
      return isVMerge(N, UnitSize, 0, 16);
    return isVMerge(N, UnitSize, 0, 0);
  } else {
    if (!isUnary)
      return isVMerge(N, UnitSize, 8, 24);
    return isVMerge(N, UnitSize, 8, 8);
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             bool isUnary, SelectionDAG &DAG) {
  if (DAG.getTarget().getDataLayout()->isLittleEndian()) {
    if (!isUnary)
      return isVMerge(N, UnitSize, 8, 24);
    return isVMerge(N, UnitSize, 8, 8);
  } else {
    if (!isUnary)
      return isVMerge(N, UnitSize, 0, 16);
    return isVMerge(N, UnitSize, 0, 0);
  }
}


/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary, SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::v16i8)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;

  if (DAG.getTarget().getDataLayout()->isLittleEndian()) {

    ShiftAmt += i;

    if (!isUnary) {
      // Check the rest of the elements to see if they are consecutive.
      for (++i; i != 16; ++i)
        if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt - i))
          return -1;
    } else {
      // Check the rest of the elements to see if they are consecutive.
      for (++i; i != 16; ++i)
        if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt - i) & 15))
          return -1;
    }

  } else {  // Big Endian

    ShiftAmt -= i;

    if (!isUnary) {
      // Check the rest of the elements to see if they are consecutive.
      for (++i; i != 16; ++i)
        if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
          return -1;
    } else {
      // Check the rest of the elements to see if they are consecutive.
      for (++i; i != 16; ++i)
        if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
          return -1;
    }
  }
  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte
  // element splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}

/// isAllNegativeZeroVector - Returns true if all elements of build_vector
/// are -0.0.
bool PPC::isAllNegativeZeroVector(SDNode *N) {
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);

  APInt APVal, APUndef;
  unsigned BitSize;
  bool HasAnyUndefs;

  if (BV->isConstantSplat(APVal, APUndef, BitSize, HasAnyUndefs, 32, true))
    if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
      return CFP->getValueAPF().isNegZero();

  return false;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
                                SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  if (DAG.getTarget().getDataLayout()->isLittleEndian())
    return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  else
    return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted. The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
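/// For example, with ByteSize == 2, the big-endian v16i8 constant
/// <0,1,0,1,...,0,1> can be materialized by "vspltish 1" (splatting the
/// halfword 0x0001), so the returned constant is 1.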
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(nullptr, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk. See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1. If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.

      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16)
        return DAG.getTargetConstant(Val, MVT::i32);  // 0,0,0,4 -> vspltisw(4)
    }
    if (LeadingOnes) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(~0U, MVT::i32);  // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16)                                 // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (!OpVal.getNode())
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat. The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal. Continue doing this until we
  // get to ByteSize. This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
        (Value & ((1 << (8*ValSizeInBytes))-1)))
      return SDValue();
  }

  // Properly sign extend the value.
  int MaskVal = SignExtend32(Value, ByteSize * 8);

  // If this is zero, don't match; zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5-bit sext field, return it.
  if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDValue();
}

//===----------------------------------------------------------------------===//
// Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value. If so, this returns true and the
/// immediate.
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
static bool isIntS16Immediate(SDValue Op, short &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}


/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation. Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index,
                                            SelectionDAG &DAG) const {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;  // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;  // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;  // r+i can fold it if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are provably
    // disjoint.
    APInt LHSKnownZero, LHSKnownOne;
    APInt RHSKnownZero, RHSKnownOne;
    DAG.computeKnownBits(N.getOperand(0),
                         LHSKnownZero, LHSKnownOne);

    if (LHSKnownZero.getBoolValue()) {
      DAG.computeKnownBits(N.getOperand(1),
                           RHSKnownZero, RHSKnownOne);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnownZero | RHSKnownZero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

// If we happen to be doing an i64 load or store into a stack slot that has
// less than a 4-byte alignment, then the frame-index elimination may need to
// use an indexed load or store instruction (because the offset may not be a
// multiple of 4). The extra register needed to hold the offset comes from the
// register scavenger, and it is possible that the scavenger will need to use
// an emergency spill slot. As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
  // FIXME: This does not handle the LWA case.
  if (VT != MVT::i64)
    return;

  // NOTE: We'll exclude negative FIs here, which come from argument
  // lowering, because there are no known test cases triggering this problem
  // using packed structures (or similar). We can remove this exclusion if
  // we find such a test case. The reason why this is so test-case driven is
  // because this entire 'fixup' is only to prevent crashes (from the
  // register scavenger) on not-really-valid inputs. For example, if we have:
  //   %a = alloca i1
  //   %b = bitcast i1* %a to i64*
  //   store i64* %a, i64 %b
  // then the store should really be marked as 'align 1', but is not. If it
  // were marked as 'align 1' then the indexed form would have been
  // instruction-selected initially, and the problem this 'fixup' is preventing
  // won't happen regardless.
  if (FrameIdx < 0)
    return;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  unsigned Align = MFI->getObjectAlignment(FrameIdx);
  if (Align >= 4)
    return;

  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setHasNonRISpills();
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg. If Aligned is true, only accept displacements
/// suitable for STD and friends, i.e. multiples of 4.
bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
                                            SDValue &Base,
                                            SelectionDAG &DAG,
                                            bool Aligned) const {
  // FIXME: dl should come from the parent load or store, not from the address.
  SDLoc dl(N);
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!Aligned || (imm & 3) == 0)) {
      Disp = DAG.getTargetConstant(imm, N.getValueType());
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
        fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true;  // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!Aligned || (imm & 3) == 0)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.computeKnownBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);

      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant(imm, N.getValueType());
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0".
    short Imm;
    if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) {
      Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
      Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                             CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if ((CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
        (!Aligned || (CN->getZExtValue() & 3) == 0)) {
      int Addr = (int)CN->getZExtValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
    fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
  } else
    Base = N;
  return true;  // [r+0]
}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
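/// (This is used for memory operations that only exist in X-form, such as
/// the byte-reversed lwbrx/stwbrx loads and stores.)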
1379 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base, 1380 SDValue &Index, 1381 SelectionDAG &DAG) const { 1382 // Check to see if we can easily represent this as an [r+r] address. This 1383 // will fail if it thinks that the address is more profitably represented as 1384 // reg+imm, e.g. where imm = 0. 1385 if (SelectAddressRegReg(N, Base, Index, DAG)) 1386 return true; 1387 1388 // If the operand is an addition, always emit this as [r+r], since this is 1389 // better (for code size, and execution, as the memop does the add for free) 1390 // than emitting an explicit add. 1391 if (N.getOpcode() == ISD::ADD) { 1392 Base = N.getOperand(0); 1393 Index = N.getOperand(1); 1394 return true; 1395 } 1396 1397 // Otherwise, do it the hard way, using R0 as the base register. 1398 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, 1399 N.getValueType()); 1400 Index = N; 1401 return true; 1402 } 1403 1404 /// getPreIndexedAddressParts - returns true by value, base pointer and 1405 /// offset pointer and addressing mode by reference if the node's address 1406 /// can be legally represented as pre-indexed load / store address. 1407 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 1408 SDValue &Offset, 1409 ISD::MemIndexedMode &AM, 1410 SelectionDAG &DAG) const { 1411 if (DisablePPCPreinc) return false; 1412 1413 bool isLoad = true; 1414 SDValue Ptr; 1415 EVT VT; 1416 unsigned Alignment; 1417 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1418 Ptr = LD->getBasePtr(); 1419 VT = LD->getMemoryVT(); 1420 Alignment = LD->getAlignment(); 1421 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 1422 Ptr = ST->getBasePtr(); 1423 VT = ST->getMemoryVT(); 1424 Alignment = ST->getAlignment(); 1425 isLoad = false; 1426 } else 1427 return false; 1428 1429 // PowerPC doesn't have preinc load/store instructions for vectors. 1430 if (VT.isVector()) 1431 return false; 1432 1433 if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) { 1434 1435 // Common code will reject creating a pre-inc form if the base pointer 1436 // is a frame index, or if N is a store and the base pointer is either 1437 // the same as or a predecessor of the value being stored. Check for 1438 // those situations here, and try with swapped Base/Offset instead. 1439 bool Swap = false; 1440 1441 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base)) 1442 Swap = true; 1443 else if (!isLoad) { 1444 SDValue Val = cast<StoreSDNode>(N)->getValue(); 1445 if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode())) 1446 Swap = true; 1447 } 1448 1449 if (Swap) 1450 std::swap(Base, Offset); 1451 1452 AM = ISD::PRE_INC; 1453 return true; 1454 } 1455 1456 // LDU/STU can only handle immediates that are a multiple of 4. 1457 if (VT != MVT::i64) { 1458 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, false)) 1459 return false; 1460 } else { 1461 // LDU/STU need an address with at least 4-byte alignment. 1462 if (Alignment < 4) 1463 return false; 1464 1465 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, true)) 1466 return false; 1467 } 1468 1469 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1470 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 1471 // sext i32 to i64 when addr mode is r+i. 
1472     if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
1473         LD->getExtensionType() == ISD::SEXTLOAD &&
1474         isa<ConstantSDNode>(Offset))
1475       return false;
1476   }
1477
1478   AM = ISD::PRE_INC;
1479   return true;
1480 }
1481
1482 //===----------------------------------------------------------------------===//
1483 //  LowerOperation implementation
1484 //===----------------------------------------------------------------------===//
1485
1486 /// GetLabelAccessInfo - Return true if we should reference labels using a
1487 /// PICBase, set the HiOpFlags and LoOpFlags to the target MO flags.
1488 static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags,
1489                                unsigned &LoOpFlags,
1490                                const GlobalValue *GV = nullptr) {
1491   HiOpFlags = PPCII::MO_HA;
1492   LoOpFlags = PPCII::MO_LO;
1493
1494   // Don't use the PIC base unless we are using the PIC relocation model on
1495   // Darwin; we don't support PIC on other platforms yet.
1496   bool isPIC = TM.getRelocationModel() == Reloc::PIC_ &&
1497                TM.getSubtarget<PPCSubtarget>().isDarwin();
1498   if (isPIC) {
1499     HiOpFlags |= PPCII::MO_PIC_FLAG;
1500     LoOpFlags |= PPCII::MO_PIC_FLAG;
1501   }
1502
1503   // If this is a reference to a global value that requires a non-lazy-ptr, make
1504   // sure that instruction lowering adds it.
1505   if (GV && TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV, TM)) {
1506     HiOpFlags |= PPCII::MO_NLP_FLAG;
1507     LoOpFlags |= PPCII::MO_NLP_FLAG;
1508
1509     if (GV->hasHiddenVisibility()) {
1510       HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
1511       LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
1512     }
1513   }
1514
1515   return isPIC;
1516 }
1517
1518 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
1519                              SelectionDAG &DAG) {
1520   EVT PtrVT = HiPart.getValueType();
1521   SDValue Zero = DAG.getConstant(0, PtrVT);
1522   SDLoc DL(HiPart);
1523
1524   SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
1525   SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
1526
1527   // With PIC, the first instruction is actually "GR+hi(&G)".
1528   if (isPIC)
1529     Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
1530                      DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
1531
1532   // Generate non-PIC code that has direct accesses to the constant pool.
1533   // The address of the global is just (hi(&g)+lo(&g)).
1534   return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
1535 }
1536
1537 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
1538                                              SelectionDAG &DAG) const {
1539   EVT PtrVT = Op.getValueType();
1540   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
1541   const Constant *C = CP->getConstVal();
1542
1543   // 64-bit SVR4 ABI code is always position-independent.
1544   // The actual address of the GlobalValue is stored in the TOC.
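  // For example (illustrative): the entry's address then materializes as a
  // single TOC-relative load through the TOC pointer in X2, along the lines
  // of "ld r3, .LC0@toc(r2)".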
1545 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 1546 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0); 1547 return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(CP), MVT::i64, GA, 1548 DAG.getRegister(PPC::X2, MVT::i64)); 1549 } 1550 1551 unsigned MOHiFlag, MOLoFlag; 1552 bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag); 1553 SDValue CPIHi = 1554 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag); 1555 SDValue CPILo = 1556 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag); 1557 return LowerLabelRef(CPIHi, CPILo, isPIC, DAG); 1558 } 1559 1560 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 1561 EVT PtrVT = Op.getValueType(); 1562 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 1563 1564 // 64-bit SVR4 ABI code is always position-independent. 1565 // The actual address of the GlobalValue is stored in the TOC. 1566 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 1567 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 1568 return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(JT), MVT::i64, GA, 1569 DAG.getRegister(PPC::X2, MVT::i64)); 1570 } 1571 1572 unsigned MOHiFlag, MOLoFlag; 1573 bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag); 1574 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 1575 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 1576 return LowerLabelRef(JTIHi, JTILo, isPIC, DAG); 1577 } 1578 1579 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 1580 SelectionDAG &DAG) const { 1581 EVT PtrVT = Op.getValueType(); 1582 1583 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 1584 1585 unsigned MOHiFlag, MOLoFlag; 1586 bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag); 1587 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 1588 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 1589 return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG); 1590 } 1591 1592 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 1593 SelectionDAG &DAG) const { 1594 1595 // FIXME: TLS addresses currently use medium model code sequences, 1596 // which is the most useful form. Eventually support for small and 1597 // large models could be added if users need it, at the cost of 1598 // additional complexity. 1599 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 1600 SDLoc dl(GA); 1601 const GlobalValue *GV = GA->getGlobal(); 1602 EVT PtrVT = getPointerTy(); 1603 bool is64bit = Subtarget.isPPC64(); 1604 1605 TLSModel::Model Model = getTargetMachine().getTLSModel(GV); 1606 1607 if (Model == TLSModel::LocalExec) { 1608 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 1609 PPCII::MO_TPREL_HA); 1610 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 1611 PPCII::MO_TPREL_LO); 1612 SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2, 1613 is64bit ? 
MVT::i64 : MVT::i32); 1614 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 1615 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 1616 } 1617 1618 if (Model == TLSModel::InitialExec) { 1619 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 1620 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 1621 PPCII::MO_TLS); 1622 SDValue GOTPtr; 1623 if (is64bit) { 1624 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 1625 GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, 1626 PtrVT, GOTReg, TGA); 1627 } else 1628 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); 1629 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, 1630 PtrVT, TGA, GOTPtr); 1631 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 1632 } 1633 1634 if (Model == TLSModel::GeneralDynamic) { 1635 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 1636 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 1637 SDValue GOTEntryHi = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 1638 GOTReg, TGA); 1639 SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSGD_L, dl, PtrVT, 1640 GOTEntryHi, TGA); 1641 1642 // We need a chain node, and don't have one handy. The underlying 1643 // call has no side effects, so using the function entry node 1644 // suffices. 1645 SDValue Chain = DAG.getEntryNode(); 1646 Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, GOTEntry); 1647 SDValue ParmReg = DAG.getRegister(PPC::X3, MVT::i64); 1648 SDValue TLSAddr = DAG.getNode(PPCISD::GET_TLS_ADDR, dl, 1649 PtrVT, ParmReg, TGA); 1650 // The return value from GET_TLS_ADDR really is in X3 already, but 1651 // some hacks are needed here to tie everything together. The extra 1652 // copies dissolve during subsequent transforms. 1653 Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, TLSAddr); 1654 return DAG.getCopyFromReg(Chain, dl, PPC::X3, PtrVT); 1655 } 1656 1657 if (Model == TLSModel::LocalDynamic) { 1658 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 1659 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 1660 SDValue GOTEntryHi = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 1661 GOTReg, TGA); 1662 SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSLD_L, dl, PtrVT, 1663 GOTEntryHi, TGA); 1664 1665 // We need a chain node, and don't have one handy. The underlying 1666 // call has no side effects, so using the function entry node 1667 // suffices. 1668 SDValue Chain = DAG.getEntryNode(); 1669 Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, GOTEntry); 1670 SDValue ParmReg = DAG.getRegister(PPC::X3, MVT::i64); 1671 SDValue TLSAddr = DAG.getNode(PPCISD::GET_TLSLD_ADDR, dl, 1672 PtrVT, ParmReg, TGA); 1673 // The return value from GET_TLSLD_ADDR really is in X3 already, but 1674 // some hacks are needed here to tie everything together. The extra 1675 // copies dissolve during subsequent transforms. 1676 Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, TLSAddr); 1677 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, PtrVT, 1678 Chain, ParmReg, TGA); 1679 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA); 1680 } 1681 1682 llvm_unreachable("Unknown TLS model!"); 1683 } 1684 1685 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 1686 SelectionDAG &DAG) const { 1687 EVT PtrVT = Op.getValueType(); 1688 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 1689 SDLoc DL(GSDN); 1690 const GlobalValue *GV = GSDN->getGlobal(); 1691 1692 // 64-bit SVR4 ABI code is always position-independent. 1693 // The actual address of the GlobalValue is stored in the TOC. 
1694   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
1695     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
1696     return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i64, GA,
1697                        DAG.getRegister(PPC::X2, MVT::i64));
1698   }
1699
1700   unsigned MOHiFlag, MOLoFlag;
1701   bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag, GV);
1702
1703   SDValue GAHi =
1704     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
1705   SDValue GALo =
1706     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
1707
1708   SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG);
1709
1710   // If the global reference is actually to a non-lazy-pointer, we have to do an
1711   // extra load to get the address of the global.
1712   if (MOHiFlag & PPCII::MO_NLP_FLAG)
1713     Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(),
1714                       false, false, false, 0);
1715   return Ptr;
1716 }
1717
1718 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
1719   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
1720   SDLoc dl(Op);
1721
1722   if (Op.getValueType() == MVT::v2i64) {
1723     // When the operands themselves are v2i64 values, we need to do something
1724     // special because VSX has no underlying comparison operations for these.
1725     if (Op.getOperand(0).getValueType() == MVT::v2i64) {
1726       // Equality can be handled by casting to the legal type for Altivec
1727       // comparisons; everything else needs to be expanded.
1728       if (CC == ISD::SETEQ || CC == ISD::SETNE) {
1729         return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
1730                  DAG.getSetCC(dl, MVT::v4i32,
1731                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
1732                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
1733                    CC));
1734       }
1735
1736       return SDValue();
1737     }
1738
1739     // We handle most of these in the usual way.
1740     return Op;
1741   }
1742
1743   // If we're comparing for equality to zero, expose the fact that this is
1744   // implemented as a ctlz/srl pair on PPC, so that the dag combiner can
1745   // fold the new nodes.
1746   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1747     if (C->isNullValue() && CC == ISD::SETEQ) {
1748       EVT VT = Op.getOperand(0).getValueType();
1749       SDValue Zext = Op.getOperand(0);
1750       if (VT.bitsLT(MVT::i32)) {
1751         VT = MVT::i32;
1752         Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
1753       }
1754       unsigned Log2b = Log2_32(VT.getSizeInBits());
1755       SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
1756       SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
1757                                 DAG.getConstant(Log2b, MVT::i32));
1758       return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
1759     }
1760     // Leave comparisons against 0 and -1 alone for now, since they're usually
1761     // optimized. FIXME: revisit this when we can custom lower all setcc
1762     // optimizations.
1763     if (C->isAllOnesValue() || C->isNullValue())
1764       return SDValue();
1765   }
1766
1767   // If we have an integer seteq/setne, turn it into a compare against zero
1768   // by xor'ing the rhs with the lhs, which is faster than setting a
1769   // condition register, reading it back out, and masking the correct bit. The
1770   // normal approach here uses sub to do this instead of xor. Using xor exposes
1771   // the result to other bit-twiddling opportunities.
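  // For example (illustrative): (a == b) becomes setcc(a ^ b, 0, seteq), and
  // the zero test can then fold into the ctlz/srl expansion above, since
  // (x == 0) is exactly (ctlz(x) >> log2(bitwidth)) -- typically a
  // cntlzw/srwi pair on 32-bit PPC.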
1772   EVT LHSVT = Op.getOperand(0).getValueType();
1773   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
1774     EVT VT = Op.getValueType();
1775     SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
1776                               Op.getOperand(1));
1777     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, LHSVT), CC);
1778   }
1779   return SDValue();
1780 }
1781
1782 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
1783                                       const PPCSubtarget &Subtarget) const {
1784   SDNode *Node = Op.getNode();
1785   EVT VT = Node->getValueType(0);
1786   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1787   SDValue InChain = Node->getOperand(0);
1788   SDValue VAListPtr = Node->getOperand(1);
1789   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
1790   SDLoc dl(Node);
1791
1792   assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
1793
1794   // gpr_index
1795   SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
1796                                     VAListPtr, MachinePointerInfo(SV), MVT::i8,
1797                                     false, false, 0);
1798   InChain = GprIndex.getValue(1);
1799
1800   if (VT == MVT::i64) {
1801     // Check whether GprIndex is odd.
1802     SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
1803                                  DAG.getConstant(1, MVT::i32));
1804     SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
1805                                 DAG.getConstant(0, MVT::i32), ISD::SETNE);
1806     SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
1807                                           DAG.getConstant(1, MVT::i32));
1808     // Align GprIndex to be even if it isn't.
1809     GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
1810                            GprIndex);
1811   }
1812
1813   // fpr index is 1 byte after gpr
1814   SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
1815                                DAG.getConstant(1, MVT::i32));
1816
1817   // fpr
1818   SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
1819                                     FprPtr, MachinePointerInfo(SV), MVT::i8,
1820                                     false, false, 0);
1821   InChain = FprIndex.getValue(1);
1822
1823   SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
1824                                        DAG.getConstant(8, MVT::i32));
1825
1826   SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
1827                                         DAG.getConstant(4, MVT::i32));
1828
1829   // areas
1830   SDValue OverflowArea = DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr,
1831                                      MachinePointerInfo(), false, false,
1832                                      false, 0);
1833   InChain = OverflowArea.getValue(1);
1834
1835   SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr,
1836                                     MachinePointerInfo(), false, false,
1837                                     false, 0);
1838   InChain = RegSaveArea.getValue(1);
1839
1840   // select overflow_area if index >= 8
1841   SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
1842                             DAG.getConstant(8, MVT::i32), ISD::SETLT);
1843
1844   // adjustment constant gpr_index * 4/8
1845   SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
1846                                     VT.isInteger() ? GprIndex : FprIndex,
1847                                     DAG.getConstant(VT.isInteger() ? 4 : 8,
1848                                                     MVT::i32));
1849
1850   // OurReg = RegSaveArea + RegConstant
1851   SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
1852                                RegConstant);
1853
1854   // Floating types are 32 bytes into RegSaveArea
1855   if (VT.isFloatingPoint())
1856     OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
1857                          DAG.getConstant(32, MVT::i32));
1858
1859   // increase {f,g}pr_index by 1 (or 2 if VT is i64)
1860   SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
1861                                    VT.isInteger() ? GprIndex : FprIndex,
1862                                    DAG.getConstant(VT == MVT::i64 ?
                                                   2 : 1,
1863                                                 MVT::i32));
1864
1865   InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
1866                               VT.isInteger() ? VAListPtr : FprPtr,
1867                               MachinePointerInfo(SV),
1868                               MVT::i8, false, false, 0);
1869
1870   // determine if we should load from reg_save_area or overflow_area
1871   SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
1872
1873   // increase overflow_area by 4/8 if gpr/fpr >= 8
1874   SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
1875                                           DAG.getConstant(VT.isInteger() ? 4 : 8,
1876                                                           MVT::i32));
1877
1878   OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
1879                              OverflowAreaPlusN);
1880
1881   InChain = DAG.getTruncStore(InChain, dl, OverflowArea,
1882                               OverflowAreaPtr,
1883                               MachinePointerInfo(),
1884                               MVT::i32, false, false, 0);
1885
1886   return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(),
1887                      false, false, false, 0);
1888 }
1889
1890 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG,
1891                                        const PPCSubtarget &Subtarget) const {
1892   assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
1893
1894   // We have to copy the entire va_list struct:
1895   // 2 * sizeof(char) + 2 bytes of padding + 2 * sizeof(char*) = 12 bytes.
1896   return DAG.getMemcpy(Op.getOperand(0), Op,
1897                        Op.getOperand(1), Op.getOperand(2),
1898                        DAG.getConstant(12, MVT::i32), 8, false, true,
1899                        MachinePointerInfo(), MachinePointerInfo());
1900 }
1901
1902 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
1903                                                   SelectionDAG &DAG) const {
1904   return Op.getOperand(0);
1905 }
1906
1907 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
1908                                                 SelectionDAG &DAG) const {
1909   SDValue Chain = Op.getOperand(0);
1910   SDValue Trmp = Op.getOperand(1); // trampoline
1911   SDValue FPtr = Op.getOperand(2); // nested function
1912   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
1913   SDLoc dl(Op);
1914
1915   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1916   bool isPPC64 = (PtrVT == MVT::i64);
1917   Type *IntPtrTy =
1918     DAG.getTargetLoweringInfo().getDataLayout()->getIntPtrType(
1919                                                              *DAG.getContext());
1920
1921   TargetLowering::ArgListTy Args;
1922   TargetLowering::ArgListEntry Entry;
1923
1924   Entry.Ty = IntPtrTy;
1925   Entry.Node = Trmp; Args.push_back(Entry);
1926
1927   // TrampSize == (isPPC64 ? 48 : 40);
1928   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40,
1929                                isPPC64 ? MVT::i64 : MVT::i32);
1930   Args.push_back(Entry);
1931
1932   Entry.Node = FPtr; Args.push_back(Entry);
1933   Entry.Node = Nest; Args.push_back(Entry);
1934
1935   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
1936   TargetLowering::CallLoweringInfo CLI(DAG);
1937   CLI.setDebugLoc(dl).setChain(Chain)
1938     .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
1939                DAG.getExternalSymbol("__trampoline_setup", PtrVT), &Args, 0);
1940
1941   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
1942   return CallResult.second;
1943 }
1944
1945 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
1946                                         const PPCSubtarget &Subtarget) const {
1947   MachineFunction &MF = DAG.getMachineFunction();
1948   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
1949
1950   SDLoc dl(Op);
1951
1952   if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
1953     // vastart just stores the address of the VarArgsFrameIndex slot into the
1954     // memory location argument.
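    // (Illustrative note: on these ABIs va_list is just a pointer, so this
    //  single store suffices; the 32-bit SVR4 struct case below needs four
    //  separate stores.)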
1955 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1956 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 1957 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1958 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 1959 MachinePointerInfo(SV), 1960 false, false, 0); 1961 } 1962 1963 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 1964 // We suppose the given va_list is already allocated. 1965 // 1966 // typedef struct { 1967 // char gpr; /* index into the array of 8 GPRs 1968 // * stored in the register save area 1969 // * gpr=0 corresponds to r3, 1970 // * gpr=1 to r4, etc. 1971 // */ 1972 // char fpr; /* index into the array of 8 FPRs 1973 // * stored in the register save area 1974 // * fpr=0 corresponds to f1, 1975 // * fpr=1 to f2, etc. 1976 // */ 1977 // char *overflow_arg_area; 1978 // /* location on stack that holds 1979 // * the next overflow argument 1980 // */ 1981 // char *reg_save_area; 1982 // /* where r3:r10 and f1:f8 (if saved) 1983 // * are stored 1984 // */ 1985 // } va_list[1]; 1986 1987 1988 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), MVT::i32); 1989 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), MVT::i32); 1990 1991 1992 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1993 1994 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 1995 PtrVT); 1996 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 1997 PtrVT); 1998 1999 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 2000 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT); 2001 2002 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 2003 SDValue ConstStackOffset = DAG.getConstant(StackOffset, PtrVT); 2004 2005 uint64_t FPROffset = 1; 2006 SDValue ConstFPROffset = DAG.getConstant(FPROffset, PtrVT); 2007 2008 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2009 2010 // Store first byte : number of int regs 2011 SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, 2012 Op.getOperand(1), 2013 MachinePointerInfo(SV), 2014 MVT::i8, false, false, 0); 2015 uint64_t nextOffset = FPROffset; 2016 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 2017 ConstFPROffset); 2018 2019 // Store second byte : number of float regs 2020 SDValue secondStore = 2021 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 2022 MachinePointerInfo(SV, nextOffset), MVT::i8, 2023 false, false, 0); 2024 nextOffset += StackOffset; 2025 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 2026 2027 // Store second word : arguments given on stack 2028 SDValue thirdStore = 2029 DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 2030 MachinePointerInfo(SV, nextOffset), 2031 false, false, 0); 2032 nextOffset += FrameOffset; 2033 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 2034 2035 // Store third word : arguments given in registers 2036 return DAG.getStore(thirdStore, dl, FR, nextPtr, 2037 MachinePointerInfo(SV, nextOffset), 2038 false, false, 0); 2039 2040 } 2041 2042 #include "PPCGenCallingConv.inc" 2043 2044 // Function whose sole purpose is to kill compiler warnings 2045 // stemming from unused functions included from PPCGenCallingConv.inc. 2046 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const { 2047 return Flag ? 
                      CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS;
2048 }
2049
2050 bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
2051                                       CCValAssign::LocInfo &LocInfo,
2052                                       ISD::ArgFlagsTy &ArgFlags,
2053                                       CCState &State) {
2054   return true;
2055 }
2056
2057 bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
2058                                              MVT &LocVT,
2059                                              CCValAssign::LocInfo &LocInfo,
2060                                              ISD::ArgFlagsTy &ArgFlags,
2061                                              CCState &State) {
2062   static const MCPhysReg ArgRegs[] = {
2063     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
2064     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
2065   };
2066   const unsigned NumArgRegs = array_lengthof(ArgRegs);
2067
2068   unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);
2069
2070   // Skip one register if the first unallocated register has an even register
2071   // number and there are still argument registers available which have not been
2072   // allocated yet. RegNum is actually an index into ArgRegs, which means we
2073   // need to skip a register if RegNum is odd.
2074   if (RegNum != NumArgRegs && RegNum % 2 == 1) {
2075     State.AllocateReg(ArgRegs[RegNum]);
2076   }
2077
2078   // Always return false here, as this function only makes sure that the first
2079   // unallocated register has an odd register number and does not actually
2080   // allocate a register for the current argument.
2081   return false;
2082 }
2083
2084 bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
2085                                                MVT &LocVT,
2086                                                CCValAssign::LocInfo &LocInfo,
2087                                                ISD::ArgFlagsTy &ArgFlags,
2088                                                CCState &State) {
2089   static const MCPhysReg ArgRegs[] = {
2090     PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
2091     PPC::F8
2092   };
2093
2094   const unsigned NumArgRegs = array_lengthof(ArgRegs);
2095
2096   unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);
2097
2098   // If there is only one floating-point register left, we need to put both f64
2099   // values of a split ppc_fp128 value on the stack.
2100   if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
2101     State.AllocateReg(ArgRegs[RegNum]);
2102   }
2103
2104   // Always return false here, as this function only makes sure that the two f64
2105   // values a ppc_fp128 value is split into are both passed in registers or both
2106   // passed on the stack and does not actually allocate a register for the
2107   // current argument.
2108   return false;
2109 }
2110
2111 /// GetFPR - Get the set of FP registers that should be allocated for arguments
2112 /// on Darwin.
2113 static const MCPhysReg *GetFPR() {
2114   static const MCPhysReg FPR[] = {
2115     PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
2116     PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
2117   };
2118
2119   return FPR;
2120 }
2121
2122 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
2123 /// the stack.
2124 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
2125                                        unsigned PtrByteSize) {
2126   unsigned ArgSize = ArgVT.getStoreSize();
2127   if (Flags.isByVal())
2128     ArgSize = Flags.getByValSize();
2129   ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
2130
2131   return ArgSize;
2132 }
2133 /// EnsureStackAlignment - Round stack frame size up from NumBytes to
2134 /// ensure the minimum alignment required for the target.
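/// For example (illustrative): with a 16-byte target stack alignment,
/// NumBytes = 52 rounds up to (52 + 15) & ~15 = 64.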
2135 static unsigned EnsureStackAlignment(const TargetMachine &Target, 2136 unsigned NumBytes) { 2137 unsigned TargetAlign = Target.getFrameLowering()->getStackAlignment(); 2138 unsigned AlignMask = TargetAlign - 1; 2139 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 2140 return NumBytes; 2141 } 2142 2143 SDValue 2144 PPCTargetLowering::LowerFormalArguments(SDValue Chain, 2145 CallingConv::ID CallConv, bool isVarArg, 2146 const SmallVectorImpl<ISD::InputArg> 2147 &Ins, 2148 SDLoc dl, SelectionDAG &DAG, 2149 SmallVectorImpl<SDValue> &InVals) 2150 const { 2151 if (Subtarget.isSVR4ABI()) { 2152 if (Subtarget.isPPC64()) 2153 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, 2154 dl, DAG, InVals); 2155 else 2156 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, 2157 dl, DAG, InVals); 2158 } else { 2159 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, 2160 dl, DAG, InVals); 2161 } 2162 } 2163 2164 SDValue 2165 PPCTargetLowering::LowerFormalArguments_32SVR4( 2166 SDValue Chain, 2167 CallingConv::ID CallConv, bool isVarArg, 2168 const SmallVectorImpl<ISD::InputArg> 2169 &Ins, 2170 SDLoc dl, SelectionDAG &DAG, 2171 SmallVectorImpl<SDValue> &InVals) const { 2172 2173 // 32-bit SVR4 ABI Stack Frame Layout: 2174 // +-----------------------------------+ 2175 // +--> | Back chain | 2176 // | +-----------------------------------+ 2177 // | | Floating-point register save area | 2178 // | +-----------------------------------+ 2179 // | | General register save area | 2180 // | +-----------------------------------+ 2181 // | | CR save word | 2182 // | +-----------------------------------+ 2183 // | | VRSAVE save word | 2184 // | +-----------------------------------+ 2185 // | | Alignment padding | 2186 // | +-----------------------------------+ 2187 // | | Vector register save area | 2188 // | +-----------------------------------+ 2189 // | | Local variable space | 2190 // | +-----------------------------------+ 2191 // | | Parameter list area | 2192 // | +-----------------------------------+ 2193 // | | LR save word | 2194 // | +-----------------------------------+ 2195 // SP--> +--- | Back chain | 2196 // +-----------------------------------+ 2197 // 2198 // Specifications: 2199 // System V Application Binary Interface PowerPC Processor Supplement 2200 // AltiVec Technology Programming Interface Manual 2201 2202 MachineFunction &MF = DAG.getMachineFunction(); 2203 MachineFrameInfo *MFI = MF.getFrameInfo(); 2204 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2205 2206 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2207 // Potential tail calls could cause overwriting of argument stack slots. 2208 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 2209 (CallConv == CallingConv::Fast)); 2210 unsigned PtrByteSize = 4; 2211 2212 // Assign locations to all of the incoming arguments. 2213 SmallVector<CCValAssign, 16> ArgLocs; 2214 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 2215 getTargetMachine(), ArgLocs, *DAG.getContext()); 2216 2217 // Reserve space for the linkage area on the stack. 2218 unsigned LinkageSize = PPCFrameLowering::getLinkageSize(false, false); 2219 CCInfo.AllocateStack(LinkageSize, PtrByteSize); 2220 2221 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 2222 2223 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2224 CCValAssign &VA = ArgLocs[i]; 2225 2226 // Arguments stored in registers. 
2227 if (VA.isRegLoc()) { 2228 const TargetRegisterClass *RC; 2229 EVT ValVT = VA.getValVT(); 2230 2231 switch (ValVT.getSimpleVT().SimpleTy) { 2232 default: 2233 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 2234 case MVT::i1: 2235 case MVT::i32: 2236 RC = &PPC::GPRCRegClass; 2237 break; 2238 case MVT::f32: 2239 RC = &PPC::F4RCRegClass; 2240 break; 2241 case MVT::f64: 2242 if (Subtarget.hasVSX()) 2243 RC = &PPC::VSFRCRegClass; 2244 else 2245 RC = &PPC::F8RCRegClass; 2246 break; 2247 case MVT::v16i8: 2248 case MVT::v8i16: 2249 case MVT::v4i32: 2250 case MVT::v4f32: 2251 RC = &PPC::VRRCRegClass; 2252 break; 2253 case MVT::v2f64: 2254 case MVT::v2i64: 2255 RC = &PPC::VSHRCRegClass; 2256 break; 2257 } 2258 2259 // Transform the arguments stored in physical registers into virtual ones. 2260 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2261 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, 2262 ValVT == MVT::i1 ? MVT::i32 : ValVT); 2263 2264 if (ValVT == MVT::i1) 2265 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); 2266 2267 InVals.push_back(ArgValue); 2268 } else { 2269 // Argument stored in memory. 2270 assert(VA.isMemLoc()); 2271 2272 unsigned ArgSize = VA.getLocVT().getStoreSize(); 2273 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), 2274 isImmutable); 2275 2276 // Create load nodes to retrieve arguments from the stack. 2277 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2278 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2279 MachinePointerInfo(), 2280 false, false, false, 0)); 2281 } 2282 } 2283 2284 // Assign locations to all of the incoming aggregate by value arguments. 2285 // Aggregates passed by value are stored in the local variable space of the 2286 // caller's stack frame, right above the parameter list area. 2287 SmallVector<CCValAssign, 16> ByValArgLocs; 2288 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 2289 getTargetMachine(), ByValArgLocs, *DAG.getContext()); 2290 2291 // Reserve stack space for the allocations in CCInfo. 2292 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 2293 2294 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 2295 2296 // Area that is at least reserved in the caller of this function. 2297 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 2298 MinReservedArea = std::max(MinReservedArea, LinkageSize); 2299 2300 // Set the size that is at least reserved in caller of this function. Tail 2301 // call optimized function's reserved stack space needs to be aligned so that 2302 // taking the difference between two stack areas will result in an aligned 2303 // stack. 2304 MinReservedArea = EnsureStackAlignment(MF.getTarget(), MinReservedArea); 2305 FuncInfo->setMinReservedArea(MinReservedArea); 2306 2307 SmallVector<SDValue, 8> MemOps; 2308 2309 // If the function takes variable number of arguments, make a frame index for 2310 // the start of the first vararg value... for expansion of llvm.va_start. 
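  // (Illustrative: for 32-bit SVR4 the Depth computed below is
  //  8 GPRs * 4 bytes + 8 FPRs * 8 bytes = 96 bytes of register save area.)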
2311   if (isVarArg) {
2312     static const MCPhysReg GPArgRegs[] = {
2313       PPC::R3, PPC::R4, PPC::R5, PPC::R6,
2314       PPC::R7, PPC::R8, PPC::R9, PPC::R10,
2315     };
2316     const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
2317
2318     static const MCPhysReg FPArgRegs[] = {
2319       PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
2320       PPC::F8
2321     };
2322     const unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
2323
2324     FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs,
2325                                                           NumGPArgRegs));
2326     FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs,
2327                                                           NumFPArgRegs));
2328
2329     // Make room for NumGPArgRegs and NumFPArgRegs.
2330     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
2331                 NumFPArgRegs * EVT(MVT::f64).getSizeInBits()/8;
2332
2333     FuncInfo->setVarArgsStackOffset(
2334       MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
2335                              CCInfo.getNextStackOffset(), true));
2336
2337     FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false));
2338     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
2339
2340     // The fixed integer arguments of a variadic function are stored to the
2341     // VarArgsFrameIndex on the stack so that they may be loaded by dereferencing
2342     // the result of va_next.
2343     for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
2344       // Get an existing live-in vreg, or add a new one.
2345       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
2346       if (!VReg)
2347         VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
2348
2349       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
2350       SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
2351                                    MachinePointerInfo(), false, false, 0);
2352       MemOps.push_back(Store);
2353       // Increment the address by four for the next argument to store.
2354       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
2355       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
2356     }
2357
2358     // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
2359     // is set.
2360     // The double arguments are stored to the VarArgsFrameIndex
2361     // on the stack.
2362     for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
2363       // Get an existing live-in vreg, or add a new one.
2364       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
2365       if (!VReg)
2366         VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
2367
2368       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
2369       SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
2370                                    MachinePointerInfo(), false, false, 0);
2371       MemOps.push_back(Store);
2372       // Increment the address by eight for the next argument to store.
2373       SDValue PtrOff = DAG.getConstant(EVT(MVT::f64).getSizeInBits()/8,
2374                                        PtrVT);
2375       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
2376     }
2377   }
2378
2379   if (!MemOps.empty())
2380     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
2381
2382   return Chain;
2383 }
2384
2385 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
2386 // value to MVT::i64 and then truncate to the correct register size.
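// For example (illustrative): a signext i32 argument arriving in an i64 GPR
// becomes AssertSext(value, i32) followed by a truncate to i32; the assert
// node lets later combines rely on the upper bits without re-extending.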
2387 SDValue 2388 PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT, 2389 SelectionDAG &DAG, SDValue ArgVal, 2390 SDLoc dl) const { 2391 if (Flags.isSExt()) 2392 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 2393 DAG.getValueType(ObjectVT)); 2394 else if (Flags.isZExt()) 2395 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 2396 DAG.getValueType(ObjectVT)); 2397 2398 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal); 2399 } 2400 2401 SDValue 2402 PPCTargetLowering::LowerFormalArguments_64SVR4( 2403 SDValue Chain, 2404 CallingConv::ID CallConv, bool isVarArg, 2405 const SmallVectorImpl<ISD::InputArg> 2406 &Ins, 2407 SDLoc dl, SelectionDAG &DAG, 2408 SmallVectorImpl<SDValue> &InVals) const { 2409 // TODO: add description of PPC stack frame format, or at least some docs. 2410 // 2411 bool isLittleEndian = Subtarget.isLittleEndian(); 2412 MachineFunction &MF = DAG.getMachineFunction(); 2413 MachineFrameInfo *MFI = MF.getFrameInfo(); 2414 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2415 2416 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2417 // Potential tail calls could cause overwriting of argument stack slots. 2418 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 2419 (CallConv == CallingConv::Fast)); 2420 unsigned PtrByteSize = 8; 2421 2422 unsigned LinkageSize = PPCFrameLowering::getLinkageSize(true, false); 2423 unsigned ArgOffset = LinkageSize; 2424 // Area that is at least reserved in caller of this function. 2425 unsigned MinReservedArea = ArgOffset; 2426 2427 static const MCPhysReg GPR[] = { 2428 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 2429 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 2430 }; 2431 2432 static const MCPhysReg *FPR = GetFPR(); 2433 2434 static const MCPhysReg VR[] = { 2435 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 2436 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 2437 }; 2438 static const MCPhysReg VSRH[] = { 2439 PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8, 2440 PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13 2441 }; 2442 2443 const unsigned Num_GPR_Regs = array_lengthof(GPR); 2444 const unsigned Num_FPR_Regs = 13; 2445 const unsigned Num_VR_Regs = array_lengthof(VR); 2446 2447 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 2448 2449 // Add DAG nodes to load the arguments or copy them out of registers. On 2450 // entry to a function on PPC, the arguments start after the linkage area, 2451 // although the first ones are often in registers. 2452 2453 SmallVector<SDValue, 8> MemOps; 2454 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 2455 unsigned CurArgIdx = 0; 2456 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 2457 SDValue ArgVal; 2458 bool needsLoad = false; 2459 EVT ObjectVT = Ins[ArgNo].VT; 2460 unsigned ObjSize = ObjectVT.getStoreSize(); 2461 unsigned ArgSize = ObjSize; 2462 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 2463 std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx); 2464 CurArgIdx = Ins[ArgNo].OrigArgIndex; 2465 2466 unsigned CurArgOffset = ArgOffset; 2467 2468 // Altivec parameters are padded to a 16 byte boundary. 2469 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 2470 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8 || 2471 ObjectVT==MVT::v2f64 || ObjectVT==MVT::v2i64) 2472 MinReservedArea = ((MinReservedArea+15)/16)*16; 2473 2474 // Calculate min reserved area. 
2475 MinReservedArea += CalculateStackSlotSize(ObjectVT, Flags, PtrByteSize); 2476 2477 // FIXME the codegen can be much improved in some cases. 2478 // We do not have to keep everything in memory. 2479 if (Flags.isByVal()) { 2480 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 2481 ObjSize = Flags.getByValSize(); 2482 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2483 // Empty aggregate parameters do not take up registers. Examples: 2484 // struct { } a; 2485 // union { } b; 2486 // int c[0]; 2487 // etc. However, we have to provide a place-holder in InVals, so 2488 // pretend we have an 8-byte item at the current address for that 2489 // purpose. 2490 if (!ObjSize) { 2491 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 2492 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2493 InVals.push_back(FIN); 2494 continue; 2495 } 2496 2497 unsigned BVAlign = Flags.getByValAlign(); 2498 if (BVAlign > 8) { 2499 ArgOffset = ((ArgOffset+BVAlign-1)/BVAlign)*BVAlign; 2500 CurArgOffset = ArgOffset; 2501 } 2502 2503 // All aggregates smaller than 8 bytes must be passed right-justified. 2504 if (ObjSize < PtrByteSize && !isLittleEndian) 2505 CurArgOffset = CurArgOffset + (PtrByteSize - ObjSize); 2506 // The value of the object is its address. 2507 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true); 2508 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2509 InVals.push_back(FIN); 2510 2511 if (ObjSize < 8) { 2512 if (GPR_idx != Num_GPR_Regs) { 2513 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2514 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2515 SDValue Store; 2516 2517 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 2518 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 2519 (ObjSize == 2 ? MVT::i16 : MVT::i32)); 2520 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 2521 MachinePointerInfo(FuncArg), 2522 ObjType, false, false, 0); 2523 } else { 2524 // For sizes that don't fit a truncating store (3, 5, 6, 7), 2525 // store the whole register as-is to the parameter save area 2526 // slot. The address of the parameter was already calculated 2527 // above (InVals.push_back(FIN)) to be the right-justified 2528 // offset within the slot. For this store, we need a new 2529 // frame index that points at the beginning of the slot. 2530 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 2531 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2532 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2533 MachinePointerInfo(FuncArg), 2534 false, false, 0); 2535 } 2536 2537 MemOps.push_back(Store); 2538 ++GPR_idx; 2539 } 2540 // Whether we copied from a register or not, advance the offset 2541 // into the parameter save area by a full doubleword. 2542 ArgOffset += PtrByteSize; 2543 continue; 2544 } 2545 2546 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 2547 // Store whatever pieces of the object are in registers 2548 // to memory. ArgOffset will be the address of the beginning 2549 // of the object. 
2550 if (GPR_idx != Num_GPR_Regs) { 2551 unsigned VReg; 2552 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2553 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 2554 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2555 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2556 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2557 MachinePointerInfo(FuncArg, j), 2558 false, false, 0); 2559 MemOps.push_back(Store); 2560 ++GPR_idx; 2561 ArgOffset += PtrByteSize; 2562 } else { 2563 ArgOffset += ArgSize - j; 2564 break; 2565 } 2566 } 2567 continue; 2568 } 2569 2570 switch (ObjectVT.getSimpleVT().SimpleTy) { 2571 default: llvm_unreachable("Unhandled argument type!"); 2572 case MVT::i1: 2573 case MVT::i32: 2574 case MVT::i64: 2575 if (GPR_idx != Num_GPR_Regs) { 2576 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2577 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 2578 2579 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 2580 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 2581 // value to MVT::i64 and then truncate to the correct register size. 2582 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 2583 2584 ++GPR_idx; 2585 } else { 2586 needsLoad = true; 2587 ArgSize = PtrByteSize; 2588 } 2589 ArgOffset += 8; 2590 break; 2591 2592 case MVT::f32: 2593 case MVT::f64: 2594 // Every 8 bytes of argument space consumes one of the GPRs available for 2595 // argument passing. 2596 if (GPR_idx != Num_GPR_Regs) { 2597 ++GPR_idx; 2598 } 2599 if (FPR_idx != Num_FPR_Regs) { 2600 unsigned VReg; 2601 2602 if (ObjectVT == MVT::f32) 2603 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 2604 else 2605 VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX() ? 2606 &PPC::VSFRCRegClass : 2607 &PPC::F8RCRegClass); 2608 2609 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2610 ++FPR_idx; 2611 } else { 2612 needsLoad = true; 2613 ArgSize = PtrByteSize; 2614 } 2615 2616 ArgOffset += 8; 2617 break; 2618 case MVT::v4f32: 2619 case MVT::v4i32: 2620 case MVT::v8i16: 2621 case MVT::v16i8: 2622 case MVT::v2f64: 2623 case MVT::v2i64: 2624 // Vectors are aligned to a 16-byte boundary in the argument save area. 2625 while ((ArgOffset % 16) != 0) { 2626 ArgOffset += PtrByteSize; 2627 if (GPR_idx != Num_GPR_Regs) 2628 GPR_idx++; 2629 } 2630 if (VR_idx != Num_VR_Regs) { 2631 unsigned VReg = (ObjectVT == MVT::v2f64 || ObjectVT == MVT::v2i64) ? 2632 MF.addLiveIn(VSRH[VR_idx], &PPC::VSHRCRegClass) : 2633 MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 2634 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2635 ++VR_idx; 2636 } else { 2637 CurArgOffset = ArgOffset; 2638 needsLoad = true; 2639 } 2640 ArgOffset += 16; 2641 GPR_idx = std::min(GPR_idx + 2, Num_GPR_Regs); 2642 break; 2643 } 2644 2645 // We need to load the argument to a virtual register if we determined 2646 // above that we ran out of physical registers of the appropriate type. 2647 if (needsLoad) { 2648 if (ObjSize < ArgSize && !isLittleEndian) 2649 CurArgOffset += ArgSize - ObjSize; 2650 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, isImmutable); 2651 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2652 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(), 2653 false, false, false, 0); 2654 } 2655 2656 InVals.push_back(ArgVal); 2657 } 2658 2659 // Area that is at least reserved in the caller of this function. 
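  // (Illustrative: for 64-bit SVR4/ELFv1 this floor is the 48-byte linkage
  //  area plus 8 doublewords of parameter save area, i.e. 112 bytes.)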
2660   MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
2661
2662   // Set the size that is at least reserved in caller of this function. Tail
2663   // call optimized functions' reserved stack space needs to be aligned so that
2664   // taking the difference between two stack areas will result in an aligned
2665   // stack.
2666   MinReservedArea = EnsureStackAlignment(MF.getTarget(), MinReservedArea);
2667   FuncInfo->setMinReservedArea(MinReservedArea);
2668
2669   // If the function takes variable number of arguments, make a frame index for
2670   // the start of the first vararg value... for expansion of llvm.va_start.
2671   if (isVarArg) {
2672     int Depth = ArgOffset;
2673
2674     FuncInfo->setVarArgsFrameIndex(
2675       MFI->CreateFixedObject(PtrByteSize, Depth, true));
2676     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
2677
2678     // If this function is vararg, store any remaining integer argument regs
2679     // to their spots on the stack so that they may be loaded by dereferencing
2680     // the result of va_next.
2681     for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
2682       unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
2683       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
2684       SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
2685                                    MachinePointerInfo(), false, false, 0);
2686       MemOps.push_back(Store);
2687       // Increment the address by PtrByteSize for the next argument to store.
2688       SDValue PtrOff = DAG.getConstant(PtrByteSize, PtrVT);
2689       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
2690     }
2691   }
2692
2693   if (!MemOps.empty())
2694     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
2695
2696   return Chain;
2697 }
2698
2699 SDValue
2700 PPCTargetLowering::LowerFormalArguments_Darwin(
2701                                       SDValue Chain,
2702                                       CallingConv::ID CallConv, bool isVarArg,
2703                                       const SmallVectorImpl<ISD::InputArg>
2704                                         &Ins,
2705                                       SDLoc dl, SelectionDAG &DAG,
2706                                       SmallVectorImpl<SDValue> &InVals) const {
2707   // TODO: add description of PPC stack frame format, or at least some docs.
2708   //
2709   MachineFunction &MF = DAG.getMachineFunction();
2710   MachineFrameInfo *MFI = MF.getFrameInfo();
2711   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2712
2713   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2714   bool isPPC64 = PtrVT == MVT::i64;
2715   // Potential tail calls could cause overwriting of argument stack slots.
2716   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
2717                        (CallConv == CallingConv::Fast));
2718   unsigned PtrByteSize = isPPC64 ? 8 : 4;
2719
2720   unsigned LinkageSize = PPCFrameLowering::getLinkageSize(isPPC64, true);
2721   unsigned ArgOffset = LinkageSize;
2722   // Area that is at least reserved in caller of this function.
2723   unsigned MinReservedArea = ArgOffset;
2724
2725   static const MCPhysReg GPR_32[] = {  // 32-bit registers.
2726     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
2727     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
2728   };
2729   static const MCPhysReg GPR_64[] = {  // 64-bit registers.
2730     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
2731     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
2732   };
2733
2734   static const MCPhysReg *FPR = GetFPR();
2735
2736   static const MCPhysReg VR[] = {
2737     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
2738     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
2739   };
2740
2741   const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
2742   const unsigned Num_FPR_Regs = 13;
2743   const unsigned Num_VR_Regs  = array_lengthof( VR);
2744
2745   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
2746
2747   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
2748
2749   // In 32-bit non-varargs functions, the stack space for vectors is after the
2750   // stack space for non-vectors. We do not use this space unless we have
2751   // too many vectors to fit in registers, something that only occurs in
2752   // constructed examples, but we have to walk the arglist to figure
2753   // that out... for the pathological case, compute VecArgOffset as the
2754   // start of the vector parameter area. Computing VecArgOffset is the
2755   // entire point of the following loop.
2756   unsigned VecArgOffset = ArgOffset;
2757   if (!isVarArg && !isPPC64) {
2758     for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
2759          ++ArgNo) {
2760       EVT ObjectVT = Ins[ArgNo].VT;
2761       ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
2762
2763       if (Flags.isByVal()) {
2764         // ObjSize is the true size; ArgSize is it rounded up to a multiple of regs.
2765         unsigned ObjSize = Flags.getByValSize();
2766         unsigned ArgSize =
2767           ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
2768         VecArgOffset += ArgSize;
2769         continue;
2770       }
2771
2772       switch(ObjectVT.getSimpleVT().SimpleTy) {
2773       default: llvm_unreachable("Unhandled argument type!");
2774       case MVT::i1:
2775       case MVT::i32:
2776       case MVT::f32:
2777         VecArgOffset += 4;
2778         break;
2779       case MVT::i64:  // PPC64
2780       case MVT::f64:
2781         // FIXME: We are guaranteed to be !isPPC64 at this point.
2782         // Does MVT::i64 apply?
2783         VecArgOffset += 8;
2784         break;
2785       case MVT::v4f32:
2786       case MVT::v4i32:
2787       case MVT::v8i16:
2788       case MVT::v16i8:
2789         // Nothing to do, we're only looking at non-vector args here.
2790         break;
2791       }
2792     }
2793   }
2794   // We've found where the vector parameter area in memory is. Skip the
2795   // first 12 parameters; these don't use that memory.
2796   VecArgOffset = ((VecArgOffset+15)/16)*16;
2797   VecArgOffset += 12*16;
2798
2799   // Add DAG nodes to load the arguments or copy them out of registers. On
2800   // entry to a function on PPC, the arguments start after the linkage area,
2801   // although the first ones are often in registers.
2802
2803   SmallVector<SDValue, 8> MemOps;
2804   unsigned nAltivecParamsAtEnd = 0;
2805   Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
2806   unsigned CurArgIdx = 0;
2807   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
2808     SDValue ArgVal;
2809     bool needsLoad = false;
2810     EVT ObjectVT = Ins[ArgNo].VT;
2811     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
2812     unsigned ArgSize = ObjSize;
2813     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
2814     std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx);
2815     CurArgIdx = Ins[ArgNo].OrigArgIndex;
2816
2817     unsigned CurArgOffset = ArgOffset;
2818
2819     // Varargs or 64-bit Altivec parameters are padded to a 16-byte boundary.
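    // (Illustrative: an ArgOffset of 40 rounds up to 48 here, computed as
    //  ((40 + 15) / 16) * 16.)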
2820 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 2821 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 2822 if (isVarArg || isPPC64) { 2823 MinReservedArea = ((MinReservedArea+15)/16)*16; 2824 MinReservedArea += CalculateStackSlotSize(ObjectVT, 2825 Flags, 2826 PtrByteSize); 2827 } else nAltivecParamsAtEnd++; 2828 } else 2829 // Calculate min reserved area. 2830 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 2831 Flags, 2832 PtrByteSize); 2833 2834 // FIXME the codegen can be much improved in some cases. 2835 // We do not have to keep everything in memory. 2836 if (Flags.isByVal()) { 2837 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 2838 ObjSize = Flags.getByValSize(); 2839 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2840 // Objects of size 1 and 2 are right justified, everything else is 2841 // left justified. This means the memory address is adjusted forwards. 2842 if (ObjSize==1 || ObjSize==2) { 2843 CurArgOffset = CurArgOffset + (4 - ObjSize); 2844 } 2845 // The value of the object is its address. 2846 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true); 2847 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2848 InVals.push_back(FIN); 2849 if (ObjSize==1 || ObjSize==2) { 2850 if (GPR_idx != Num_GPR_Regs) { 2851 unsigned VReg; 2852 if (isPPC64) 2853 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2854 else 2855 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2856 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2857 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16; 2858 SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 2859 MachinePointerInfo(FuncArg), 2860 ObjType, false, false, 0); 2861 MemOps.push_back(Store); 2862 ++GPR_idx; 2863 } 2864 2865 ArgOffset += PtrByteSize; 2866 2867 continue; 2868 } 2869 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 2870 // Store whatever pieces of the object are in registers 2871 // to memory. ArgOffset will be the address of the beginning 2872 // of the object. 2873 if (GPR_idx != Num_GPR_Regs) { 2874 unsigned VReg; 2875 if (isPPC64) 2876 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2877 else 2878 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2879 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 2880 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2881 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2882 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2883 MachinePointerInfo(FuncArg, j), 2884 false, false, 0); 2885 MemOps.push_back(Store); 2886 ++GPR_idx; 2887 ArgOffset += PtrByteSize; 2888 } else { 2889 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 2890 break; 2891 } 2892 } 2893 continue; 2894 } 2895 2896 switch (ObjectVT.getSimpleVT().SimpleTy) { 2897 default: llvm_unreachable("Unhandled argument type!"); 2898 case MVT::i1: 2899 case MVT::i32: 2900 if (!isPPC64) { 2901 if (GPR_idx != Num_GPR_Regs) { 2902 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2903 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 2904 2905 if (ObjectVT == MVT::i1) 2906 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal); 2907 2908 ++GPR_idx; 2909 } else { 2910 needsLoad = true; 2911 ArgSize = PtrByteSize; 2912 } 2913 // All int arguments reserve stack space in the Darwin ABI. 
2914 ArgOffset += PtrByteSize; 2915 break; 2916 } 2917 // FALLTHROUGH 2918 case MVT::i64: // PPC64 2919 if (GPR_idx != Num_GPR_Regs) { 2920 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2921 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 2922 2923 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 2924 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 2925 // value to MVT::i64 and then truncate to the correct register size. 2926 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 2927 2928 ++GPR_idx; 2929 } else { 2930 needsLoad = true; 2931 ArgSize = PtrByteSize; 2932 } 2933 // All int arguments reserve stack space in the Darwin ABI. 2934 ArgOffset += 8; 2935 break; 2936 2937 case MVT::f32: 2938 case MVT::f64: 2939 // Every 4 bytes of argument space consumes one of the GPRs available for 2940 // argument passing. 2941 if (GPR_idx != Num_GPR_Regs) { 2942 ++GPR_idx; 2943 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 2944 ++GPR_idx; 2945 } 2946 if (FPR_idx != Num_FPR_Regs) { 2947 unsigned VReg; 2948 2949 if (ObjectVT == MVT::f32) 2950 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 2951 else 2952 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 2953 2954 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2955 ++FPR_idx; 2956 } else { 2957 needsLoad = true; 2958 } 2959 2960 // All FP arguments reserve stack space in the Darwin ABI. 2961 ArgOffset += isPPC64 ? 8 : ObjSize; 2962 break; 2963 case MVT::v4f32: 2964 case MVT::v4i32: 2965 case MVT::v8i16: 2966 case MVT::v16i8: 2967 // Note that vector arguments in registers don't reserve stack space, 2968 // except in varargs functions. 2969 if (VR_idx != Num_VR_Regs) { 2970 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 2971 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2972 if (isVarArg) { 2973 while ((ArgOffset % 16) != 0) { 2974 ArgOffset += PtrByteSize; 2975 if (GPR_idx != Num_GPR_Regs) 2976 GPR_idx++; 2977 } 2978 ArgOffset += 16; 2979 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 2980 } 2981 ++VR_idx; 2982 } else { 2983 if (!isVarArg && !isPPC64) { 2984 // Vectors go after all the nonvectors. 2985 CurArgOffset = VecArgOffset; 2986 VecArgOffset += 16; 2987 } else { 2988 // Vectors are aligned. 2989 ArgOffset = ((ArgOffset+15)/16)*16; 2990 CurArgOffset = ArgOffset; 2991 ArgOffset += 16; 2992 } 2993 needsLoad = true; 2994 } 2995 break; 2996 } 2997 2998 // We need to load the argument to a virtual register if we determined above 2999 // that we ran out of physical registers of the appropriate type. 3000 if (needsLoad) { 3001 int FI = MFI->CreateFixedObject(ObjSize, 3002 CurArgOffset + (ArgSize - ObjSize), 3003 isImmutable); 3004 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3005 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(), 3006 false, false, false, 0); 3007 } 3008 3009 InVals.push_back(ArgVal); 3010 } 3011 3012 // Allow for Altivec parameters at the end, if needed. 3013 if (nAltivecParamsAtEnd) { 3014 MinReservedArea = ((MinReservedArea+15)/16)*16; 3015 MinReservedArea += 16*nAltivecParamsAtEnd; 3016 } 3017 3018 // Area that is at least reserved in the caller of this function. 3019 MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize); 3020 3021 // Set the size that is at least reserved in caller of this function. 
  // Tail call optimized functions' reserved stack space needs to be aligned
  // so that taking the difference between two stack areas will result in an
  // aligned stack.
  MinReservedArea = EnsureStackAlignment(MF.getTarget(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of
  // llvm.va_start.
  if (isVarArg) {
    int Depth = ArgOffset;

    FuncInfo->setVarArgsFrameIndex(
      MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
                             Depth, true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by
    // dereferencing the result of va_next.
    for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg;

      if (isPPC64)
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      else
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                   MachinePointerInfo(), false, false, 0);
      MemOps.push_back(Store);
      // Increment the address by the size of a pointer for the next argument
      // to store.
      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
/// adjusted to accommodate the arguments for the tail call.
static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
                                   unsigned ParamSize) {

  if (!isTailCall) return 0;

  PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
  unsigned CallerMinReservedArea = FI->getMinReservedArea();
  int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
  // Remember only if the new adjustment is bigger.
  if (SPDiff < FI->getTailCallSPDelta())
    FI->setTailCallSPDelta(SPDiff);

  return SPDiff;
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool
PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                     CallingConv::ID CalleeCC,
                                     bool isVarArg,
                                     const SmallVectorImpl<ISD::InputArg> &Ins,
                                     SelectionDAG& DAG) const {
  if (!getTargetMachine().Options.GuaranteedTailCallOpt)
    return false;

  // Variable argument functions are not supported.
  if (isVarArg)
    return false;

  MachineFunction &MF = DAG.getMachineFunction();
  CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
  if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
    // Functions containing byval parameters are not supported.
    for (unsigned i = 0; i != Ins.size(); i++) {
      ISD::ArgFlagsTy Flags = Ins[i].Flags;
      if (Flags.isByVal()) return false;
    }

    // Non-PIC/GOT tail calls are supported.
    if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
      return true;

    // At the moment we can only do local tail calls (in the same module,
    // hidden or protected) if we are generating PIC.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
      return G->getGlobal()->hasHiddenVisibility()
          || G->getGlobal()->hasProtectedVisibility();
  }

  return false;
}

/// isBLACompatibleAddress - Return the immediate to use if the specified
/// 32-bit value is representable in the immediate field of a BxA instruction.
static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
  if (!C) return nullptr;

  int Addr = C->getZExtValue();
  if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
      SignExtend32<26>(Addr) != Addr)
    return nullptr;  // Top 6 bits have to be sext of immediate.

  return DAG.getConstant((int)C->getZExtValue() >> 2,
                         DAG.getTargetLoweringInfo().getPointerTy()).getNode();
}

namespace {

struct TailCallArgumentInfo {
  SDValue Arg;
  SDValue FrameIdxOp;
  int FrameIdx;

  TailCallArgumentInfo() : FrameIdx(0) {}
};

}

/// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
static void
StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG,
                                  SDValue Chain,
                   const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
                   SmallVectorImpl<SDValue> &MemOpChains,
                   SDLoc dl) {
  for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
    SDValue Arg = TailCallArgs[i].Arg;
    SDValue FIN = TailCallArgs[i].FrameIdxOp;
    int FI = TailCallArgs[i].FrameIdx;
    // Store relative to the frame pointer.
    MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, FIN,
                                       MachinePointerInfo::getFixedStack(FI),
                                       false, false, 0));
  }
}

/// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address
/// to the appropriate stack slot for the tail call optimized function call.
static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
                                             MachineFunction &MF,
                                             SDValue Chain,
                                             SDValue OldRetAddr,
                                             SDValue OldFP,
                                             int SPDiff,
                                             bool isPPC64,
                                             bool isDarwinABI,
                                             SDLoc dl) {
  if (SPDiff) {
    // Calculate the new stack slot for the return address.
    int SlotSize = isPPC64 ? 8 : 4;
    int NewRetAddrLoc = SPDiff + PPCFrameLowering::getReturnSaveOffset(isPPC64,
                                                                  isDarwinABI);
    int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize,
                                                          NewRetAddrLoc, true);
    EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
    SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
    Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
                         MachinePointerInfo::getFixedStack(NewRetAddr),
                         false, false, 0);

    // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack
    // slot as the FP is never overwritten.
    if (isDarwinABI) {
      int NewFPLoc =
        SPDiff + PPCFrameLowering::getFramePointerSaveOffset(isPPC64,
                                                             isDarwinABI);
      int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc,
                                                          true);
      SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
      Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
                           MachinePointerInfo::getFixedStack(NewFPIdx),
                           false, false, 0);
    }
  }
  return Chain;
}

/// CalculateTailCallArgDest - Remember the argument for later processing, and
/// calculate its position.
static void
CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
                         SDValue Arg, int SPDiff, unsigned ArgOffset,
                    SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
  int Offset = ArgOffset + SPDiff;
  uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8;
  int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
  EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
  SDValue FIN = DAG.getFrameIndex(FI, VT);
  TailCallArgumentInfo Info;
  Info.Arg = Arg;
  Info.FrameIdxOp = FIN;
  Info.FrameIdx = FI;
  TailCallArguments.push_back(Info);
}

/// EmitTailCallLoadFPAndRetAddr - Emit loads of the frame pointer and return
/// address from their stack slots. Returns the chain as result and the loaded
/// values in LROpOut/FPOpOut. Used when tail calling.
SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
                                                        int SPDiff,
                                                        SDValue Chain,
                                                        SDValue &LROpOut,
                                                        SDValue &FPOpOut,
                                                        bool isDarwinABI,
                                                        SDLoc dl) const {
  if (SPDiff) {
    // Load the LR and FP stack slots for later adjusting.
    EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
    LROpOut = getReturnAddrFrameIndex(DAG);
    LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(),
                          false, false, false, 0);
    Chain = SDValue(LROpOut.getNode(), 1);

    // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
    // slot as the FP is never overwritten.
    if (isDarwinABI) {
      FPOpOut = getFramePointerFrameIndex(DAG);
      FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(),
                            false, false, false, 0);
      Chain = SDValue(FPOpOut.getNode(), 1);
    }
  }
  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
/// specified by "Src" to address "Dst" of size "Size". Alignment information
/// is specified by the parameter attribute. The copy will be passed as a
/// byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part that
/// does not fit in registers.
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                          SDLoc dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       false, false, MachinePointerInfo(),
                       MachinePointerInfo());
}

/// LowerMemOpCallTo - Store the argument to the stack, or remember it in
/// case of tail calls.
static void
LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain,
                 SDValue Arg, SDValue PtrOff, int SPDiff,
                 unsigned ArgOffset, bool isPPC64, bool isTailCall,
                 bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
                 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments,
                 SDLoc dl) {
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  if (!isTailCall) {
    if (isVector) {
      SDValue StackPtr;
      if (isPPC64)
        StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
      else
        StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
                           DAG.getConstant(ArgOffset, PtrVT));
    }
    MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
                                       MachinePointerInfo(), false, false, 0));
  } else
    // Calculate and remember the argument location.
    CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
                             TailCallArguments);
}

static
void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
                     SDLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes,
                     SDValue LROp, SDValue FPOp, bool isDarwinABI,
                     SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
  MachineFunction &MF = DAG.getMachineFunction();

  // Emit a sequence of copyto/copyfrom virtual registers for arguments that
  // might overwrite each other in case of tail call optimization.
  SmallVector<SDValue, 8> MemOpChains2;
  // Do not glue the preceding copytoreg nodes together with the following
  // nodes.
  InFlag = SDValue();
  StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
                                    MemOpChains2, dl);
  if (!MemOpChains2.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);

  // Store the return address to the appropriate stack slot.
  Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff,
                                        isPPC64, isDarwinABI, dl);

  // Emit callseq_end just before the tailcall node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                             DAG.getIntPtrConstant(0, true), InFlag, dl);
  InFlag = Chain.getValue(1);
}

static
unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
                     SDValue &Chain, SDLoc dl, int SPDiff, bool isTailCall,
                     SmallVectorImpl<std::pair<unsigned, SDValue> > &RegsToPass,
                     SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys,
                     const PPCSubtarget &Subtarget) {

  bool isPPC64 = Subtarget.isPPC64();
  bool isSVR4ABI = Subtarget.isSVR4ABI();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  NodeTys.push_back(MVT::Other);   // Returns a chain
  NodeTys.push_back(MVT::Glue);    // Returns a flag for retval copy to use.

  unsigned CallOpc = PPCISD::CALL;

  bool needIndirectCall = true;
  if (!isSVR4ABI || !isPPC64)
    if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) {
      // If this is an absolute destination address, use the munged value.
      Callee = SDValue(Dest, 0);
      needIndirectCall = false;
    }

  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // XXX Workaround for http://llvm.org/bugs/show_bug.cgi?id=5201
    // Use indirect calls for ALL function calls in JIT mode, since the
    // far-call stubs may be outside relocation limits for a BL instruction.
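    // (For reference: BL is an I-form branch whose 24-bit signed displacement
    // is shifted left by two, giving it a reach of roughly +/-32 MB from the
    // call site; stubs placed further away than that are unreachable.)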
    if (!DAG.getTarget().getSubtarget<PPCSubtarget>().isJITCodeModel()) {
      unsigned OpFlags = 0;
      if (DAG.getTarget().getRelocationModel() != Reloc::Static &&
          (Subtarget.getTargetTriple().isMacOSX() &&
           Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5)) &&
          (G->getGlobal()->isDeclaration() ||
           G->getGlobal()->isWeakForLinker())) {
        // PC-relative references to external symbols should go through $stub,
        // unless we're building with the Leopard linker or later, which
        // automatically synthesizes these stubs.
        OpFlags = PPCII::MO_DARWIN_STUB;
      }

      // If the callee is a GlobalAddress/ExternalSymbol node (quite common,
      // every direct call is), turn it into a TargetGlobalAddress /
      // TargetExternalSymbol node so that legalize doesn't hack it.
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
                                          Callee.getValueType(),
                                          0, OpFlags);
      needIndirectCall = false;
    }
  }

  if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    unsigned char OpFlags = 0;

    if (DAG.getTarget().getRelocationModel() != Reloc::Static &&
        (Subtarget.getTargetTriple().isMacOSX() &&
         Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5))) {
      // PC-relative references to external symbols should go through $stub,
      // unless we're building with the Leopard linker or later, which
      // automatically synthesizes these stubs.
      OpFlags = PPCII::MO_DARWIN_STUB;
    }

    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(),
                                         OpFlags);
    needIndirectCall = false;
  }

  if (needIndirectCall) {
    // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair
    // to do the call; we can't use PPCISD::CALL.
    SDValue MTCTROps[] = {Chain, Callee, InFlag};

    if (isSVR4ABI && isPPC64) {
      // Function pointers in the 64-bit SVR4 ABI do not point to the function
      // entry point, but to the function descriptor (the function entry point
      // address is part of the function descriptor though).
      // The function descriptor is a three doubleword structure with the
      // following fields: function entry point, TOC base address and
      // environment pointer.
      // Thus for a call through a function pointer, the following actions need
      // to be performed:
      //   1. Save the TOC of the caller in the TOC save area of its stack
      //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
      //   2. Load the address of the function entry point from the function
      //      descriptor.
      //   3. Load the TOC of the callee from the function descriptor into r2.
      //   4. Load the environment pointer from the function descriptor into
      //      r11.
      //   5. Branch to the function entry point address.
      //   6. On return of the callee, the TOC of the caller needs to be
      //      restored (this is done in FinishCall()).
      //
      // All those operations are flagged together to ensure that no other
      // operations can be scheduled in between. E.g. without flagging the
      // operations together, a TOC access in the caller could be scheduled
      // between the load of the callee TOC and the branch to the callee, which
      // results in the TOC access going through the TOC of the callee instead
      // of going through the TOC of the caller, which leads to incorrect code.

      // Load the address of the function entry point from the function
      // descriptor.
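      // As an illustration only (no such type is used in this file), the
      // descriptor described above can be pictured as:
      //
      //   struct FunctionDescriptor {
      //     uint64_t EntryPoint; // offset 0:  loaded here, moved to CTR
      //     uint64_t TOCBase;    // offset 8:  loaded into r2 below
      //     uint64_t EnvPointer; // offset 16: loaded into r11 below
      //   };
      //
      // The constants 8 and 16 below (TOCOff and PtrOff) are exactly these
      // field offsets.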
3426 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other, MVT::Glue); 3427 SDValue LoadFuncPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, 3428 makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2)); 3429 Chain = LoadFuncPtr.getValue(1); 3430 InFlag = LoadFuncPtr.getValue(2); 3431 3432 // Load environment pointer into r11. 3433 // Offset of the environment pointer within the function descriptor. 3434 SDValue PtrOff = DAG.getIntPtrConstant(16); 3435 3436 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 3437 SDValue LoadEnvPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, Chain, AddPtr, 3438 InFlag); 3439 Chain = LoadEnvPtr.getValue(1); 3440 InFlag = LoadEnvPtr.getValue(2); 3441 3442 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 3443 InFlag); 3444 Chain = EnvVal.getValue(0); 3445 InFlag = EnvVal.getValue(1); 3446 3447 // Load TOC of the callee into r2. We are using a target-specific load 3448 // with r2 hard coded, because the result of a target-independent load 3449 // would never go directly into r2, since r2 is a reserved register (which 3450 // prevents the register allocator from allocating it), resulting in an 3451 // additional register being allocated and an unnecessary move instruction 3452 // being generated. 3453 VTs = DAG.getVTList(MVT::Other, MVT::Glue); 3454 SDValue TOCOff = DAG.getIntPtrConstant(8); 3455 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff); 3456 SDValue LoadTOCPtr = DAG.getNode(PPCISD::LOAD_TOC, dl, VTs, Chain, 3457 AddTOC, InFlag); 3458 Chain = LoadTOCPtr.getValue(0); 3459 InFlag = LoadTOCPtr.getValue(1); 3460 3461 MTCTROps[0] = Chain; 3462 MTCTROps[1] = LoadFuncPtr; 3463 MTCTROps[2] = InFlag; 3464 } 3465 3466 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, 3467 makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2)); 3468 InFlag = Chain.getValue(1); 3469 3470 NodeTys.clear(); 3471 NodeTys.push_back(MVT::Other); 3472 NodeTys.push_back(MVT::Glue); 3473 Ops.push_back(Chain); 3474 CallOpc = PPCISD::BCTRL; 3475 Callee.setNode(nullptr); 3476 // Add use of X11 (holding environment pointer) 3477 if (isSVR4ABI && isPPC64) 3478 Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); 3479 // Add CTR register as callee so a bctr can be emitted later. 3480 if (isTailCall) 3481 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 3482 } 3483 3484 // If this is a direct call, pass the chain and the callee. 3485 if (Callee.getNode()) { 3486 Ops.push_back(Chain); 3487 Ops.push_back(Callee); 3488 } 3489 // If this is a tail call add stack pointer delta. 3490 if (isTailCall) 3491 Ops.push_back(DAG.getConstant(SPDiff, MVT::i32)); 3492 3493 // Add argument registers to the end of the list so that they are known live 3494 // into the call. 
3495 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 3496 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 3497 RegsToPass[i].second.getValueType())); 3498 3499 return CallOpc; 3500 } 3501 3502 static 3503 bool isLocalCall(const SDValue &Callee) 3504 { 3505 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 3506 return !G->getGlobal()->isDeclaration() && 3507 !G->getGlobal()->isWeakForLinker(); 3508 return false; 3509 } 3510 3511 SDValue 3512 PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 3513 CallingConv::ID CallConv, bool isVarArg, 3514 const SmallVectorImpl<ISD::InputArg> &Ins, 3515 SDLoc dl, SelectionDAG &DAG, 3516 SmallVectorImpl<SDValue> &InVals) const { 3517 3518 SmallVector<CCValAssign, 16> RVLocs; 3519 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), 3520 getTargetMachine(), RVLocs, *DAG.getContext()); 3521 CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC); 3522 3523 // Copy all of the result registers out of their specified physreg. 3524 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 3525 CCValAssign &VA = RVLocs[i]; 3526 assert(VA.isRegLoc() && "Can only return in registers!"); 3527 3528 SDValue Val = DAG.getCopyFromReg(Chain, dl, 3529 VA.getLocReg(), VA.getLocVT(), InFlag); 3530 Chain = Val.getValue(1); 3531 InFlag = Val.getValue(2); 3532 3533 switch (VA.getLocInfo()) { 3534 default: llvm_unreachable("Unknown loc info!"); 3535 case CCValAssign::Full: break; 3536 case CCValAssign::AExt: 3537 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 3538 break; 3539 case CCValAssign::ZExt: 3540 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val, 3541 DAG.getValueType(VA.getValVT())); 3542 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 3543 break; 3544 case CCValAssign::SExt: 3545 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val, 3546 DAG.getValueType(VA.getValVT())); 3547 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 3548 break; 3549 } 3550 3551 InVals.push_back(Val); 3552 } 3553 3554 return Chain; 3555 } 3556 3557 SDValue 3558 PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl, 3559 bool isTailCall, bool isVarArg, 3560 SelectionDAG &DAG, 3561 SmallVector<std::pair<unsigned, SDValue>, 8> 3562 &RegsToPass, 3563 SDValue InFlag, SDValue Chain, 3564 SDValue &Callee, 3565 int SPDiff, unsigned NumBytes, 3566 const SmallVectorImpl<ISD::InputArg> &Ins, 3567 SmallVectorImpl<SDValue> &InVals) const { 3568 std::vector<EVT> NodeTys; 3569 SmallVector<SDValue, 8> Ops; 3570 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, dl, SPDiff, 3571 isTailCall, RegsToPass, Ops, NodeTys, 3572 Subtarget); 3573 3574 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls 3575 if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64()) 3576 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); 3577 3578 // When performing tail call optimization the callee pops its arguments off 3579 // the stack. Account for this here so these bytes can be pushed back on in 3580 // PPCFrameLowering::eliminateCallFramePseudoInstr. 3581 int BytesCalleePops = 3582 (CallConv == CallingConv::Fast && 3583 getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0; 3584 3585 // Add a register mask operand representing the call-preserved registers. 
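  // (For reference: the mask is a bit vector over all physical registers in
  // which a set bit marks a register as preserved across the call; anything
  // not set is treated as potentially clobbered by the register allocator.)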
  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  // Emit tail call.
  if (isTailCall) {
    assert(((Callee.getOpcode() == ISD::Register &&
             cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
            Callee.getOpcode() == ISD::TargetExternalSymbol ||
            Callee.getOpcode() == ISD::TargetGlobalAddress ||
            isa<ConstantSDNode>(Callee)) &&
           "Expecting a global address, external symbol, absolute value or "
           "register");

    return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
  }

  // Add a NOP immediately after the branch instruction when using the 64-bit
  // SVR4 ABI. At link time, if caller and callee are in a different module and
  // thus have a different TOC, the call will be replaced with a call to a stub
  // function which saves the current TOC, loads the TOC of the callee and
  // branches to the callee. The NOP will be replaced with a load instruction
  // which restores the TOC of the caller from the TOC save slot of the current
  // stack frame. If caller and callee belong to the same module (and have the
  // same TOC), the NOP will remain unchanged.

  bool needsTOCRestore = false;
  if (!isTailCall && Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
    if (CallOpc == PPCISD::BCTRL) {
      // This is a call through a function pointer.
      // Restore the caller TOC from the save area into R2.
      // See PrepareCall() for more information about calls through function
      // pointers in the 64-bit SVR4 ABI.
      // We are using a target-specific load with r2 hard coded, because the
      // result of a target-independent load would never go directly into r2,
      // since r2 is a reserved register (which prevents the register allocator
      // from allocating it), resulting in an additional register being
      // allocated and an unnecessary move instruction being generated.
      needsTOCRestore = true;
    } else if ((CallOpc == PPCISD::CALL) &&
               (!isLocalCall(Callee) ||
                DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
      // Otherwise insert a NOP for non-local calls.
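      // (Concretely, on 64-bit SVR4 the linker may rewrite the NOP emitted
      // here to "ld r2, 40(r1)", reloading the caller's TOC from its save
      // slot; 40 is the TOC save offset used by this ABI.)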
      CallOpc = PPCISD::CALL_NOP;
    }
  }

  Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  if (needsTOCRestore) {
    SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
    EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
    SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
    unsigned TOCSaveOffset = PPCFrameLowering::getTOCSaveOffset();
    SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset);
    SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);
    Chain = DAG.getNode(PPCISD::LOAD_TOC, dl, VTs, Chain, AddTOC, InFlag);
    InFlag = Chain.getValue(1);
  }

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                             DAG.getIntPtrConstant(BytesCalleePops, true),
                             InFlag, dl);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
                         Ins, dl, DAG, InVals);
}

SDValue
PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  if (isTailCall)
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
                                                   Ins, DAG);

  if (!isTailCall && CLI.CS && CLI.CS->isMustTailCall())
    report_fatal_error("failed to perform tail call elimination on a call "
                       "site marked musttail");

  if (Subtarget.isSVR4ABI()) {
    if (Subtarget.isPPC64())
      return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
                              isTailCall, Outs, OutVals, Ins,
                              dl, DAG, InVals);
    else
      return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
                              isTailCall, Outs, OutVals, Ins,
                              dl, DAG, InVals);
  }

  return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
                          isTailCall, Outs, OutVals, Ins,
                          dl, DAG, InVals);
}

SDValue
PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
                                    CallingConv::ID CallConv, bool isVarArg,
                                    bool isTailCall,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SDLoc dl, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &InVals) const {
  // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
  // of the 32-bit SVR4 ABI stack frame layout.

  assert((CallConv == CallingConv::C ||
          CallConv == CallingConv::Fast) && "Unknown calling convention!");

  unsigned PtrByteSize = 4;

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a tail call. As a
  // consequence, the frame pointer will be used for dynamic alloca and for
  // restoring the caller's stack pointer in this function's epilogue. This is
  // done because a tail-called function might overwrite the value in this
  // function's (MF) stack pointer stack slot 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, the parameter list area and the part of the local variable space
  // which contains copies of aggregates which are passed by value.

  // Assign locations to all of the outgoing arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  // Reserve space for the linkage area on the stack.
  CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false),
                       PtrByteSize);

  if (isVarArg) {
    // Handle fixed and variable vector arguments differently.
    // Fixed vector arguments go into registers as long as registers are
    // available. Variable vector arguments always go into memory.
    unsigned NumArgs = Outs.size();

    for (unsigned i = 0; i != NumArgs; ++i) {
      MVT ArgVT = Outs[i].VT;
      ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
      bool Result;

      if (Outs[i].IsFixed) {
        Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
                               CCInfo);
      } else {
        Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
                                      ArgFlags, CCInfo);
      }

      if (Result) {
#ifndef NDEBUG
        errs() << "Call operand #" << i << " has unhandled type "
               << EVT(ArgVT).getEVTString() << "\n";
#endif
        llvm_unreachable(nullptr);
      }
    }
  } else {
    // All arguments are treated the same.
    CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
  }

  // Assign locations to all of the outgoing aggregate by-value arguments.
  SmallVector<CCValAssign, 16> ByValArgLocs;
  CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                      getTargetMachine(), ByValArgLocs, *DAG.getContext());

  // Reserve stack space for the allocations in CCInfo.
  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);

  CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);

  // Size of the linkage area, the parameter list area and the part of the
  // local variable space where copies of by-value aggregates are stored.
  unsigned NumBytes = CCByValInfo.getNextStackOffset();

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
                               dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false,
                                       dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
  SmallVector<SDValue, 8> MemOpChains;

  bool seenFloatArg = false;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;

    if (Flags.isByVal()) {
      // Argument is an aggregate which is passed by value, thus we need to
      // create a copy of it in the local variable space of the current stack
      // frame (which is the stack frame of the caller) and pass the address
      // of this copy to the callee.
      assert((j < ByValArgLocs.size()) && "Index out of bounds!");
      CCValAssign &ByValVA = ByValArgLocs[j++];
      assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");

      // Memory reserved in the local variable space of the caller's stack
      // frame.
      unsigned LocMemOffset = ByValVA.getLocMemOffset();

      SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
      PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);

      // Create a copy of the argument in the local area of the current
      // stack frame.
      SDValue MemcpyCall =
        CreateCopyOfByValArgument(Arg, PtrOff,
                                  CallSeqStart.getNode()->getOperand(0),
                                  Flags, DAG, dl);

      // This must go outside the CALLSEQ_START..END.
      SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
                           CallSeqStart.getNode()->getOperand(1),
                           SDLoc(MemcpyCall));
      DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
                             NewCallSeqStart.getNode());
      Chain = CallSeqStart = NewCallSeqStart;

      // Pass the address of the aggregate copy on the stack either in a
      // physical register or in the parameter list area of the current stack
      // frame to the callee.
      Arg = PtrOff;
    }

    if (VA.isRegLoc()) {
      if (Arg.getValueType() == MVT::i1)
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg);

      seenFloatArg |= VA.getLocVT().isFloatingPoint();
      // Put the argument in a physical register.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      // Put the argument in the parameter list area of the current stack
      // frame.
      assert(VA.isMemLoc());
      unsigned LocMemOffset = VA.getLocMemOffset();

      if (!isTailCall) {
        SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
        PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);

        MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
                                           MachinePointerInfo(),
                                           false, false, 0));
      } else {
        // Calculate and remember the argument location.
        CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
                                 TailCallArguments);
      }
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
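  // (Threading the InFlag glue value from each copy into the next keeps the
  // scheduler from reordering the copies or placing other nodes between them
  // and the call that consumes the registers.)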
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // Set CR bit 6 to true if this is a vararg call with floating args passed
  // in registers.
  if (isVarArg) {
    SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Ops[] = { Chain, InFlag };

    Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
                        dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));

    InFlag = Chain.getValue(1);
  }

  if (isTailCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp,
                    FPOp, false, TailCallArguments);

  return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
                    RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
                    Ins, InVals);
}

// Copy an argument into memory, being careful to do this outside the
// call sequence for the call to which the argument belongs.
SDValue
PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
                                              SDValue CallSeqStart,
                                              ISD::ArgFlagsTy Flags,
                                              SelectionDAG &DAG,
                                              SDLoc dl) const {
  SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
                        CallSeqStart.getNode()->getOperand(0),
                        Flags, DAG, dl);
  // The MEMCPY must go outside the CALLSEQ_START..END.
  SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
                             CallSeqStart.getNode()->getOperand(1),
                             SDLoc(MemcpyCall));
  DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
                         NewCallSeqStart.getNode());
  return NewCallSeqStart;
}

SDValue
PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
                                    CallingConv::ID CallConv, bool isVarArg,
                                    bool isTailCall,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SDLoc dl, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &InVals) const {

  bool isLittleEndian = Subtarget.isLittleEndian();
  unsigned NumOps = Outs.size();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  unsigned PtrByteSize = 8;

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a tail call. As a
  // consequence, the frame pointer will be used for dynamic alloca and for
  // restoring the caller's stack pointer in this function's epilogue. This is
  // done because a tail-called function might overwrite the value in this
  // function's (MF) stack pointer stack slot 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area and the parameter passing area. We start with at least 48 bytes,
  // which is reserved space for [SP][CR][LR][3 x unused].
  unsigned LinkageSize = PPCFrameLowering::getLinkageSize(true, false);
  unsigned NumBytes = LinkageSize;

  // Add up all the space actually used.
  for (unsigned i = 0; i != NumOps; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;

    // Altivec parameters are padded to a 16-byte boundary.
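    // (For example, NumBytes == 52 is rounded up to ((52+15)/16)*16 == 64
    // below before the vector's own 16-byte slot is added.)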
    if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
        ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
        ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64)
      NumBytes = ((NumBytes+15)/16)*16;

    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
  }

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if the callee
  // is varargs. Because we cannot tell if this is needed on the caller side,
  // we have to conservatively assume that it is needed. As such, make sure we
  // have at least enough stack space for the caller to store the 8 GPRs.
  NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);

  // Tail calls need the stack to be aligned.
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    NumBytes = EnsureStackAlignment(MF.getTarget(), NumBytes);

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // To protect arguments on the stack from being clobbered in a tail call,
  // force all the loads to happen before doing any other lowering.
  if (isTailCall)
    Chain = DAG.getStackArgumentTokenFactor(Chain);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
                               dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
                                       dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);

  // Figure out which arguments are going to go in registers, and which in
  // memory. Also, if this is a vararg function, floating point operations
  // must be stored to our stack, and loaded into integer regs as well, if
  // any integer regs are available for argument passing.
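  // A rough sketch of the 64-bit SVR4 layout assumed below: the caller's
  // frame holds the 48-byte linkage area followed by the parameter save
  // area, whose first 8 * PtrByteSize == 64 bytes are shadowed by GPRs
  // r3-r10. ArgOffset therefore starts at LinkageSize, and each consumed
  // register or stack slot advances it.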
4024 unsigned ArgOffset = LinkageSize; 4025 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 4026 4027 static const MCPhysReg GPR[] = { 4028 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 4029 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 4030 }; 4031 static const MCPhysReg *FPR = GetFPR(); 4032 4033 static const MCPhysReg VR[] = { 4034 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 4035 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 4036 }; 4037 static const MCPhysReg VSRH[] = { 4038 PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8, 4039 PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13 4040 }; 4041 4042 const unsigned NumGPRs = array_lengthof(GPR); 4043 const unsigned NumFPRs = 13; 4044 const unsigned NumVRs = array_lengthof(VR); 4045 4046 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 4047 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 4048 4049 SmallVector<SDValue, 8> MemOpChains; 4050 for (unsigned i = 0; i != NumOps; ++i) { 4051 SDValue Arg = OutVals[i]; 4052 ISD::ArgFlagsTy Flags = Outs[i].Flags; 4053 4054 // PtrOff will be used to store the current argument to the stack if a 4055 // register cannot be found for it. 4056 SDValue PtrOff; 4057 4058 PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); 4059 4060 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 4061 4062 // Promote integers to 64-bit values. 4063 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) { 4064 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 4065 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 4066 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 4067 } 4068 4069 // FIXME memcpy is used way more than necessary. Correctness first. 4070 // Note: "by value" is code for passing a structure by value, not 4071 // basic types. 4072 if (Flags.isByVal()) { 4073 // Note: Size includes alignment padding, so 4074 // struct x { short a; char b; } 4075 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 4076 // These are the proper values we need for right-justifying the 4077 // aggregate in a parameter register. 4078 unsigned Size = Flags.getByValSize(); 4079 4080 // An empty aggregate parameter takes up no storage and no 4081 // registers. 4082 if (Size == 0) 4083 continue; 4084 4085 unsigned BVAlign = Flags.getByValAlign(); 4086 if (BVAlign > 8) { 4087 if (BVAlign % PtrByteSize != 0) 4088 llvm_unreachable( 4089 "ByVal alignment is not a multiple of the pointer size"); 4090 4091 ArgOffset = ((ArgOffset+BVAlign-1)/BVAlign)*BVAlign; 4092 } 4093 4094 // All aggregates smaller than 8 bytes must be passed right-justified. 4095 if (Size==1 || Size==2 || Size==4) { 4096 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? 
MVT::i16 : MVT::i32); 4097 if (GPR_idx != NumGPRs) { 4098 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 4099 MachinePointerInfo(), VT, 4100 false, false, 0); 4101 MemOpChains.push_back(Load.getValue(1)); 4102 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4103 4104 ArgOffset += PtrByteSize; 4105 continue; 4106 } 4107 } 4108 4109 if (GPR_idx == NumGPRs && Size < 8) { 4110 SDValue AddPtr = PtrOff; 4111 if (!isLittleEndian) { 4112 SDValue Const = DAG.getConstant(PtrByteSize - Size, 4113 PtrOff.getValueType()); 4114 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 4115 } 4116 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 4117 CallSeqStart, 4118 Flags, DAG, dl); 4119 ArgOffset += PtrByteSize; 4120 continue; 4121 } 4122 // Copy entire object into memory. There are cases where gcc-generated 4123 // code assumes it is there, even if it could be put entirely into 4124 // registers. (This is not what the doc says.) 4125 4126 // FIXME: The above statement is likely due to a misunderstanding of the 4127 // documents. All arguments must be copied into the parameter area BY 4128 // THE CALLEE in the event that the callee takes the address of any 4129 // formal argument. That has not yet been implemented. However, it is 4130 // reasonable to use the stack area as a staging area for the register 4131 // load. 4132 4133 // Skip this for small aggregates, as we will use the same slot for a 4134 // right-justified copy, below. 4135 if (Size >= 8) 4136 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 4137 CallSeqStart, 4138 Flags, DAG, dl); 4139 4140 // When a register is available, pass a small aggregate right-justified. 4141 if (Size < 8 && GPR_idx != NumGPRs) { 4142 // The easiest way to get this right-justified in a register 4143 // is to copy the structure into the rightmost portion of a 4144 // local variable slot, then load the whole slot into the 4145 // register. 4146 // FIXME: The memcpy seems to produce pretty awful code for 4147 // small aggregates, particularly for packed ones. 4148 // FIXME: It would be preferable to use the slot in the 4149 // parameter save area instead of a new local variable. 4150 SDValue AddPtr = PtrOff; 4151 if (!isLittleEndian) { 4152 SDValue Const = DAG.getConstant(8 - Size, PtrOff.getValueType()); 4153 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 4154 } 4155 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 4156 CallSeqStart, 4157 Flags, DAG, dl); 4158 4159 // Load the slot into the register. 4160 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, PtrOff, 4161 MachinePointerInfo(), 4162 false, false, false, 0); 4163 MemOpChains.push_back(Load.getValue(1)); 4164 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4165 4166 // Done with this argument. 4167 ArgOffset += PtrByteSize; 4168 continue; 4169 } 4170 4171 // For aggregates larger than PtrByteSize, copy the pieces of the 4172 // object that fit into registers from the parameter save area. 
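      // (For example, a 20-byte aggregate is walked below in steps
      // j = 0, 8, 16; each doubleword that still has a free GPR is loaded
      // into one, and once GPRs run out the remainder simply stays in the
      // memory image written above while ArgOffset is bumped past it.)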
4173 for (unsigned j=0; j<Size; j+=PtrByteSize) { 4174 SDValue Const = DAG.getConstant(j, PtrOff.getValueType()); 4175 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 4176 if (GPR_idx != NumGPRs) { 4177 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 4178 MachinePointerInfo(), 4179 false, false, false, 0); 4180 MemOpChains.push_back(Load.getValue(1)); 4181 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4182 ArgOffset += PtrByteSize; 4183 } else { 4184 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 4185 break; 4186 } 4187 } 4188 continue; 4189 } 4190 4191 switch (Arg.getSimpleValueType().SimpleTy) { 4192 default: llvm_unreachable("Unexpected ValueType for argument!"); 4193 case MVT::i1: 4194 case MVT::i32: 4195 case MVT::i64: 4196 if (GPR_idx != NumGPRs) { 4197 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 4198 } else { 4199 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4200 true, isTailCall, false, MemOpChains, 4201 TailCallArguments, dl); 4202 } 4203 ArgOffset += PtrByteSize; 4204 break; 4205 case MVT::f32: 4206 case MVT::f64: 4207 if (FPR_idx != NumFPRs) { 4208 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 4209 4210 if (isVarArg) { 4211 // A single float or an aggregate containing only a single float 4212 // must be passed right-justified in the stack doubleword, and 4213 // in the GPR, if one is available. 4214 SDValue StoreOff; 4215 if (Arg.getSimpleValueType().SimpleTy == MVT::f32 && 4216 !isLittleEndian) { 4217 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 4218 StoreOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 4219 } else 4220 StoreOff = PtrOff; 4221 4222 SDValue Store = DAG.getStore(Chain, dl, Arg, StoreOff, 4223 MachinePointerInfo(), false, false, 0); 4224 MemOpChains.push_back(Store); 4225 4226 // Float varargs are always shadowed in available integer registers 4227 if (GPR_idx != NumGPRs) { 4228 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 4229 MachinePointerInfo(), false, false, 4230 false, 0); 4231 MemOpChains.push_back(Load.getValue(1)); 4232 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4233 } 4234 } else if (GPR_idx != NumGPRs) 4235 // If we have any FPRs remaining, we may also have GPRs remaining. 4236 ++GPR_idx; 4237 } else { 4238 // Single-precision floating-point values are mapped to the 4239 // second (rightmost) word of the stack doubleword. 4240 if (Arg.getValueType() == MVT::f32 && !isLittleEndian) { 4241 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 4242 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 4243 } 4244 4245 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4246 true, isTailCall, false, MemOpChains, 4247 TailCallArguments, dl); 4248 } 4249 ArgOffset += 8; 4250 break; 4251 case MVT::v4f32: 4252 case MVT::v4i32: 4253 case MVT::v8i16: 4254 case MVT::v16i8: 4255 case MVT::v2f64: 4256 case MVT::v2i64: 4257 // Vectors are aligned to a 16-byte boundary in the argument save area. 4258 while (ArgOffset % 16 !=0) { 4259 ArgOffset += PtrByteSize; 4260 if (GPR_idx != NumGPRs) 4261 GPR_idx++; 4262 } 4263 4264 // For a varargs call, named arguments go into VRs or on the stack as 4265 // usual; unnamed arguments always go to the stack or the corresponding 4266 // GPRs when within range. For now, we always put the value in both 4267 // locations (or even all three). 
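      // (The locations are the parameter save area image stored below, a VR
      // or VSR if one is free, and any GPRs that shadow the 16 bytes; the
      // loads below recreate the value from the in-memory copy for each
      // register destination.)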
4268 if (isVarArg) { 4269 // We could elide this store in the case where the object fits 4270 // entirely in R registers. Maybe later. 4271 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4272 DAG.getConstant(ArgOffset, PtrVT)); 4273 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 4274 MachinePointerInfo(), false, false, 0); 4275 MemOpChains.push_back(Store); 4276 if (VR_idx != NumVRs) { 4277 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 4278 MachinePointerInfo(), 4279 false, false, false, 0); 4280 MemOpChains.push_back(Load.getValue(1)); 4281 4282 unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 || 4283 Arg.getSimpleValueType() == MVT::v2i64) ? 4284 VSRH[VR_idx] : VR[VR_idx]; 4285 ++VR_idx; 4286 4287 RegsToPass.push_back(std::make_pair(VReg, Load)); 4288 } 4289 ArgOffset += 16; 4290 for (unsigned i=0; i<16; i+=PtrByteSize) { 4291 if (GPR_idx == NumGPRs) 4292 break; 4293 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 4294 DAG.getConstant(i, PtrVT)); 4295 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 4296 false, false, false, 0); 4297 MemOpChains.push_back(Load.getValue(1)); 4298 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4299 } 4300 break; 4301 } 4302 4303 // Non-varargs Altivec params go into VRs or on the stack. 4304 if (VR_idx != NumVRs) { 4305 unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 || 4306 Arg.getSimpleValueType() == MVT::v2i64) ? 4307 VSRH[VR_idx] : VR[VR_idx]; 4308 ++VR_idx; 4309 4310 RegsToPass.push_back(std::make_pair(VReg, Arg)); 4311 } else { 4312 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4313 true, isTailCall, true, MemOpChains, 4314 TailCallArguments, dl); 4315 } 4316 ArgOffset += 16; 4317 GPR_idx = std::min(GPR_idx + 2, NumGPRs); 4318 break; 4319 } 4320 } 4321 4322 if (!MemOpChains.empty()) 4323 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 4324 4325 // Check if this is an indirect call (MTCTR/BCTRL). 4326 // See PrepareCall() for more information about calls through function 4327 // pointers in the 64-bit SVR4 ABI. 4328 if (!isTailCall && 4329 !dyn_cast<GlobalAddressSDNode>(Callee) && 4330 !dyn_cast<ExternalSymbolSDNode>(Callee)) { 4331 // Load r2 into a virtual register and store it to the TOC save area. 4332 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64); 4333 // TOC save area offset. 4334 unsigned TOCSaveOffset = PPCFrameLowering::getTOCSaveOffset(); 4335 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset); 4336 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 4337 Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr, MachinePointerInfo(), 4338 false, false, 0); 4339 } 4340 4341 // Build a sequence of copy-to-reg nodes chained together with token chain 4342 // and flag operands which copy the outgoing args into the appropriate regs. 
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (isTailCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, true, SPDiff, NumBytes, LROp,
                    FPOp, true, TailCallArguments);

  return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
                    RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
                    Ins, InVals);
}

SDValue
PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
                                    CallingConv::ID CallConv, bool isVarArg,
                                    bool isTailCall,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SDLoc dl, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &InVals) const {

  unsigned NumOps = Outs.size();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;
  unsigned PtrByteSize = isPPC64 ? 8 : 4;

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a tail call. As a
  // consequence, the frame pointer will be used for dynamic alloca and for
  // restoring the caller's stack pointer in this function's epilogue. This is
  // done because a tail-called function might overwrite the value in this
  // function's (MF) stack pointer stack slot 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area and the parameter passing area. We start with 24/48 bytes, which is
  // prereserved space for [SP][CR][LR][3 x unused].
  unsigned LinkageSize = PPCFrameLowering::getLinkageSize(isPPC64, true);
  unsigned NumBytes = LinkageSize;

  // Add up all the space actually used.
  // In 32-bit non-varargs calls, Altivec parameters all go at the end;
  // usually they all go in registers, but we must reserve stack space for
  // them for possible use by the caller. In varargs or 64-bit calls,
  // parameters are assigned stack space in order, with padding so Altivec
  // parameters are 16-byte aligned.
  unsigned nAltivecParamsAtEnd = 0;
  for (unsigned i = 0; i != NumOps; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    // Varargs Altivec parameters are padded to a 16-byte boundary.
    if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
        ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
        ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
      if (!isVarArg && !isPPC64) {
        // Non-varargs Altivec parameters go after all the non-Altivec
        // parameters; handle those later so we know how much padding we need.
        nAltivecParamsAtEnd++;
        continue;
      }
      // Varargs and 64-bit Altivec parameters are padded to a 16-byte
      // boundary.
      NumBytes = ((NumBytes+15)/16)*16;
    }
    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
  }

  // Allow for Altivec parameters at the end, if needed.
  if (nAltivecParamsAtEnd) {
    NumBytes = ((NumBytes+15)/16)*16;
    NumBytes += 16*nAltivecParamsAtEnd;
  }

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if it is
  // varargs. Because we cannot tell if this is needed on the caller side, we
  // have to conservatively assume that it is needed. As such, make sure we
  // have at least enough stack space for the caller to store the 8 GPRs.
  NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);

  // Tail calls need the stack to be aligned.
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    NumBytes = EnsureStackAlignment(MF.getTarget(), NumBytes);

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // To protect arguments on the stack from being clobbered in a tail call,
  // force all the loads to happen before doing any other lowering.
  if (isTailCall)
    Chain = DAG.getStackArgumentTokenFactor(Chain);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
                               dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
                                       dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr;
  if (isPPC64)
    StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
  else
    StackPtr = DAG.getRegister(PPC::R1, MVT::i32);

  // Figure out which arguments are going to go in registers, and which in
  // memory. Also, if this is a vararg function, floating point operations
  // must be stored to our stack, and loaded into integer regs as well, if
  // any integer regs are available for argument passing.
  unsigned ArgOffset = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  static const MCPhysReg GPR_32[] = {           // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const MCPhysReg GPR_64[] = {           // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg *FPR = GetFPR();

  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };
  const unsigned NumGPRs = array_lengthof(GPR_32);
  const unsigned NumFPRs = 13;
  const unsigned NumVRs  = array_lengthof(VR);

  const MCPhysReg *GPR = isPPC64 ?
GPR_64 : GPR_32; 4492 4493 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 4494 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 4495 4496 SmallVector<SDValue, 8> MemOpChains; 4497 for (unsigned i = 0; i != NumOps; ++i) { 4498 SDValue Arg = OutVals[i]; 4499 ISD::ArgFlagsTy Flags = Outs[i].Flags; 4500 4501 // PtrOff will be used to store the current argument to the stack if a 4502 // register cannot be found for it. 4503 SDValue PtrOff; 4504 4505 PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); 4506 4507 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 4508 4509 // On PPC64, promote integers to 64-bit values. 4510 if (isPPC64 && Arg.getValueType() == MVT::i32) { 4511 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 4512 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 4513 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 4514 } 4515 4516 // FIXME memcpy is used way more than necessary. Correctness first. 4517 // Note: "by value" is code for passing a structure by value, not 4518 // basic types. 4519 if (Flags.isByVal()) { 4520 unsigned Size = Flags.getByValSize(); 4521 // Very small objects are passed right-justified. Everything else is 4522 // passed left-justified. 4523 if (Size==1 || Size==2) { 4524 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 4525 if (GPR_idx != NumGPRs) { 4526 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 4527 MachinePointerInfo(), VT, 4528 false, false, 0); 4529 MemOpChains.push_back(Load.getValue(1)); 4530 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4531 4532 ArgOffset += PtrByteSize; 4533 } else { 4534 SDValue Const = DAG.getConstant(PtrByteSize - Size, 4535 PtrOff.getValueType()); 4536 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 4537 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 4538 CallSeqStart, 4539 Flags, DAG, dl); 4540 ArgOffset += PtrByteSize; 4541 } 4542 continue; 4543 } 4544 // Copy entire object into memory. There are cases where gcc-generated 4545 // code assumes it is there, even if it could be put entirely into 4546 // registers. (This is not what the doc says.) 4547 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 4548 CallSeqStart, 4549 Flags, DAG, dl); 4550 4551 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 4552 // copy the pieces of the object that fit into registers from the 4553 // parameter save area. 
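      // (For example, a 12-byte aggregate with PtrByteSize == 4 is forwarded
      // as three word loads into three consecutive GPRs, bumping ArgOffset by
      // 4 each time; if the GPRs run out first, the remainder stays in the
      // memory image written by the memcpy above and ArgOffset skips past
      // it.)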
4554 for (unsigned j=0; j<Size; j+=PtrByteSize) { 4555 SDValue Const = DAG.getConstant(j, PtrOff.getValueType()); 4556 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 4557 if (GPR_idx != NumGPRs) { 4558 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 4559 MachinePointerInfo(), 4560 false, false, false, 0); 4561 MemOpChains.push_back(Load.getValue(1)); 4562 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4563 ArgOffset += PtrByteSize; 4564 } else { 4565 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 4566 break; 4567 } 4568 } 4569 continue; 4570 } 4571 4572 switch (Arg.getSimpleValueType().SimpleTy) { 4573 default: llvm_unreachable("Unexpected ValueType for argument!"); 4574 case MVT::i1: 4575 case MVT::i32: 4576 case MVT::i64: 4577 if (GPR_idx != NumGPRs) { 4578 if (Arg.getValueType() == MVT::i1) 4579 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg); 4580 4581 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 4582 } else { 4583 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4584 isPPC64, isTailCall, false, MemOpChains, 4585 TailCallArguments, dl); 4586 } 4587 ArgOffset += PtrByteSize; 4588 break; 4589 case MVT::f32: 4590 case MVT::f64: 4591 if (FPR_idx != NumFPRs) { 4592 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 4593 4594 if (isVarArg) { 4595 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 4596 MachinePointerInfo(), false, false, 0); 4597 MemOpChains.push_back(Store); 4598 4599 // Float varargs are always shadowed in available integer registers 4600 if (GPR_idx != NumGPRs) { 4601 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 4602 MachinePointerInfo(), false, false, 4603 false, 0); 4604 MemOpChains.push_back(Load.getValue(1)); 4605 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4606 } 4607 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 4608 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 4609 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 4610 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 4611 MachinePointerInfo(), 4612 false, false, false, 0); 4613 MemOpChains.push_back(Load.getValue(1)); 4614 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4615 } 4616 } else { 4617 // If we have any FPRs remaining, we may also have GPRs remaining. 4618 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 4619 // GPRs. 4620 if (GPR_idx != NumGPRs) 4621 ++GPR_idx; 4622 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 4623 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 4624 ++GPR_idx; 4625 } 4626 } else 4627 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4628 isPPC64, isTailCall, false, MemOpChains, 4629 TailCallArguments, dl); 4630 if (isPPC64) 4631 ArgOffset += 8; 4632 else 4633 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 4634 break; 4635 case MVT::v4f32: 4636 case MVT::v4i32: 4637 case MVT::v8i16: 4638 case MVT::v16i8: 4639 if (isVarArg) { 4640 // These go aligned on the stack, or in the corresponding R registers 4641 // when within range. The Darwin PPC ABI doc claims they also go in 4642 // V registers; in fact gcc does this only for arguments that are 4643 // prototyped, not for those that match the ... We do it for all 4644 // arguments, seems to work. 
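      // (Illustrative: if ArgOffset is 40 with PtrByteSize == 4, the loop
      // below pads to the 16-byte boundary at 48 in two 4-byte steps, and
      // each step also consumes one of the GPRs shadowing those stack words,
      // keeping the register assignment and the memory layout in sync.)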
4645 while (ArgOffset % 16 !=0) { 4646 ArgOffset += PtrByteSize; 4647 if (GPR_idx != NumGPRs) 4648 GPR_idx++; 4649 } 4650 // We could elide this store in the case where the object fits 4651 // entirely in R registers. Maybe later. 4652 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4653 DAG.getConstant(ArgOffset, PtrVT)); 4654 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 4655 MachinePointerInfo(), false, false, 0); 4656 MemOpChains.push_back(Store); 4657 if (VR_idx != NumVRs) { 4658 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 4659 MachinePointerInfo(), 4660 false, false, false, 0); 4661 MemOpChains.push_back(Load.getValue(1)); 4662 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 4663 } 4664 ArgOffset += 16; 4665 for (unsigned i=0; i<16; i+=PtrByteSize) { 4666 if (GPR_idx == NumGPRs) 4667 break; 4668 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 4669 DAG.getConstant(i, PtrVT)); 4670 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 4671 false, false, false, 0); 4672 MemOpChains.push_back(Load.getValue(1)); 4673 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4674 } 4675 break; 4676 } 4677 4678 // Non-varargs Altivec params generally go in registers, but have 4679 // stack space allocated at the end. 4680 if (VR_idx != NumVRs) { 4681 // Doesn't have GPR space allocated. 4682 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 4683 } else if (nAltivecParamsAtEnd==0) { 4684 // We are emitting Altivec params in order. 4685 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4686 isPPC64, isTailCall, true, MemOpChains, 4687 TailCallArguments, dl); 4688 ArgOffset += 16; 4689 } 4690 break; 4691 } 4692 } 4693 // If all Altivec parameters fit in registers, as they usually do, 4694 // they get stack space following the non-Altivec parameters. We 4695 // don't track this here because nobody below needs it. 4696 // If there are more Altivec parameters than fit in registers emit 4697 // the stores here. 4698 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 4699 unsigned j = 0; 4700 // Offset is aligned; skip 1st 12 params which go in V registers. 4701 ArgOffset = ((ArgOffset+15)/16)*16; 4702 ArgOffset += 12*16; 4703 for (unsigned i = 0; i != NumOps; ++i) { 4704 SDValue Arg = OutVals[i]; 4705 EVT ArgType = Outs[i].VT; 4706 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 4707 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 4708 if (++j > NumVRs) { 4709 SDValue PtrOff; 4710 // We are emitting Altivec params in order. 4711 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4712 isPPC64, isTailCall, true, MemOpChains, 4713 TailCallArguments, dl); 4714 ArgOffset += 16; 4715 } 4716 } 4717 } 4718 } 4719 4720 if (!MemOpChains.empty()) 4721 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 4722 4723 // On Darwin, R12 must contain the address of an indirect callee. This does 4724 // not mean the MTCTR instruction must use R12; it's easier to model this as 4725 // an extra parameter, so do that. 4726 if (!isTailCall && 4727 !dyn_cast<GlobalAddressSDNode>(Callee) && 4728 !dyn_cast<ExternalSymbolSDNode>(Callee) && 4729 !isBLACompatibleAddress(Callee, DAG)) 4730 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 : 4731 PPC::R12), Callee)); 4732 4733 // Build a sequence of copy-to-reg nodes chained together with token chain 4734 // and flag operands which copy the outgoing args into the appropriate regs. 
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (isTailCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp,
                    FPOp, true, TailCallArguments);

  return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
                    RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
                    Ins, InVals);
}

bool
PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                  MachineFunction &MF, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
                 RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_PPC);
}

SDValue
PPCTargetLowering::LowerReturn(SDValue Chain,
                               CallingConv::ID CallConv, bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               SDLoc dl, SelectionDAG &DAG) const {

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC_PPC);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[i];

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
}

SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
                                      const PPCSubtarget &Subtarget) const {
  // When we pop the dynamic allocation we need to restore the SP link.
  SDLoc dl(Op);

  // Get the correct type for pointers.
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  // Construct the stack pointer operand.
  bool isPPC64 = Subtarget.isPPC64();
  unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
  SDValue StackPtr = DAG.getRegister(SP, PtrVT);

  // Get the operands for the STACKRESTORE.
  SDValue Chain = Op.getOperand(0);
  SDValue SaveSP = Op.getOperand(1);

  // Load the old link SP.
  SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr,
                                   MachinePointerInfo(),
                                   false, false, false, 0);

  // Restore the stack pointer.
  Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);

  // Store the old link SP.
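  // (StackPtr names the R1/X1 register itself, so after the CopyToReg above
  // this store writes the saved link to offset 0 of the *new* stack pointer,
  // preserving the ABI invariant that 0(SP) always holds the back chain.)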
  return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo(),
                      false, false, 0);
}



SDValue
PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = Subtarget.isPPC64();
  bool isDarwinABI = Subtarget.isDarwinABI();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  // Get the current return address save index.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int RASI = FI->getReturnAddrSaveIndex();

  // If the return address save index hasn't been defined yet.
  if (!RASI) {
    // Find out the fixed offset of the return address save area.
    int LROffset = PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI);
    // Allocate the frame index for the return address save area.
    RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, true);
    // Save the result.
    FI->setReturnAddrSaveIndex(RASI);
  }
  return DAG.getFrameIndex(RASI, PtrVT);
}

SDValue
PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = Subtarget.isPPC64();
  bool isDarwinABI = Subtarget.isDarwinABI();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  // Get the current frame pointer save index. The users of this index will be
  // primarily DYNALLOC instructions.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int FPSI = FI->getFramePointerSaveIndex();

  // If the frame pointer save index hasn't been defined yet.
  if (!FPSI) {
    // Find out the fixed offset of the frame pointer save area.
    int FPOffset = PPCFrameLowering::getFramePointerSaveOffset(isPPC64,
                                                               isDarwinABI);

    // Allocate the frame index for the frame pointer save area.
    FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
    // Save the result.
    FI->setFramePointerSaveIndex(FPSI);
  }
  return DAG.getFrameIndex(FPSI, PtrVT);
}

SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                         SelectionDAG &DAG,
                                         const PPCSubtarget &Subtarget) const {
  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue Size  = Op.getOperand(1);
  SDLoc dl(Op);

  // Get the correct type for pointers.
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  // Negate the size.
  SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
                                DAG.getConstant(0, PtrVT), Size);
  // Construct a node for the frame pointer save index.
  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
  // Build a DYNALLOC node.
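  // (DYNALLOC is a PPC pseudo expanded after register allocation; conceptually
  // it adds the already-negated size to the stack pointer while re-linking the
  // back chain at the new SP, roughly a stwux/stdux, and its first result is
  // the address of the newly allocated block. FPSIdx tells the expansion where
  // the frame pointer was saved.)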
4910 SDValue Ops[3] = { Chain, NegSize, FPSIdx }; 4911 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other); 4912 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops); 4913 } 4914 4915 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, 4916 SelectionDAG &DAG) const { 4917 SDLoc DL(Op); 4918 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL, 4919 DAG.getVTList(MVT::i32, MVT::Other), 4920 Op.getOperand(0), Op.getOperand(1)); 4921 } 4922 4923 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, 4924 SelectionDAG &DAG) const { 4925 SDLoc DL(Op); 4926 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other, 4927 Op.getOperand(0), Op.getOperand(1)); 4928 } 4929 4930 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 4931 assert(Op.getValueType() == MVT::i1 && 4932 "Custom lowering only for i1 loads"); 4933 4934 // First, load 8 bits into 32 bits, then truncate to 1 bit. 4935 4936 SDLoc dl(Op); 4937 LoadSDNode *LD = cast<LoadSDNode>(Op); 4938 4939 SDValue Chain = LD->getChain(); 4940 SDValue BasePtr = LD->getBasePtr(); 4941 MachineMemOperand *MMO = LD->getMemOperand(); 4942 4943 SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(), Chain, 4944 BasePtr, MVT::i8, MMO); 4945 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD); 4946 4947 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) }; 4948 return DAG.getMergeValues(Ops, dl); 4949 } 4950 4951 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 4952 assert(Op.getOperand(1).getValueType() == MVT::i1 && 4953 "Custom lowering only for i1 stores"); 4954 4955 // First, zero extend to 32 bits, then use a truncating store to 8 bits. 4956 4957 SDLoc dl(Op); 4958 StoreSDNode *ST = cast<StoreSDNode>(Op); 4959 4960 SDValue Chain = ST->getChain(); 4961 SDValue BasePtr = ST->getBasePtr(); 4962 SDValue Value = ST->getValue(); 4963 MachineMemOperand *MMO = ST->getMemOperand(); 4964 4965 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(), Value); 4966 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO); 4967 } 4968 4969 // FIXME: Remove this once the ANDI glue bug is fixed: 4970 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 4971 assert(Op.getValueType() == MVT::i1 && 4972 "Custom lowering only for i1 results"); 4973 4974 SDLoc DL(Op); 4975 return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1, 4976 Op.getOperand(0)); 4977 } 4978 4979 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when 4980 /// possible. 4981 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 4982 // Not FP? Not a fsel. 4983 if (!Op.getOperand(0).getValueType().isFloatingPoint() || 4984 !Op.getOperand(2).getValueType().isFloatingPoint()) 4985 return Op; 4986 4987 // We might be able to do better than this under some circumstances, but in 4988 // general, fsel-based lowering of select is a finite-math-only optimization. 4989 // For more information, see section F.3 of the 2.06 ISA specification. 
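  // (For reference: fsel FRT, FRA, FRC, FRB computes
  //   FRT = (FRA >= 0.0) ? FRC : FRB,
  // where -0.0 counts as >= 0.0 and any NaN input selects FRB. That is why
  // the lowering below is only attempted once both infinities and NaNs have
  // been excluded.)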
  if (!DAG.getTarget().Options.NoInfsFPMath ||
      !DAG.getTarget().Options.NoNaNsFPMath)
    return Op;

  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();

  EVT ResVT = Op.getValueType();
  EVT CmpVT = Op.getOperand(0).getValueType();
  SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
  SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
  SDLoc dl(Op);

  // If the RHS of the comparison is a 0.0, we don't need to do the
  // subtraction at all.
  SDValue Sel1;
  if (isFloatingPointZero(RHS))
    switch (CC) {
    default: break;       // SETUO etc aren't handled by fsel.
    case ISD::SETNE:
      std::swap(TV, FV);  // Fall through to SETEQ with the arms swapped.
    case ISD::SETEQ:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
      if (Sel1.getValueType() == MVT::f32)  // Comparison is always 64-bits
        Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
    case ISD::SETULT:
    case ISD::SETLT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
    case ISD::SETOGE:
    case ISD::SETGE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
    case ISD::SETUGT:
    case ISD::SETGT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setgt
    case ISD::SETOLE:
    case ISD::SETLE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
    }

  SDValue Cmp;
  switch (CC) {
  default: break;       // SETUO etc aren't handled by fsel.
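  // (With a nonzero RHS the same trick is applied to the difference:
  // materialize Cmp = LHS - RHS (or RHS - LHS) so that fsel's compare
  // against zero implements the requested predicate. For example,
  // select_cc(a, b, t, f, setge) becomes fsel(a - b, t, f), and the
  // "less than" forms are produced by swapping t and f instead.)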
5040 case ISD::SETNE: 5041 std::swap(TV, FV); 5042 case ISD::SETEQ: 5043 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 5044 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 5045 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 5046 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 5047 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 5048 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 5049 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 5050 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 5051 case ISD::SETULT: 5052 case ISD::SETLT: 5053 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 5054 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 5055 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 5056 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 5057 case ISD::SETOGE: 5058 case ISD::SETGE: 5059 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 5060 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 5061 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 5062 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 5063 case ISD::SETUGT: 5064 case ISD::SETGT: 5065 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 5066 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 5067 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 5068 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 5069 case ISD::SETOLE: 5070 case ISD::SETLE: 5071 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 5072 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 5073 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 5074 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 5075 } 5076 return Op; 5077 } 5078 5079 // FIXME: Split this code up when LegalizeDAGTypes lands. 5080 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 5081 SDLoc dl) const { 5082 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 5083 SDValue Src = Op.getOperand(0); 5084 if (Src.getValueType() == MVT::f32) 5085 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 5086 5087 SDValue Tmp; 5088 switch (Op.getSimpleValueType().SimpleTy) { 5089 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 5090 case MVT::i32: 5091 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIWZ : 5092 (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : 5093 PPCISD::FCTIDZ), 5094 dl, MVT::f64, Src); 5095 break; 5096 case MVT::i64: 5097 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 5098 "i64 FP_TO_UINT is supported only with FPCVT"); 5099 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 5100 PPCISD::FCTIDUZ, 5101 dl, MVT::f64, Src); 5102 break; 5103 } 5104 5105 // Convert the FP value to an int value through memory. 5106 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 5107 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()); 5108 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 5109 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 5110 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(FI); 5111 5112 // Emit a store to the stack slot. 
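  // (The fcti* nodes above leave the converted integer in a floating-point
  // register, and the subtargets this path serves have no direct FPR-to-GPR
  // move, so the value has to round-trip through a stack slot: stfiwx stores
  // just the low 32 bits, while the plain-store path writes the whole f64
  // and reloads the integer below.)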
5113 SDValue Chain; 5114 if (i32Stack) { 5115 MachineFunction &MF = DAG.getMachineFunction(); 5116 MachineMemOperand *MMO = 5117 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 5118 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 5119 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 5120 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 5121 } else 5122 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, 5123 MPI, false, false, 0); 5124 5125 // Result is a load from the stack slot. If loading 4 bytes, make sure to 5126 // add in a bias. 5127 if (Op.getValueType() == MVT::i32 && !i32Stack) { 5128 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 5129 DAG.getConstant(4, FIPtr.getValueType())); 5130 MPI = MachinePointerInfo(); 5131 } 5132 5133 return DAG.getLoad(Op.getValueType(), dl, Chain, FIPtr, MPI, 5134 false, false, false, 0); 5135 } 5136 5137 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, 5138 SelectionDAG &DAG) const { 5139 SDLoc dl(Op); 5140 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 5141 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 5142 return SDValue(); 5143 5144 if (Op.getOperand(0).getValueType() == MVT::i1) 5145 return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0), 5146 DAG.getConstantFP(1.0, Op.getValueType()), 5147 DAG.getConstantFP(0.0, Op.getValueType())); 5148 5149 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) && 5150 "UINT_TO_FP is supported only with FPCVT"); 5151 5152 // If we have FCFIDS, then use it when converting to single-precision. 5153 // Otherwise, convert to double-precision and then round. 5154 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) ? 5155 (Op.getOpcode() == ISD::UINT_TO_FP ? 5156 PPCISD::FCFIDUS : PPCISD::FCFIDS) : 5157 (Op.getOpcode() == ISD::UINT_TO_FP ? 5158 PPCISD::FCFIDU : PPCISD::FCFID); 5159 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) ? 5160 MVT::f32 : MVT::f64; 5161 5162 if (Op.getOperand(0).getValueType() == MVT::i64) { 5163 SDValue SINT = Op.getOperand(0); 5164 // When converting to single-precision, we actually need to convert 5165 // to double-precision first and then round to single-precision. 5166 // To avoid double-rounding effects during that operation, we have 5167 // to prepare the input operand. Bits that might be truncated when 5168 // converting to double-precision are replaced by a bit that won't 5169 // be lost at this stage, but is below the single-precision rounding 5170 // position. 5171 // 5172 // However, if -enable-unsafe-fp-math is in effect, accept double 5173 // rounding to avoid the extra overhead. 5174 if (Op.getValueType() == MVT::f32 && 5175 !Subtarget.hasFPCVT() && 5176 !DAG.getTarget().Options.UnsafeFPMath) { 5177 5178 // Twiddle input to make sure the low 11 bits are zero. (If this 5179 // is the case, we are guaranteed the value will fit into the 53 bit 5180 // mantissa of an IEEE double-precision value without rounding.) 5181 // If any of those low 11 bits were not zero originally, make sure 5182 // bit 12 (value 2048) is set instead, so that the final rounding 5183 // to single-precision gets the correct result. 
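      // (Worked example: if the low 11 bits of SINT are 0x005, then
      // (0x005 & 2047) + 2047 = 2052 has bit 11 (value 2048) set; OR-ing
      // with SINT and masking with -2048 clears the low 11 bits and leaves
      // bit 11 as a sticky marker, giving low bits 0x800. If the low 11
      // bits are already zero, the same steps reproduce SINT unchanged.)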
5184 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 5185 SINT, DAG.getConstant(2047, MVT::i64)); 5186 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 5187 Round, DAG.getConstant(2047, MVT::i64)); 5188 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 5189 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 5190 Round, DAG.getConstant(-2048, MVT::i64)); 5191 5192 // However, we cannot use that value unconditionally: if the magnitude 5193 // of the input value is small, the bit-twiddling we did above might 5194 // end up visibly changing the output. Fortunately, in that case, we 5195 // don't need to twiddle bits since the original input will convert 5196 // exactly to double-precision floating-point already. Therefore, 5197 // construct a conditional to use the original value if the top 11 5198 // bits are all sign-bit copies, and use the rounded value computed 5199 // above otherwise. 5200 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 5201 SINT, DAG.getConstant(53, MVT::i32)); 5202 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 5203 Cond, DAG.getConstant(1, MVT::i64)); 5204 Cond = DAG.getSetCC(dl, MVT::i32, 5205 Cond, DAG.getConstant(1, MVT::i64), ISD::SETUGT); 5206 5207 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 5208 } 5209 5210 SDValue Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 5211 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 5212 5213 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 5214 FP = DAG.getNode(ISD::FP_ROUND, dl, 5215 MVT::f32, FP, DAG.getIntPtrConstant(0)); 5216 return FP; 5217 } 5218 5219 assert(Op.getOperand(0).getValueType() == MVT::i32 && 5220 "Unhandled INT_TO_FP type in custom expander!"); 5221 // Since we only generate this in 64-bit mode, we can take advantage of 5222 // 64-bit registers. In particular, sign extend the input value into the 5223 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 5224 // then lfd it and fcfid it. 5225 MachineFunction &MF = DAG.getMachineFunction(); 5226 MachineFrameInfo *FrameInfo = MF.getFrameInfo(); 5227 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 5228 5229 SDValue Ld; 5230 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 5231 int FrameIdx = FrameInfo->CreateStackObject(4, 4, false); 5232 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 5233 5234 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 5235 MachinePointerInfo::getFixedStack(FrameIdx), 5236 false, false, 0); 5237 5238 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 5239 "Expected an i32 store"); 5240 MachineMemOperand *MMO = 5241 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx), 5242 MachineMemOperand::MOLoad, 4, 4); 5243 SDValue Ops[] = { Store, FIdx }; 5244 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 5245 PPCISD::LFIWZX : PPCISD::LFIWAX, 5246 dl, DAG.getVTList(MVT::f64, MVT::Other), 5247 Ops, MVT::i32, MMO); 5248 } else { 5249 assert(Subtarget.isPPC64() && 5250 "i32->FP without LFIWAX supported only on PPC64"); 5251 5252 int FrameIdx = FrameInfo->CreateStackObject(8, 8, false); 5253 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 5254 5255 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, 5256 Op.getOperand(0)); 5257 5258 // STD the extended value into the stack slot. 5259 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Ext64, FIdx, 5260 MachinePointerInfo::getFixedStack(FrameIdx), 5261 false, false, 0); 5262 5263 // Load the value as a double. 
    Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx,
                     MachinePointerInfo::getFixedStack(FrameIdx),
                     false, false, false, 0);
  }

  // FCFID it and return it.
  SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
  if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
    FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, DAG.getIntPtrConstant(0));
  return FP;
}

SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  /*
   The rounding mode is in bits 30:31 of the FPSCR, and has the following
   settings:
     00 Round to nearest
     01 Round to 0
     10 Round to +inf
     11 Round to -inf

   FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

   To perform the conversion, we do:
     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
   For example, FPSCR rounding bits 0b10 (round to +inf) give
   (2 ^ ((~2 & 3) >> 1)) = (2 ^ 0) = 2, the FLT_ROUNDS value for +inf.
  */

  MachineFunction &MF = DAG.getMachineFunction();
  EVT VT = Op.getValueType();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  // Save FP Control Word to register.
  EVT NodeTys[] = {
    MVT::f64,    // return register
    MVT::Glue    // unused in this context
  };
  SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);

  // Save FP register to stack slot.
  int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain,
                               StackSlot, MachinePointerInfo(), false, false,0);

  // Load FP Control Word from low 32 bits of stack slot.
  SDValue Four = DAG.getConstant(4, PtrVT);
  SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
  SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(),
                            false, false, false, 0);

  // Transform as necessary.
  SDValue CWD1 =
    DAG.getNode(ISD::AND, dl, MVT::i32,
                CWD, DAG.getConstant(3, MVT::i32));
  SDValue CWD2 =
    DAG.getNode(ISD::SRL, dl, MVT::i32,
                DAG.getNode(ISD::AND, dl, MVT::i32,
                            DAG.getNode(ISD::XOR, dl, MVT::i32,
                                        CWD, DAG.getConstant(3, MVT::i32)),
                            DAG.getConstant(3, MVT::i32)),
                DAG.getConstant(1, MVT::i32));

  SDValue RetVal =
    DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);

  return DAG.getNode((VT.getSizeInBits() < 16 ?
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
}

SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  SDLoc dl(Op);
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SHL!");

  // Expand into a bunch of logical ops. Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
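  // (The PPC 32-bit shift operations yield 0 for shift amounts in [32, 63]
  // rather than wrapping, which is what makes this expansion work without a
  // select. For example, with BitWidth == 32 and Amt == 40: Tmp2 = Hi << 40
  // = 0, Tmp3 = Lo >> (32-40) = 0, Tmp5 = 40-32 = 8, so OutHi = Lo << 8 and
  // OutLo = Lo << 40 = 0, the expected result of a 64-bit left shift by 40.)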
5350 SDValue Lo = Op.getOperand(0); 5351 SDValue Hi = Op.getOperand(1); 5352 SDValue Amt = Op.getOperand(2); 5353 EVT AmtVT = Amt.getValueType(); 5354 5355 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 5356 DAG.getConstant(BitWidth, AmtVT), Amt); 5357 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 5358 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 5359 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 5360 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 5361 DAG.getConstant(-BitWidth, AmtVT)); 5362 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 5363 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 5364 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 5365 SDValue OutOps[] = { OutLo, OutHi }; 5366 return DAG.getMergeValues(OutOps, dl); 5367 } 5368 5369 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 5370 EVT VT = Op.getValueType(); 5371 SDLoc dl(Op); 5372 unsigned BitWidth = VT.getSizeInBits(); 5373 assert(Op.getNumOperands() == 3 && 5374 VT == Op.getOperand(1).getValueType() && 5375 "Unexpected SRL!"); 5376 5377 // Expand into a bunch of logical ops. Note that these ops 5378 // depend on the PPC behavior for oversized shift amounts. 5379 SDValue Lo = Op.getOperand(0); 5380 SDValue Hi = Op.getOperand(1); 5381 SDValue Amt = Op.getOperand(2); 5382 EVT AmtVT = Amt.getValueType(); 5383 5384 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 5385 DAG.getConstant(BitWidth, AmtVT), Amt); 5386 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 5387 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 5388 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 5389 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 5390 DAG.getConstant(-BitWidth, AmtVT)); 5391 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 5392 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 5393 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 5394 SDValue OutOps[] = { OutLo, OutHi }; 5395 return DAG.getMergeValues(OutOps, dl); 5396 } 5397 5398 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 5399 SDLoc dl(Op); 5400 EVT VT = Op.getValueType(); 5401 unsigned BitWidth = VT.getSizeInBits(); 5402 assert(Op.getNumOperands() == 3 && 5403 VT == Op.getOperand(1).getValueType() && 5404 "Unexpected SRA!"); 5405 5406 // Expand into a bunch of logical ops, followed by a select_cc. 5407 SDValue Lo = Op.getOperand(0); 5408 SDValue Hi = Op.getOperand(1); 5409 SDValue Amt = Op.getOperand(2); 5410 EVT AmtVT = Amt.getValueType(); 5411 5412 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 5413 DAG.getConstant(BitWidth, AmtVT), Amt); 5414 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 5415 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 5416 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 5417 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 5418 DAG.getConstant(-BitWidth, AmtVT)); 5419 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 5420 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 5421 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, AmtVT), 5422 Tmp4, Tmp6, ISD::SETLE); 5423 SDValue OutOps[] = { OutLo, OutHi }; 5424 return DAG.getMergeValues(OutOps, dl); 5425 } 5426 5427 //===----------------------------------------------------------------------===// 5428 // Vector related lowering. 5429 // 5430 5431 /// BuildSplatI - Build a canonical splati of Val with an element size of 5432 /// SplatSize. 
Cast the result to VT. 5433 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 5434 SelectionDAG &DAG, SDLoc dl) { 5435 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 5436 5437 static const EVT VTys[] = { // canonical VT to use for each size. 5438 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 5439 }; 5440 5441 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 5442 5443 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 5444 if (Val == -1) 5445 SplatSize = 1; 5446 5447 EVT CanonicalVT = VTys[SplatSize-1]; 5448 5449 // Build a canonical splat for this value. 5450 SDValue Elt = DAG.getConstant(Val, MVT::i32); 5451 SmallVector<SDValue, 8> Ops; 5452 Ops.assign(CanonicalVT.getVectorNumElements(), Elt); 5453 SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, Ops); 5454 return DAG.getNode(ISD::BITCAST, dl, ReqVT, Res); 5455 } 5456 5457 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 5458 /// specified intrinsic ID. 5459 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, 5460 SelectionDAG &DAG, SDLoc dl, 5461 EVT DestVT = MVT::Other) { 5462 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 5463 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 5464 DAG.getConstant(IID, MVT::i32), Op); 5465 } 5466 5467 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 5468 /// specified intrinsic ID. 5469 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 5470 SelectionDAG &DAG, SDLoc dl, 5471 EVT DestVT = MVT::Other) { 5472 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 5473 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 5474 DAG.getConstant(IID, MVT::i32), LHS, RHS); 5475 } 5476 5477 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 5478 /// specified intrinsic ID. 5479 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 5480 SDValue Op2, SelectionDAG &DAG, 5481 SDLoc dl, EVT DestVT = MVT::Other) { 5482 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 5483 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 5484 DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2); 5485 } 5486 5487 5488 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 5489 /// amount. The result has the specified value type. 5490 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, 5491 EVT VT, SelectionDAG &DAG, SDLoc dl) { 5492 // Force LHS/RHS to be the right type. 5493 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 5494 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 5495 5496 int Ops[16]; 5497 for (unsigned i = 0; i != 16; ++i) 5498 Ops[i] = i + Amt; 5499 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 5500 return DAG.getNode(ISD::BITCAST, dl, VT, T); 5501 } 5502 5503 // If this is a case we can't handle, return null and let the default 5504 // expansion code take care of it. If we CAN select this case, and if it 5505 // selects to a single instruction, return Op. Otherwise, if we can codegen 5506 // this case more efficiently than a constant pool load, lower it to the 5507 // sequence of ops that should be used. 5508 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 5509 SelectionDAG &DAG) const { 5510 SDLoc dl(Op); 5511 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 5512 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 5513 5514 // Check if this is a splat of a constant value. 
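  // (The single-instruction cases handled first rely on vspltis[bhw], which
  // splats a 5-bit sign-extended immediate, so only values in [-16,15] are
  // reachable directly; e.g. a BUILD_VECTOR of eight i16 3s becomes
  // "vspltish 3". Everything else falls to the multi-instruction sequences
  // tried below.)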
5515 APInt APSplatBits, APSplatUndef; 5516 unsigned SplatBitSize; 5517 bool HasAnyUndefs; 5518 if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize, 5519 HasAnyUndefs, 0, true) || SplatBitSize > 32) 5520 return SDValue(); 5521 5522 unsigned SplatBits = APSplatBits.getZExtValue(); 5523 unsigned SplatUndef = APSplatUndef.getZExtValue(); 5524 unsigned SplatSize = SplatBitSize / 8; 5525 5526 // First, handle single instruction cases. 5527 5528 // All zeros? 5529 if (SplatBits == 0) { 5530 // Canonicalize all zero vectors to be v4i32. 5531 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 5532 SDValue Z = DAG.getConstant(0, MVT::i32); 5533 Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z); 5534 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z); 5535 } 5536 return Op; 5537 } 5538 5539 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 5540 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >> 5541 (32-SplatBitSize)); 5542 if (SextVal >= -16 && SextVal <= 15) 5543 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl); 5544 5545 5546 // Two instruction sequences. 5547 5548 // If this value is in the range [-32,30] and is even, use: 5549 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2) 5550 // If this value is in the range [17,31] and is odd, use: 5551 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16) 5552 // If this value is in the range [-31,-17] and is odd, use: 5553 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16) 5554 // Note the last two are three-instruction sequences. 5555 if (SextVal >= -32 && SextVal <= 31) { 5556 // To avoid having these optimizations undone by constant folding, 5557 // we convert to a pseudo that will be expanded later into one of 5558 // the above forms. 5559 SDValue Elt = DAG.getConstant(SextVal, MVT::i32); 5560 EVT VT = (SplatSize == 1 ? MVT::v16i8 : 5561 (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32)); 5562 SDValue EltSize = DAG.getConstant(SplatSize, MVT::i32); 5563 SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize); 5564 if (VT == Op.getValueType()) 5565 return RetVal; 5566 else 5567 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal); 5568 } 5569 5570 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is 5571 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important 5572 // for fneg/fabs. 5573 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) { 5574 // Make -1 and vspltisw -1: 5575 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl); 5576 5577 // Make the VSLW intrinsic, computing 0x8000_0000. 5578 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV, 5579 OnesV, DAG, dl); 5580 5581 // xor by OnesV to invert it. 5582 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV); 5583 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 5584 } 5585 5586 // The remaining cases assume either big endian element order or 5587 // a splat-size that equates to the element size of the vector 5588 // to be built. An example that doesn't work for little endian is 5589 // {0, -1, 0, -1, 0, -1, 0, -1} which has a splat size of 32 bits 5590 // and a vector element size of 16 bits. The code below will 5591 // produce the vector in big endian element order, which for little 5592 // endian is {-1, 0, -1, 0, -1, 0, -1, 0}. 5593 5594 // For now, just avoid these optimizations in that case. 5595 // FIXME: Develop correct optimizations for LE with mismatched 5596 // splat and element sizes. 
5597 5598 if (Subtarget.isLittleEndian() && 5599 SplatSize != Op.getValueType().getVectorElementType().getSizeInBits()) 5600 return SDValue(); 5601 5602 // Check to see if this is a wide variety of vsplti*, binop self cases. 5603 static const signed char SplatCsts[] = { 5604 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7, 5605 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16 5606 }; 5607 5608 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) { 5609 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for 5610 // cases which are ambiguous (e.g. formation of 0x8000_0000). 'vsplti -1' 5611 int i = SplatCsts[idx]; 5612 5613 // Figure out what shift amount will be used by altivec if shifted by i in 5614 // this splat size. 5615 unsigned TypeShiftAmt = i & (SplatBitSize-1); 5616 5617 // vsplti + shl self. 5618 if (SextVal == (int)((unsigned)i << TypeShiftAmt)) { 5619 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 5620 static const unsigned IIDs[] = { // Intrinsic to use for each size. 5621 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0, 5622 Intrinsic::ppc_altivec_vslw 5623 }; 5624 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 5625 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 5626 } 5627 5628 // vsplti + srl self. 5629 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 5630 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 5631 static const unsigned IIDs[] = { // Intrinsic to use for each size. 5632 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0, 5633 Intrinsic::ppc_altivec_vsrw 5634 }; 5635 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 5636 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 5637 } 5638 5639 // vsplti + sra self. 5640 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 5641 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 5642 static const unsigned IIDs[] = { // Intrinsic to use for each size. 5643 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0, 5644 Intrinsic::ppc_altivec_vsraw 5645 }; 5646 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 5647 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 5648 } 5649 5650 // vsplti + rol self. 5651 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) | 5652 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) { 5653 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 5654 static const unsigned IIDs[] = { // Intrinsic to use for each size. 5655 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 5656 Intrinsic::ppc_altivec_vrlw 5657 }; 5658 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 5659 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 5660 } 5661 5662 // t = vsplti c, result = vsldoi t, t, 1 5663 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) { 5664 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 5665 return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG, dl); 5666 } 5667 // t = vsplti c, result = vsldoi t, t, 2 5668 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) { 5669 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 5670 return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG, dl); 5671 } 5672 // t = vsplti c, result = vsldoi t, t, 3 5673 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 
0xFFFFFF : 0))) { 5674 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 5675 return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl); 5676 } 5677 } 5678 5679 return SDValue(); 5680 } 5681 5682 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 5683 /// the specified operations to build the shuffle. 5684 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 5685 SDValue RHS, SelectionDAG &DAG, 5686 SDLoc dl) { 5687 unsigned OpNum = (PFEntry >> 26) & 0x0F; 5688 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 5689 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 5690 5691 enum { 5692 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 5693 OP_VMRGHW, 5694 OP_VMRGLW, 5695 OP_VSPLTISW0, 5696 OP_VSPLTISW1, 5697 OP_VSPLTISW2, 5698 OP_VSPLTISW3, 5699 OP_VSLDOI4, 5700 OP_VSLDOI8, 5701 OP_VSLDOI12 5702 }; 5703 5704 if (OpNum == OP_COPY) { 5705 if (LHSID == (1*9+2)*9+3) return LHS; 5706 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 5707 return RHS; 5708 } 5709 5710 SDValue OpLHS, OpRHS; 5711 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 5712 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 5713 5714 int ShufIdxs[16]; 5715 switch (OpNum) { 5716 default: llvm_unreachable("Unknown i32 permute!"); 5717 case OP_VMRGHW: 5718 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 5719 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 5720 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 5721 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 5722 break; 5723 case OP_VMRGLW: 5724 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 5725 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 5726 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 5727 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 5728 break; 5729 case OP_VSPLTISW0: 5730 for (unsigned i = 0; i != 16; ++i) 5731 ShufIdxs[i] = (i&3)+0; 5732 break; 5733 case OP_VSPLTISW1: 5734 for (unsigned i = 0; i != 16; ++i) 5735 ShufIdxs[i] = (i&3)+4; 5736 break; 5737 case OP_VSPLTISW2: 5738 for (unsigned i = 0; i != 16; ++i) 5739 ShufIdxs[i] = (i&3)+8; 5740 break; 5741 case OP_VSPLTISW3: 5742 for (unsigned i = 0; i != 16; ++i) 5743 ShufIdxs[i] = (i&3)+12; 5744 break; 5745 case OP_VSLDOI4: 5746 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl); 5747 case OP_VSLDOI8: 5748 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl); 5749 case OP_VSLDOI12: 5750 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl); 5751 } 5752 EVT VT = OpLHS.getValueType(); 5753 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS); 5754 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS); 5755 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs); 5756 return DAG.getNode(ISD::BITCAST, dl, VT, T); 5757 } 5758 5759 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 5760 /// is a shuffle we can handle in a single instruction, return it. Otherwise, 5761 /// return the code it can be lowered into. Worst case, it can always be 5762 /// lowered into a vperm. 
5763 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 5764 SelectionDAG &DAG) const { 5765 SDLoc dl(Op); 5766 SDValue V1 = Op.getOperand(0); 5767 SDValue V2 = Op.getOperand(1); 5768 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5769 EVT VT = Op.getValueType(); 5770 bool isLittleEndian = Subtarget.isLittleEndian(); 5771 5772 // Cases that are handled by instructions that take permute immediates 5773 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 5774 // selected by the instruction selector. 5775 if (V2.getOpcode() == ISD::UNDEF) { 5776 if (PPC::isSplatShuffleMask(SVOp, 1) || 5777 PPC::isSplatShuffleMask(SVOp, 2) || 5778 PPC::isSplatShuffleMask(SVOp, 4) || 5779 PPC::isVPKUWUMShuffleMask(SVOp, true, DAG) || 5780 PPC::isVPKUHUMShuffleMask(SVOp, true, DAG) || 5781 PPC::isVSLDOIShuffleMask(SVOp, true, DAG) != -1 || 5782 PPC::isVMRGLShuffleMask(SVOp, 1, true, DAG) || 5783 PPC::isVMRGLShuffleMask(SVOp, 2, true, DAG) || 5784 PPC::isVMRGLShuffleMask(SVOp, 4, true, DAG) || 5785 PPC::isVMRGHShuffleMask(SVOp, 1, true, DAG) || 5786 PPC::isVMRGHShuffleMask(SVOp, 2, true, DAG) || 5787 PPC::isVMRGHShuffleMask(SVOp, 4, true, DAG)) { 5788 return Op; 5789 } 5790 } 5791 5792 // Altivec has a variety of "shuffle immediates" that take two vector inputs 5793 // and produce a fixed permutation. If any of these match, do not lower to 5794 // VPERM. 5795 if (PPC::isVPKUWUMShuffleMask(SVOp, false, DAG) || 5796 PPC::isVPKUHUMShuffleMask(SVOp, false, DAG) || 5797 PPC::isVSLDOIShuffleMask(SVOp, false, DAG) != -1 || 5798 PPC::isVMRGLShuffleMask(SVOp, 1, false, DAG) || 5799 PPC::isVMRGLShuffleMask(SVOp, 2, false, DAG) || 5800 PPC::isVMRGLShuffleMask(SVOp, 4, false, DAG) || 5801 PPC::isVMRGHShuffleMask(SVOp, 1, false, DAG) || 5802 PPC::isVMRGHShuffleMask(SVOp, 2, false, DAG) || 5803 PPC::isVMRGHShuffleMask(SVOp, 4, false, DAG)) 5804 return Op; 5805 5806 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 5807 // perfect shuffle table to emit an optimal matching sequence. 5808 ArrayRef<int> PermMask = SVOp->getMask(); 5809 5810 unsigned PFIndexes[4]; 5811 bool isFourElementShuffle = true; 5812 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 5813 unsigned EltNo = 8; // Start out undef. 5814 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 5815 if (PermMask[i*4+j] < 0) 5816 continue; // Undef, ignore it. 5817 5818 unsigned ByteSource = PermMask[i*4+j]; 5819 if ((ByteSource & 3) != j) { 5820 isFourElementShuffle = false; 5821 break; 5822 } 5823 5824 if (EltNo == 8) { 5825 EltNo = ByteSource/4; 5826 } else if (EltNo != ByteSource/4) { 5827 isFourElementShuffle = false; 5828 break; 5829 } 5830 } 5831 PFIndexes[i] = EltNo; 5832 } 5833 5834 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 5835 // perfect shuffle vector to determine if it is cost effective to do this as 5836 // discrete instructions, or whether we should use a vperm. 5837 // For now, we skip this for little endian until such time as we have a 5838 // little-endian perfect shuffle table. 5839 if (isFourElementShuffle && !isLittleEndian) { 5840 // Compute the index in the perfect shuffle table. 5841 unsigned PFTableIndex = 5842 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 5843 5844 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 5845 unsigned Cost = (PFEntry >> 30); 5846 5847 // Determining when to avoid vperm is tricky. 
Many things affect the cost 5848 // of vperm, particularly how many times the perm mask needs to be computed. 5849 // For example, if the perm mask can be hoisted out of a loop or is already 5850 // used (perhaps because there are multiple permutes with the same shuffle 5851 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 5852 // the loop requires an extra register. 5853 // 5854 // As a compromise, we only emit discrete instructions if the shuffle can be 5855 // generated in 3 or fewer operations. When we have loop information 5856 // available, if this block is within a loop, we should avoid using vperm 5857 // for 3-operation perms and use a constant pool load instead. 5858 if (Cost < 3) 5859 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 5860 } 5861 5862 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 5863 // vector that will get spilled to the constant pool. 5864 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 5865 5866 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 5867 // that it is in input element units, not in bytes. Convert now. 5868 5869 // For little endian, the order of the input vectors is reversed, and 5870 // the permutation mask is complemented with respect to 31. This is 5871 // necessary to produce proper semantics with the big-endian-biased vperm 5872 // instruction. 5873 EVT EltVT = V1.getValueType().getVectorElementType(); 5874 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 5875 5876 SmallVector<SDValue, 16> ResultMask; 5877 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 5878 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i]; 5879 5880 for (unsigned j = 0; j != BytesPerElement; ++j) 5881 if (isLittleEndian) 5882 ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement+j), 5883 MVT::i32)); 5884 else 5885 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j, 5886 MVT::i32)); 5887 } 5888 5889 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, 5890 ResultMask); 5891 if (isLittleEndian) 5892 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 5893 V2, V1, VPermMask); 5894 else 5895 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 5896 V1, V2, VPermMask); 5897 } 5898 5899 /// getAltivecCompareInfo - Given an intrinsic, return false if it is not an 5900 /// altivec comparison. If it is, return true and fill in Opc/isDot with 5901 /// information about the intrinsic. 5902 static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, 5903 bool &isDot) { 5904 unsigned IntrinsicID = 5905 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 5906 CompareOpc = -1; 5907 isDot = false; 5908 switch (IntrinsicID) { 5909 default: return false; 5910 // Comparison predicates. 
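// For reference: the CompareOpc values below are the extended-opcode fields
// of the corresponding AltiVec vcmp* instructions (e.g. 134 for vcmpequw),
// and the '_p' predicate intrinsics map to the record ('.') forms, which
// also set CR6, hence isDot.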
5911 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; 5912 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; 5913 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; 5914 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; 5915 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; 5916 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; 5917 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; 5918 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; 5919 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; 5920 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; 5921 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; 5922 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; 5923 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; 5924 5925 // Normal Comparisons. 5926 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 5927 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 5928 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 5929 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 5930 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 5931 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 5932 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 5933 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 5934 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 5935 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 5936 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 5937 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 5938 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 5939 } 5940 return true; 5941 } 5942 5943 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 5944 /// lower, do it, otherwise return null. 5945 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 5946 SelectionDAG &DAG) const { 5947 // If this is a lowered altivec predicate compare, CompareOpc is set to the 5948 // opcode number of the comparison. 5949 SDLoc dl(Op); 5950 int CompareOpc; 5951 bool isDot; 5952 if (!getAltivecCompareInfo(Op, CompareOpc, isDot)) 5953 return SDValue(); // Don't custom lower most intrinsics. 5954 5955 // If this is a non-dot comparison, make the VCMP node and we are done. 5956 if (!isDot) { 5957 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 5958 Op.getOperand(1), Op.getOperand(2), 5959 DAG.getConstant(CompareOpc, MVT::i32)); 5960 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 5961 } 5962 5963 // Create the PPCISD altivec 'dot' comparison node. 5964 SDValue Ops[] = { 5965 Op.getOperand(2), // LHS 5966 Op.getOperand(3), // RHS 5967 DAG.getConstant(CompareOpc, MVT::i32) 5968 }; 5969 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue }; 5970 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 5971 5972 // Now that we have the comparison, emit a copy from the CR to a GPR. 5973 // This is flagged to the above dot comparison. 
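// For reference, assuming the usual mfocrf semantics: CR field 6 is placed
// in bits 4-7 of the result GPR (LT at bit 7, GT at 6, EQ at 5, SO at 4),
// so the SRL amount 8-(3-BitNo) below is 5 for the EQ bit (BitNo == 0) and
// 7 for the LT bit (BitNo == 2).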
5974 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
5975 DAG.getRegister(PPC::CR6, MVT::i32),
5976 CompNode.getValue(1));
5977
5978 // Unpack the result based on how the target uses it.
5979 unsigned BitNo; // Bit # of CR6.
5980 bool InvertBit; // Invert result?
5981 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
5982 default: // Can't happen, don't crash on invalid number though.
5983 case 0: // Return the value of the EQ bit of CR6.
5984 BitNo = 0; InvertBit = false;
5985 break;
5986 case 1: // Return the inverted value of the EQ bit of CR6.
5987 BitNo = 0; InvertBit = true;
5988 break;
5989 case 2: // Return the value of the LT bit of CR6.
5990 BitNo = 2; InvertBit = false;
5991 break;
5992 case 3: // Return the inverted value of the LT bit of CR6.
5993 BitNo = 2; InvertBit = true;
5994 break;
5995 }
5996
5997 // Shift the bit into the low position.
5998 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
5999 DAG.getConstant(8-(3-BitNo), MVT::i32));
6000 // Isolate the bit.
6001 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
6002 DAG.getConstant(1, MVT::i32));
6003
6004 // If we are supposed to, toggle the bit.
6005 if (InvertBit)
6006 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
6007 DAG.getConstant(1, MVT::i32));
6008 return Flags;
6009 }
6010
6011 SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
6012 SelectionDAG &DAG) const {
6013 SDLoc dl(Op);
6014 // For v2i64 (VSX), we can pattern match the v2i32 case (using fp <-> int
6015 // instructions), but for smaller types, we need to first extend up to v2i32
6016 // before going any further.
6017 if (Op.getValueType() == MVT::v2i64) {
6018 EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
6019 if (ExtVT != MVT::v2i32) {
6020 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0));
6021 Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op,
6022 DAG.getValueType(EVT::getVectorVT(*DAG.getContext(),
6023 ExtVT.getVectorElementType(), 4)));
6024 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op);
6025 Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op,
6026 DAG.getValueType(MVT::v2i32));
6027 }
6028
6029 return Op;
6030 }
6031
6032 return SDValue();
6033 }
6034
6035 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
6036 SelectionDAG &DAG) const {
6037 SDLoc dl(Op);
6038 // Create a stack slot that is 16-byte aligned.
6039 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
6040 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false);
6041 EVT PtrVT = getPointerTy();
6042 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
6043
6044 // Store the input value into Value#0 of the stack slot.
6045 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
6046 Op.getOperand(0), FIdx, MachinePointerInfo(),
6047 false, false, 0);
6048 // Load it out.
6049 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo(),
6050 false, false, false, 0);
6051 }
6052
6053 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
6054 SDLoc dl(Op);
6055 if (Op.getValueType() == MVT::v4i32) {
6056 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
6057
6058 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl);
6059 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt.
6060
6061 SDValue RHSSwap = // = vrlw RHS, 16
6062 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
6063
6064 // Shrinkify inputs to v8i16.
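// For illustration: writing each 32-bit element as A = Ahi*2^16 + Alo and
// B = Bhi*2^16 + Blo, the low 32 bits of A*B are
// Alo*Blo + ((Ahi*Blo + Alo*Bhi) << 16).
// vmulouh below produces the Alo*Blo terms, vmsumuhm with the halfword-
// swapped RHS accumulates the two cross products, and vslw shifts them into
// the high halfwords before the final add.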
6065 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS); 6066 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS); 6067 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap); 6068 6069 // Low parts multiplied together, generating 32-bit results (we ignore the 6070 // top parts). 6071 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 6072 LHS, RHS, DAG, dl, MVT::v4i32); 6073 6074 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 6075 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 6076 // Shift the high parts up 16 bits. 6077 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 6078 Neg16, DAG, dl); 6079 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 6080 } else if (Op.getValueType() == MVT::v8i16) { 6081 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 6082 6083 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl); 6084 6085 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 6086 LHS, RHS, Zero, DAG, dl); 6087 } else if (Op.getValueType() == MVT::v16i8) { 6088 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 6089 bool isLittleEndian = Subtarget.isLittleEndian(); 6090 6091 // Multiply the even 8-bit parts, producing 16-bit sums. 6092 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 6093 LHS, RHS, DAG, dl, MVT::v8i16); 6094 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts); 6095 6096 // Multiply the odd 8-bit parts, producing 16-bit sums. 6097 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 6098 LHS, RHS, DAG, dl, MVT::v8i16); 6099 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts); 6100 6101 // Merge the results together. Because vmuleub and vmuloub are 6102 // instructions with a big-endian bias, we must reverse the 6103 // element numbering and reverse the meaning of "odd" and "even" 6104 // when generating little endian code. 6105 int Ops[16]; 6106 for (unsigned i = 0; i != 8; ++i) { 6107 if (isLittleEndian) { 6108 Ops[i*2 ] = 2*i; 6109 Ops[i*2+1] = 2*i+16; 6110 } else { 6111 Ops[i*2 ] = 2*i+1; 6112 Ops[i*2+1] = 2*i+1+16; 6113 } 6114 } 6115 if (isLittleEndian) 6116 return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops); 6117 else 6118 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 6119 } else { 6120 llvm_unreachable("Unknown mul to lower!"); 6121 } 6122 } 6123 6124 /// LowerOperation - Provide custom lowering hooks for some operations. 
6125 /// 6126 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 6127 switch (Op.getOpcode()) { 6128 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 6129 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 6130 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 6131 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 6132 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 6133 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 6134 case ISD::SETCC: return LowerSETCC(Op, DAG); 6135 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 6136 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 6137 case ISD::VASTART: 6138 return LowerVASTART(Op, DAG, Subtarget); 6139 6140 case ISD::VAARG: 6141 return LowerVAARG(Op, DAG, Subtarget); 6142 6143 case ISD::VACOPY: 6144 return LowerVACOPY(Op, DAG, Subtarget); 6145 6146 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, Subtarget); 6147 case ISD::DYNAMIC_STACKALLOC: 6148 return LowerDYNAMIC_STACKALLOC(Op, DAG, Subtarget); 6149 6150 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 6151 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 6152 6153 case ISD::LOAD: return LowerLOAD(Op, DAG); 6154 case ISD::STORE: return LowerSTORE(Op, DAG); 6155 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); 6156 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 6157 case ISD::FP_TO_UINT: 6158 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, 6159 SDLoc(Op)); 6160 case ISD::UINT_TO_FP: 6161 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 6162 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 6163 6164 // Lower 64-bit shifts. 6165 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 6166 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 6167 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 6168 6169 // Vector-related lowering. 6170 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 6171 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 6172 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 6173 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 6174 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG); 6175 case ISD::MUL: return LowerMUL(Op, DAG); 6176 6177 // For counter-based loop handling. 6178 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 6179 6180 // Frame & Return address. 
6181 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 6182 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 6183 } 6184 } 6185 6186 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 6187 SmallVectorImpl<SDValue>&Results, 6188 SelectionDAG &DAG) const { 6189 const TargetMachine &TM = getTargetMachine(); 6190 SDLoc dl(N); 6191 switch (N->getOpcode()) { 6192 default: 6193 llvm_unreachable("Do not know how to custom type legalize this operation!"); 6194 case ISD::INTRINSIC_W_CHAIN: { 6195 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 6196 Intrinsic::ppc_is_decremented_ctr_nonzero) 6197 break; 6198 6199 assert(N->getValueType(0) == MVT::i1 && 6200 "Unexpected result type for CTR decrement intrinsic"); 6201 EVT SVT = getSetCCResultType(*DAG.getContext(), N->getValueType(0)); 6202 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 6203 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 6204 N->getOperand(1)); 6205 6206 Results.push_back(NewInt); 6207 Results.push_back(NewInt.getValue(1)); 6208 break; 6209 } 6210 case ISD::VAARG: { 6211 if (!TM.getSubtarget<PPCSubtarget>().isSVR4ABI() 6212 || TM.getSubtarget<PPCSubtarget>().isPPC64()) 6213 return; 6214 6215 EVT VT = N->getValueType(0); 6216 6217 if (VT == MVT::i64) { 6218 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG, Subtarget); 6219 6220 Results.push_back(NewNode); 6221 Results.push_back(NewNode.getValue(1)); 6222 } 6223 return; 6224 } 6225 case ISD::FP_ROUND_INREG: { 6226 assert(N->getValueType(0) == MVT::ppcf128); 6227 assert(N->getOperand(0).getValueType() == MVT::ppcf128); 6228 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 6229 MVT::f64, N->getOperand(0), 6230 DAG.getIntPtrConstant(0)); 6231 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 6232 MVT::f64, N->getOperand(0), 6233 DAG.getIntPtrConstant(1)); 6234 6235 // Add the two halves of the long double in round-to-zero mode. 6236 SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 6237 6238 // We know the low half is about to be thrown away, so just use something 6239 // convenient. 6240 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128, 6241 FPreg, FPreg)); 6242 return; 6243 } 6244 case ISD::FP_TO_SINT: 6245 // LowerFP_TO_INT() can only handle f32 and f64. 6246 if (N->getOperand(0).getValueType() == MVT::ppcf128) 6247 return; 6248 Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl)); 6249 return; 6250 } 6251 } 6252 6253 6254 //===----------------------------------------------------------------------===// 6255 // Other Lowering Code 6256 //===----------------------------------------------------------------------===// 6257 6258 MachineBasicBlock * 6259 PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 6260 bool is64bit, unsigned BinOpcode) const { 6261 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 
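// (When BinOpcode is 0, no intermediate operation is emitted; TmpReg below
// aliases incr, so the conditional store writes the new value directly,
// which is exactly swap semantics.)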
6262 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6263 6264 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6265 MachineFunction *F = BB->getParent(); 6266 MachineFunction::iterator It = BB; 6267 ++It; 6268 6269 unsigned dest = MI->getOperand(0).getReg(); 6270 unsigned ptrA = MI->getOperand(1).getReg(); 6271 unsigned ptrB = MI->getOperand(2).getReg(); 6272 unsigned incr = MI->getOperand(3).getReg(); 6273 DebugLoc dl = MI->getDebugLoc(); 6274 6275 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 6276 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 6277 F->insert(It, loopMBB); 6278 F->insert(It, exitMBB); 6279 exitMBB->splice(exitMBB->begin(), BB, 6280 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 6281 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 6282 6283 MachineRegisterInfo &RegInfo = F->getRegInfo(); 6284 unsigned TmpReg = (!BinOpcode) ? incr : 6285 RegInfo.createVirtualRegister( 6286 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass : 6287 (const TargetRegisterClass *) &PPC::GPRCRegClass); 6288 6289 // thisMBB: 6290 // ... 6291 // fallthrough --> loopMBB 6292 BB->addSuccessor(loopMBB); 6293 6294 // loopMBB: 6295 // l[wd]arx dest, ptr 6296 // add r0, dest, incr 6297 // st[wd]cx. r0, ptr 6298 // bne- loopMBB 6299 // fallthrough --> exitMBB 6300 BB = loopMBB; 6301 BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest) 6302 .addReg(ptrA).addReg(ptrB); 6303 if (BinOpcode) 6304 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 6305 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 6306 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 6307 BuildMI(BB, dl, TII->get(PPC::BCC)) 6308 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 6309 BB->addSuccessor(loopMBB); 6310 BB->addSuccessor(exitMBB); 6311 6312 // exitMBB: 6313 // ... 6314 BB = exitMBB; 6315 return BB; 6316 } 6317 6318 MachineBasicBlock * 6319 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI, 6320 MachineBasicBlock *BB, 6321 bool is8bit, // operation 6322 unsigned BinOpcode) const { 6323 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 6324 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6325 // In 64 bit mode we have to use 64 bits for addresses, even though the 6326 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 6327 // registers without caring whether they're 32 or 64, but here we're 6328 // doing actual arithmetic on the addresses. 6329 bool is64bit = Subtarget.isPPC64(); 6330 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 6331 6332 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6333 MachineFunction *F = BB->getParent(); 6334 MachineFunction::iterator It = BB; 6335 ++It; 6336 6337 unsigned dest = MI->getOperand(0).getReg(); 6338 unsigned ptrA = MI->getOperand(1).getReg(); 6339 unsigned ptrB = MI->getOperand(2).getReg(); 6340 unsigned incr = MI->getOperand(3).getReg(); 6341 DebugLoc dl = MI->getDebugLoc(); 6342 6343 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 6344 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 6345 F->insert(It, loopMBB); 6346 F->insert(It, exitMBB); 6347 exitMBB->splice(exitMBB->begin(), BB, 6348 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 6349 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 6350 6351 MachineRegisterInfo &RegInfo = F->getRegInfo(); 6352 const TargetRegisterClass *RC = 6353 is64bit ? 
(const TargetRegisterClass *) &PPC::G8RCRegClass : 6354 (const TargetRegisterClass *) &PPC::GPRCRegClass; 6355 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 6356 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 6357 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 6358 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC); 6359 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 6360 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 6361 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 6362 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 6363 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC); 6364 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 6365 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 6366 unsigned Ptr1Reg; 6367 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC); 6368 6369 // thisMBB: 6370 // ... 6371 // fallthrough --> loopMBB 6372 BB->addSuccessor(loopMBB); 6373 6374 // The 4-byte load must be aligned, while a char or short may be 6375 // anywhere in the word. Hence all this nasty bookkeeping code. 6376 // add ptr1, ptrA, ptrB [copy if ptrA==0] 6377 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 6378 // xori shift, shift1, 24 [16] 6379 // rlwinm ptr, ptr1, 0, 0, 29 6380 // slw incr2, incr, shift 6381 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 6382 // slw mask, mask2, shift 6383 // loopMBB: 6384 // lwarx tmpDest, ptr 6385 // add tmp, tmpDest, incr2 6386 // andc tmp2, tmpDest, mask 6387 // and tmp3, tmp, mask 6388 // or tmp4, tmp3, tmp2 6389 // stwcx. tmp4, ptr 6390 // bne- loopMBB 6391 // fallthrough --> exitMBB 6392 // srw dest, tmpDest, shift 6393 if (ptrA != ZeroReg) { 6394 Ptr1Reg = RegInfo.createVirtualRegister(RC); 6395 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 6396 .addReg(ptrA).addReg(ptrB); 6397 } else { 6398 Ptr1Reg = ptrB; 6399 } 6400 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 6401 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 6402 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 6403 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 6404 if (is64bit) 6405 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 6406 .addReg(Ptr1Reg).addImm(0).addImm(61); 6407 else 6408 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 6409 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 6410 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg) 6411 .addReg(incr).addReg(ShiftReg); 6412 if (is8bit) 6413 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 6414 else { 6415 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 6416 BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535); 6417 } 6418 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 6419 .addReg(Mask2Reg).addReg(ShiftReg); 6420 6421 BB = loopMBB; 6422 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 6423 .addReg(ZeroReg).addReg(PtrReg); 6424 if (BinOpcode) 6425 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 6426 .addReg(Incr2Reg).addReg(TmpDestReg); 6427 BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg) 6428 .addReg(TmpDestReg).addReg(MaskReg); 6429 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg) 6430 .addReg(TmpReg).addReg(MaskReg); 6431 BuildMI(BB, dl, TII->get(is64bit ? 
PPC::OR8 : PPC::OR), Tmp4Reg) 6432 .addReg(Tmp3Reg).addReg(Tmp2Reg); 6433 BuildMI(BB, dl, TII->get(PPC::STWCX)) 6434 .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg); 6435 BuildMI(BB, dl, TII->get(PPC::BCC)) 6436 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 6437 BB->addSuccessor(loopMBB); 6438 BB->addSuccessor(exitMBB); 6439 6440 // exitMBB: 6441 // ... 6442 BB = exitMBB; 6443 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg) 6444 .addReg(ShiftReg); 6445 return BB; 6446 } 6447 6448 llvm::MachineBasicBlock* 6449 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI, 6450 MachineBasicBlock *MBB) const { 6451 DebugLoc DL = MI->getDebugLoc(); 6452 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6453 6454 MachineFunction *MF = MBB->getParent(); 6455 MachineRegisterInfo &MRI = MF->getRegInfo(); 6456 6457 const BasicBlock *BB = MBB->getBasicBlock(); 6458 MachineFunction::iterator I = MBB; 6459 ++I; 6460 6461 // Memory Reference 6462 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 6463 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 6464 6465 unsigned DstReg = MI->getOperand(0).getReg(); 6466 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 6467 assert(RC->hasType(MVT::i32) && "Invalid destination!"); 6468 unsigned mainDstReg = MRI.createVirtualRegister(RC); 6469 unsigned restoreDstReg = MRI.createVirtualRegister(RC); 6470 6471 MVT PVT = getPointerTy(); 6472 assert((PVT == MVT::i64 || PVT == MVT::i32) && 6473 "Invalid Pointer Size!"); 6474 // For v = setjmp(buf), we generate 6475 // 6476 // thisMBB: 6477 // SjLjSetup mainMBB 6478 // bl mainMBB 6479 // v_restore = 1 6480 // b sinkMBB 6481 // 6482 // mainMBB: 6483 // buf[LabelOffset] = LR 6484 // v_main = 0 6485 // 6486 // sinkMBB: 6487 // v = phi(main, restore) 6488 // 6489 6490 MachineBasicBlock *thisMBB = MBB; 6491 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 6492 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 6493 MF->insert(I, mainMBB); 6494 MF->insert(I, sinkMBB); 6495 6496 MachineInstrBuilder MIB; 6497 6498 // Transfer the remainder of BB and its successor edges to sinkMBB. 6499 sinkMBB->splice(sinkMBB->begin(), MBB, 6500 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 6501 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 6502 6503 // Note that the structure of the jmp_buf used here is not compatible 6504 // with that used by libc, and is not designed to be. Specifically, it 6505 // stores only those 'reserved' registers that LLVM does not otherwise 6506 // understand how to spill. Also, by convention, by the time this 6507 // intrinsic is called, Clang has already stored the frame address in the 6508 // first slot of the buffer and stack address in the third. Following the 6509 // X86 target code, we'll store the jump address in the second slot. We also 6510 // need to save the TOC pointer (R2) to handle jumps between shared 6511 // libraries, and that will be stored in the fourth slot. The thread 6512 // identifier (R13) is not affected. 6513 6514 // thisMBB: 6515 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 6516 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 6517 const int64_t BPOffset = 4 * PVT.getStoreSize(); 6518 6519 // Prepare IP either in reg. 
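// For reference, the pointer-size slot layout assumed here and reloaded in
// emitEHSjLjLongJmp below: slot 0 holds the frame address and slot 2 the
// stack pointer (both stored by the front end, per the note above), slot 1
// the resume IP, slot 3 the TOC (X2), and slot 4 the base pointer.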
6520 const TargetRegisterClass *PtrRC = getRegClassFor(PVT); 6521 unsigned LabelReg = MRI.createVirtualRegister(PtrRC); 6522 unsigned BufReg = MI->getOperand(1).getReg(); 6523 6524 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) { 6525 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD)) 6526 .addReg(PPC::X2) 6527 .addImm(TOCOffset) 6528 .addReg(BufReg); 6529 MIB.setMemRefs(MMOBegin, MMOEnd); 6530 } 6531 6532 // Naked functions never have a base pointer, and so we use r1. For all 6533 // other functions, this decision must be delayed until during PEI. 6534 unsigned BaseReg; 6535 if (MF->getFunction()->getAttributes().hasAttribute( 6536 AttributeSet::FunctionIndex, Attribute::Naked)) 6537 BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1; 6538 else 6539 BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP; 6540 6541 MIB = BuildMI(*thisMBB, MI, DL, 6542 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW)) 6543 .addReg(BaseReg) 6544 .addImm(BPOffset) 6545 .addReg(BufReg); 6546 MIB.setMemRefs(MMOBegin, MMOEnd); 6547 6548 // Setup 6549 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB); 6550 const PPCRegisterInfo *TRI = 6551 static_cast<const PPCRegisterInfo*>(getTargetMachine().getRegisterInfo()); 6552 MIB.addRegMask(TRI->getNoPreservedMask()); 6553 6554 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1); 6555 6556 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup)) 6557 .addMBB(mainMBB); 6558 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB); 6559 6560 thisMBB->addSuccessor(mainMBB, /* weight */ 0); 6561 thisMBB->addSuccessor(sinkMBB, /* weight */ 1); 6562 6563 // mainMBB: 6564 // mainDstReg = 0 6565 MIB = BuildMI(mainMBB, DL, 6566 TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg); 6567 6568 // Store IP 6569 if (Subtarget.isPPC64()) { 6570 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD)) 6571 .addReg(LabelReg) 6572 .addImm(LabelOffset) 6573 .addReg(BufReg); 6574 } else { 6575 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW)) 6576 .addReg(LabelReg) 6577 .addImm(LabelOffset) 6578 .addReg(BufReg); 6579 } 6580 6581 MIB.setMemRefs(MMOBegin, MMOEnd); 6582 6583 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0); 6584 mainMBB->addSuccessor(sinkMBB); 6585 6586 // sinkMBB: 6587 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 6588 TII->get(PPC::PHI), DstReg) 6589 .addReg(mainDstReg).addMBB(mainMBB) 6590 .addReg(restoreDstReg).addMBB(thisMBB); 6591 6592 MI->eraseFromParent(); 6593 return sinkMBB; 6594 } 6595 6596 MachineBasicBlock * 6597 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI, 6598 MachineBasicBlock *MBB) const { 6599 DebugLoc DL = MI->getDebugLoc(); 6600 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6601 6602 MachineFunction *MF = MBB->getParent(); 6603 MachineRegisterInfo &MRI = MF->getRegInfo(); 6604 6605 // Memory Reference 6606 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 6607 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 6608 6609 MVT PVT = getPointerTy(); 6610 assert((PVT == MVT::i64 || PVT == MVT::i32) && 6611 "Invalid Pointer Size!"); 6612 6613 const TargetRegisterClass *RC = 6614 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 6615 unsigned Tmp = MRI.createVirtualRegister(RC); 6616 // Since FP is only updated here but NOT referenced, it's treated as GPR. 6617 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; 6618 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; 6619 unsigned BP = (PVT == MVT::i64) ? 
PPC::X30 : PPC::R30; 6620 6621 MachineInstrBuilder MIB; 6622 6623 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 6624 const int64_t SPOffset = 2 * PVT.getStoreSize(); 6625 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 6626 const int64_t BPOffset = 4 * PVT.getStoreSize(); 6627 6628 unsigned BufReg = MI->getOperand(0).getReg(); 6629 6630 // Reload FP (the jumped-to function may not have had a 6631 // frame pointer, and if so, then its r31 will be restored 6632 // as necessary). 6633 if (PVT == MVT::i64) { 6634 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP) 6635 .addImm(0) 6636 .addReg(BufReg); 6637 } else { 6638 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP) 6639 .addImm(0) 6640 .addReg(BufReg); 6641 } 6642 MIB.setMemRefs(MMOBegin, MMOEnd); 6643 6644 // Reload IP 6645 if (PVT == MVT::i64) { 6646 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp) 6647 .addImm(LabelOffset) 6648 .addReg(BufReg); 6649 } else { 6650 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp) 6651 .addImm(LabelOffset) 6652 .addReg(BufReg); 6653 } 6654 MIB.setMemRefs(MMOBegin, MMOEnd); 6655 6656 // Reload SP 6657 if (PVT == MVT::i64) { 6658 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP) 6659 .addImm(SPOffset) 6660 .addReg(BufReg); 6661 } else { 6662 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP) 6663 .addImm(SPOffset) 6664 .addReg(BufReg); 6665 } 6666 MIB.setMemRefs(MMOBegin, MMOEnd); 6667 6668 // Reload BP 6669 if (PVT == MVT::i64) { 6670 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP) 6671 .addImm(BPOffset) 6672 .addReg(BufReg); 6673 } else { 6674 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP) 6675 .addImm(BPOffset) 6676 .addReg(BufReg); 6677 } 6678 MIB.setMemRefs(MMOBegin, MMOEnd); 6679 6680 // Reload TOC 6681 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) { 6682 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) 6683 .addImm(TOCOffset) 6684 .addReg(BufReg); 6685 6686 MIB.setMemRefs(MMOBegin, MMOEnd); 6687 } 6688 6689 // Jump 6690 BuildMI(*MBB, MI, DL, 6691 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp); 6692 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR)); 6693 6694 MI->eraseFromParent(); 6695 return MBB; 6696 } 6697 6698 MachineBasicBlock * 6699 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 6700 MachineBasicBlock *BB) const { 6701 if (MI->getOpcode() == PPC::EH_SjLj_SetJmp32 || 6702 MI->getOpcode() == PPC::EH_SjLj_SetJmp64) { 6703 return emitEHSjLjSetJmp(MI, BB); 6704 } else if (MI->getOpcode() == PPC::EH_SjLj_LongJmp32 || 6705 MI->getOpcode() == PPC::EH_SjLj_LongJmp64) { 6706 return emitEHSjLjLongJmp(MI, BB); 6707 } 6708 6709 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6710 6711 // To "insert" these instructions we actually have to insert their 6712 // control-flow patterns. 
6713 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6714 MachineFunction::iterator It = BB; 6715 ++It; 6716 6717 MachineFunction *F = BB->getParent(); 6718 6719 if (Subtarget.hasISEL() && (MI->getOpcode() == PPC::SELECT_CC_I4 || 6720 MI->getOpcode() == PPC::SELECT_CC_I8 || 6721 MI->getOpcode() == PPC::SELECT_I4 || 6722 MI->getOpcode() == PPC::SELECT_I8)) { 6723 SmallVector<MachineOperand, 2> Cond; 6724 if (MI->getOpcode() == PPC::SELECT_CC_I4 || 6725 MI->getOpcode() == PPC::SELECT_CC_I8) 6726 Cond.push_back(MI->getOperand(4)); 6727 else 6728 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET)); 6729 Cond.push_back(MI->getOperand(1)); 6730 6731 DebugLoc dl = MI->getDebugLoc(); 6732 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6733 TII->insertSelect(*BB, MI, dl, MI->getOperand(0).getReg(), 6734 Cond, MI->getOperand(2).getReg(), 6735 MI->getOperand(3).getReg()); 6736 } else if (MI->getOpcode() == PPC::SELECT_CC_I4 || 6737 MI->getOpcode() == PPC::SELECT_CC_I8 || 6738 MI->getOpcode() == PPC::SELECT_CC_F4 || 6739 MI->getOpcode() == PPC::SELECT_CC_F8 || 6740 MI->getOpcode() == PPC::SELECT_CC_VRRC || 6741 MI->getOpcode() == PPC::SELECT_I4 || 6742 MI->getOpcode() == PPC::SELECT_I8 || 6743 MI->getOpcode() == PPC::SELECT_F4 || 6744 MI->getOpcode() == PPC::SELECT_F8 || 6745 MI->getOpcode() == PPC::SELECT_VRRC) { 6746 // The incoming instruction knows the destination vreg to set, the 6747 // condition code register to branch on, the true/false values to 6748 // select between, and a branch opcode to use. 6749 6750 // thisMBB: 6751 // ... 6752 // TrueVal = ... 6753 // cmpTY ccX, r1, r2 6754 // bCC copy1MBB 6755 // fallthrough --> copy0MBB 6756 MachineBasicBlock *thisMBB = BB; 6757 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 6758 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 6759 DebugLoc dl = MI->getDebugLoc(); 6760 F->insert(It, copy0MBB); 6761 F->insert(It, sinkMBB); 6762 6763 // Transfer the remainder of BB and its successor edges to sinkMBB. 6764 sinkMBB->splice(sinkMBB->begin(), BB, 6765 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 6766 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 6767 6768 // Next, add the true and fallthrough blocks as its successors. 6769 BB->addSuccessor(copy0MBB); 6770 BB->addSuccessor(sinkMBB); 6771 6772 if (MI->getOpcode() == PPC::SELECT_I4 || 6773 MI->getOpcode() == PPC::SELECT_I8 || 6774 MI->getOpcode() == PPC::SELECT_F4 || 6775 MI->getOpcode() == PPC::SELECT_F8 || 6776 MI->getOpcode() == PPC::SELECT_VRRC) { 6777 BuildMI(BB, dl, TII->get(PPC::BC)) 6778 .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 6779 } else { 6780 unsigned SelectPred = MI->getOperand(4).getImm(); 6781 BuildMI(BB, dl, TII->get(PPC::BCC)) 6782 .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 6783 } 6784 6785 // copy0MBB: 6786 // %FalseValue = ... 6787 // # fallthrough to sinkMBB 6788 BB = copy0MBB; 6789 6790 // Update machine-CFG edges 6791 BB->addSuccessor(sinkMBB); 6792 6793 // sinkMBB: 6794 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 6795 // ... 
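// Note that the PHI below takes the false value (operand 3) from copy0MBB
// and the true value (operand 2) from thisMBB, matching the diagram above.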
6796 BB = sinkMBB; 6797 BuildMI(*BB, BB->begin(), dl, 6798 TII->get(PPC::PHI), MI->getOperand(0).getReg()) 6799 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB) 6800 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 6801 } 6802 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 6803 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 6804 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 6805 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 6806 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 6807 BB = EmitAtomicBinary(MI, BB, false, PPC::ADD4); 6808 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 6809 BB = EmitAtomicBinary(MI, BB, true, PPC::ADD8); 6810 6811 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 6812 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 6813 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 6814 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 6815 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 6816 BB = EmitAtomicBinary(MI, BB, false, PPC::AND); 6817 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 6818 BB = EmitAtomicBinary(MI, BB, true, PPC::AND8); 6819 6820 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 6821 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 6822 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 6823 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 6824 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 6825 BB = EmitAtomicBinary(MI, BB, false, PPC::OR); 6826 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 6827 BB = EmitAtomicBinary(MI, BB, true, PPC::OR8); 6828 6829 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 6830 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 6831 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 6832 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 6833 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 6834 BB = EmitAtomicBinary(MI, BB, false, PPC::XOR); 6835 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 6836 BB = EmitAtomicBinary(MI, BB, true, PPC::XOR8); 6837 6838 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 6839 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ANDC); 6840 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 6841 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ANDC); 6842 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 6843 BB = EmitAtomicBinary(MI, BB, false, PPC::ANDC); 6844 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 6845 BB = EmitAtomicBinary(MI, BB, true, PPC::ANDC8); 6846 6847 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 6848 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 6849 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 6850 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 6851 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32) 6852 BB = EmitAtomicBinary(MI, BB, false, PPC::SUBF); 6853 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 6854 BB = EmitAtomicBinary(MI, BB, true, PPC::SUBF8); 6855 6856 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8) 6857 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 6858 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16) 6859 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 6860 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32) 6861 BB = EmitAtomicBinary(MI, BB, false, 0); 6862 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64) 6863 BB = EmitAtomicBinary(MI, BB, true, 0); 6864 6865 else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 6866 MI->getOpcode() == 
PPC::ATOMIC_CMP_SWAP_I64) {
6867 bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
6868
6869 unsigned dest = MI->getOperand(0).getReg();
6870 unsigned ptrA = MI->getOperand(1).getReg();
6871 unsigned ptrB = MI->getOperand(2).getReg();
6872 unsigned oldval = MI->getOperand(3).getReg();
6873 unsigned newval = MI->getOperand(4).getReg();
6874 DebugLoc dl = MI->getDebugLoc();
6875
6876 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
6877 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
6878 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
6879 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
6880 F->insert(It, loop1MBB);
6881 F->insert(It, loop2MBB);
6882 F->insert(It, midMBB);
6883 F->insert(It, exitMBB);
6884 exitMBB->splice(exitMBB->begin(), BB,
6885 std::next(MachineBasicBlock::iterator(MI)), BB->end());
6886 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
6887
6888 // thisMBB:
6889 // ...
6890 // fallthrough --> loop1MBB
6891 BB->addSuccessor(loop1MBB);
6892
6893 // loop1MBB:
6894 // l[wd]arx dest, ptr
6895 // cmp[wd] dest, oldval
6896 // bne- midMBB
6897 // loop2MBB:
6898 // st[wd]cx. newval, ptr
6899 // bne- loop1MBB
6900 // b exitMBB
6901 // midMBB:
6902 // st[wd]cx. dest, ptr
6903 // exitMBB:
6904 BB = loop1MBB;
6905 BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest)
6906 .addReg(ptrA).addReg(ptrB);
6907 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
6908 .addReg(oldval).addReg(dest);
6909 BuildMI(BB, dl, TII->get(PPC::BCC))
6910 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
6911 BB->addSuccessor(loop2MBB);
6912 BB->addSuccessor(midMBB);
6913
6914 BB = loop2MBB;
6915 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
6916 .addReg(newval).addReg(ptrA).addReg(ptrB);
6917 BuildMI(BB, dl, TII->get(PPC::BCC))
6918 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
6919 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
6920 BB->addSuccessor(loop1MBB);
6921 BB->addSuccessor(exitMBB);
6922
6923 BB = midMBB;
6924 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
6925 .addReg(dest).addReg(ptrA).addReg(ptrB);
6926 BB->addSuccessor(exitMBB);
6927
6928 // exitMBB:
6929 // ...
6930 BB = exitMBB;
6931 } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
6932 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
6933 // We must use 64-bit registers for addresses when targeting 64-bit,
6934 // since we're actually doing arithmetic on them. Other registers
6935 // can be 32-bit.
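// For illustration of the lane bookkeeping used below (big-endian lanes):
// for a byte at an address A with A % 4 == 1, rlwinm(ptr1, 3, 27, 28)
// yields shift1 = (A & 3)*8 = 8, and the xori with 24 gives shift = 16, so
// the byte occupies bits 16-23 of the aligned word; slw positions the
// operands in that lane and the final srw moves the result back down.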
6936 bool is64bit = Subtarget.isPPC64();
6937 bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
6938
6939 unsigned dest = MI->getOperand(0).getReg();
6940 unsigned ptrA = MI->getOperand(1).getReg();
6941 unsigned ptrB = MI->getOperand(2).getReg();
6942 unsigned oldval = MI->getOperand(3).getReg();
6943 unsigned newval = MI->getOperand(4).getReg();
6944 DebugLoc dl = MI->getDebugLoc();
6945
6946 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
6947 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
6948 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
6949 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
6950 F->insert(It, loop1MBB);
6951 F->insert(It, loop2MBB);
6952 F->insert(It, midMBB);
6953 F->insert(It, exitMBB);
6954 exitMBB->splice(exitMBB->begin(), BB,
6955 std::next(MachineBasicBlock::iterator(MI)), BB->end());
6956 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
6957
6958 MachineRegisterInfo &RegInfo = F->getRegInfo();
6959 const TargetRegisterClass *RC =
6960 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
6961 (const TargetRegisterClass *) &PPC::GPRCRegClass;
6962 unsigned PtrReg = RegInfo.createVirtualRegister(RC);
6963 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
6964 unsigned ShiftReg = RegInfo.createVirtualRegister(RC);
6965 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC);
6966 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC);
6967 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC);
6968 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC);
6969 unsigned MaskReg = RegInfo.createVirtualRegister(RC);
6970 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
6971 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
6972 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
6973 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
6974 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
6975 unsigned Ptr1Reg;
6976 unsigned TmpReg = RegInfo.createVirtualRegister(RC);
6977 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
6978 // thisMBB:
6979 // ...
6980 // fallthrough --> loop1MBB
6981 BB->addSuccessor(loop1MBB);
6982
6983 // The 4-byte load must be aligned, while a char or short may be
6984 // anywhere in the word. Hence all this nasty bookkeeping code.
6985 // add ptr1, ptrA, ptrB [copy if ptrA==0]
6986 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
6987 // xori shift, shift1, 24 [16]
6988 // rlwinm ptr, ptr1, 0, 0, 29
6989 // slw newval2, newval, shift
6990 // slw oldval2, oldval, shift
6991 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
6992 // slw mask, mask2, shift
6993 // and newval3, newval2, mask
6994 // and oldval3, oldval2, mask
6995 // loop1MBB:
6996 // lwarx tmpDest, ptr
6997 // and tmp, tmpDest, mask
6998 // cmpw tmp, oldval3
6999 // bne- midMBB
7000 // loop2MBB:
7001 // andc tmp2, tmpDest, mask
7002 // or tmp4, tmp2, newval3
7003 // stwcx. tmp4, ptr
7004 // bne- loop1MBB
7005 // b exitMBB
7006 // midMBB:
7007 // stwcx. tmpDest, ptr
7008 // exitMBB:
7009 // srw dest, tmpDest, shift
7010 if (ptrA != ZeroReg) {
7011 Ptr1Reg = RegInfo.createVirtualRegister(RC);
7012 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
7013 .addReg(ptrA).addReg(ptrB);
7014 } else {
7015 Ptr1Reg = ptrB;
7016 }
7017 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
7018 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
7019 BuildMI(BB, dl, TII->get(is64bit ?
PPC::XORI8 : PPC::XORI), ShiftReg) 7020 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 7021 if (is64bit) 7022 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 7023 .addReg(Ptr1Reg).addImm(0).addImm(61); 7024 else 7025 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 7026 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 7027 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 7028 .addReg(newval).addReg(ShiftReg); 7029 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 7030 .addReg(oldval).addReg(ShiftReg); 7031 if (is8bit) 7032 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 7033 else { 7034 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 7035 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 7036 .addReg(Mask3Reg).addImm(65535); 7037 } 7038 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 7039 .addReg(Mask2Reg).addReg(ShiftReg); 7040 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 7041 .addReg(NewVal2Reg).addReg(MaskReg); 7042 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 7043 .addReg(OldVal2Reg).addReg(MaskReg); 7044 7045 BB = loop1MBB; 7046 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 7047 .addReg(ZeroReg).addReg(PtrReg); 7048 BuildMI(BB, dl, TII->get(PPC::AND),TmpReg) 7049 .addReg(TmpDestReg).addReg(MaskReg); 7050 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 7051 .addReg(TmpReg).addReg(OldVal3Reg); 7052 BuildMI(BB, dl, TII->get(PPC::BCC)) 7053 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 7054 BB->addSuccessor(loop2MBB); 7055 BB->addSuccessor(midMBB); 7056 7057 BB = loop2MBB; 7058 BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg) 7059 .addReg(TmpDestReg).addReg(MaskReg); 7060 BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg) 7061 .addReg(Tmp2Reg).addReg(NewVal3Reg); 7062 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg) 7063 .addReg(ZeroReg).addReg(PtrReg); 7064 BuildMI(BB, dl, TII->get(PPC::BCC)) 7065 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 7066 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 7067 BB->addSuccessor(loop1MBB); 7068 BB->addSuccessor(exitMBB); 7069 7070 BB = midMBB; 7071 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg) 7072 .addReg(ZeroReg).addReg(PtrReg); 7073 BB->addSuccessor(exitMBB); 7074 7075 // exitMBB: 7076 // ... 7077 BB = exitMBB; 7078 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg) 7079 .addReg(ShiftReg); 7080 } else if (MI->getOpcode() == PPC::FADDrtz) { 7081 // This pseudo performs an FADD with rounding mode temporarily forced 7082 // to round-to-zero. We emit this via custom inserter since the FPSCR 7083 // is not modeled at the SelectionDAG level. 7084 unsigned Dest = MI->getOperand(0).getReg(); 7085 unsigned Src1 = MI->getOperand(1).getReg(); 7086 unsigned Src2 = MI->getOperand(2).getReg(); 7087 DebugLoc dl = MI->getDebugLoc(); 7088 7089 MachineRegisterInfo &RegInfo = F->getRegInfo(); 7090 unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 7091 7092 // Save FPSCR value. 7093 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg); 7094 7095 // Set rounding mode to round-to-zero. 7096 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31); 7097 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30); 7098 7099 // Perform addition. 7100 BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2); 7101 7102 // Restore FPSCR value. 
7103 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF)).addImm(1).addReg(MFFSReg); 7104 } else if (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT || 7105 MI->getOpcode() == PPC::ANDIo_1_GT_BIT || 7106 MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 7107 MI->getOpcode() == PPC::ANDIo_1_GT_BIT8) { 7108 unsigned Opcode = (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 || 7109 MI->getOpcode() == PPC::ANDIo_1_GT_BIT8) ? 7110 PPC::ANDIo8 : PPC::ANDIo; 7111 bool isEQ = (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT || 7112 MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8); 7113 7114 MachineRegisterInfo &RegInfo = F->getRegInfo(); 7115 unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ? 7116 &PPC::GPRCRegClass : 7117 &PPC::G8RCRegClass); 7118 7119 DebugLoc dl = MI->getDebugLoc(); 7120 BuildMI(*BB, MI, dl, TII->get(Opcode), Dest) 7121 .addReg(MI->getOperand(1).getReg()).addImm(1); 7122 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), 7123 MI->getOperand(0).getReg()) 7124 .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT); 7125 } else { 7126 llvm_unreachable("Unexpected instr type to insert"); 7127 } 7128 7129 MI->eraseFromParent(); // The pseudo instruction is gone now. 7130 return BB; 7131 } 7132 7133 //===----------------------------------------------------------------------===// 7134 // Target Optimization Hooks 7135 //===----------------------------------------------------------------------===// 7136 7137 SDValue PPCTargetLowering::DAGCombineFastRecip(SDValue Op, 7138 DAGCombinerInfo &DCI) const { 7139 if (DCI.isAfterLegalizeVectorOps()) 7140 return SDValue(); 7141 7142 EVT VT = Op.getValueType(); 7143 7144 if ((VT == MVT::f32 && Subtarget.hasFRES()) || 7145 (VT == MVT::f64 && Subtarget.hasFRE()) || 7146 (VT == MVT::v4f32 && Subtarget.hasAltivec()) || 7147 (VT == MVT::v2f64 && Subtarget.hasVSX())) { 7148 7149 // Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i) 7150 // For the reciprocal, we need to find the zero of the function: 7151 // F(X) = A X - 1 [which has a zero at X = 1/A] 7152 // => 7153 // X_{i+1} = X_i (2 - A X_i) = X_i + X_i (1 - A X_i) [this second form 7154 // does not require additional intermediate precision] 7155 7156 // Convergence is quadratic, so we essentially double the number of digits 7157 // correct after every iteration. The minimum architected relative 7158 // accuracy is 2^-5. When hasRecipPrec(), this is 2^-14. IEEE float has 7159 // 23 digits and double has 52 digits. 7160 int Iterations = Subtarget.hasRecipPrec() ? 
1 : 3; 7161 if (VT.getScalarType() == MVT::f64) 7162 ++Iterations; 7163 7164 SelectionDAG &DAG = DCI.DAG; 7165 SDLoc dl(Op); 7166 7167 SDValue FPOne = 7168 DAG.getConstantFP(1.0, VT.getScalarType()); 7169 if (VT.isVector()) { 7170 assert(VT.getVectorNumElements() == 4 && 7171 "Unknown vector type"); 7172 FPOne = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, 7173 FPOne, FPOne, FPOne, FPOne); 7174 } 7175 7176 SDValue Est = DAG.getNode(PPCISD::FRE, dl, VT, Op); 7177 DCI.AddToWorklist(Est.getNode()); 7178 7179 // Newton iterations: Est = Est + Est (1 - Arg * Est) 7180 for (int i = 0; i < Iterations; ++i) { 7181 SDValue NewEst = DAG.getNode(ISD::FMUL, dl, VT, Op, Est); 7182 DCI.AddToWorklist(NewEst.getNode()); 7183 7184 NewEst = DAG.getNode(ISD::FSUB, dl, VT, FPOne, NewEst); 7185 DCI.AddToWorklist(NewEst.getNode()); 7186 7187 NewEst = DAG.getNode(ISD::FMUL, dl, VT, Est, NewEst); 7188 DCI.AddToWorklist(NewEst.getNode()); 7189 7190 Est = DAG.getNode(ISD::FADD, dl, VT, Est, NewEst); 7191 DCI.AddToWorklist(Est.getNode()); 7192 } 7193 7194 return Est; 7195 } 7196 7197 return SDValue(); 7198 } 7199 7200 SDValue PPCTargetLowering::DAGCombineFastRecipFSQRT(SDValue Op, 7201 DAGCombinerInfo &DCI) const { 7202 if (DCI.isAfterLegalizeVectorOps()) 7203 return SDValue(); 7204 7205 EVT VT = Op.getValueType(); 7206 7207 if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) || 7208 (VT == MVT::f64 && Subtarget.hasFRSQRTE()) || 7209 (VT == MVT::v4f32 && Subtarget.hasAltivec()) || 7210 (VT == MVT::v2f64 && Subtarget.hasVSX())) { 7211 7212 // Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i) 7213 // For the reciprocal sqrt, we need to find the zero of the function: 7214 // F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)] 7215 // => 7216 // X_{i+1} = X_i (1.5 - A X_i^2 / 2) 7217 // As a result, we precompute A/2 prior to the iteration loop. 7218 7219 // Convergence is quadratic, so we essentially double the number of digits 7220 // correct after every iteration. The minimum architected relative 7221 // accuracy is 2^-5. When hasRecipPrec(), this is 2^-14. IEEE float has 7222 // 23 digits and double has 52 digits. 7223 int Iterations = Subtarget.hasRecipPrec() ? 1 : 3; 7224 if (VT.getScalarType() == MVT::f64) 7225 ++Iterations; 7226 7227 SelectionDAG &DAG = DCI.DAG; 7228 SDLoc dl(Op); 7229 7230 SDValue FPThreeHalves = 7231 DAG.getConstantFP(1.5, VT.getScalarType()); 7232 if (VT.isVector()) { 7233 assert(VT.getVectorNumElements() == 4 && 7234 "Unknown vector type"); 7235 FPThreeHalves = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, 7236 FPThreeHalves, FPThreeHalves, 7237 FPThreeHalves, FPThreeHalves); 7238 } 7239 7240 SDValue Est = DAG.getNode(PPCISD::FRSQRTE, dl, VT, Op); 7241 DCI.AddToWorklist(Est.getNode()); 7242 7243 // We now need 0.5*Arg which we can write as (1.5*Arg - Arg) so that 7244 // this entire sequence requires only one FP constant. 
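// For reference on the iteration counts chosen above: each Newton step
// squares the relative error, so a 2^-5 estimate improves to 2^-10, 2^-20,
// and 2^-40 after three steps (sufficient for f32, one more step for f64),
// while a 2^-14 estimate reaches 2^-28 after one step and 2^-56 after two.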
7245 SDValue HalfArg = DAG.getNode(ISD::FMUL, dl, VT, FPThreeHalves, Op);
7246 DCI.AddToWorklist(HalfArg.getNode());
7247
7248 HalfArg = DAG.getNode(ISD::FSUB, dl, VT, HalfArg, Op);
7249 DCI.AddToWorklist(HalfArg.getNode());
7250
7251 // Newton iterations: Est = Est * (1.5 - HalfArg * Est * Est)
7252 for (int i = 0; i < Iterations; ++i) {
7253 SDValue NewEst = DAG.getNode(ISD::FMUL, dl, VT, Est, Est);
7254 DCI.AddToWorklist(NewEst.getNode());
7255
7256 NewEst = DAG.getNode(ISD::FMUL, dl, VT, HalfArg, NewEst);
7257 DCI.AddToWorklist(NewEst.getNode());
7258
7259 NewEst = DAG.getNode(ISD::FSUB, dl, VT, FPThreeHalves, NewEst);
7260 DCI.AddToWorklist(NewEst.getNode());
7261
7262 Est = DAG.getNode(ISD::FMUL, dl, VT, Est, NewEst);
7263 DCI.AddToWorklist(Est.getNode());
7264 }
7265
7266 return Est;
7267 }
7268
7269 return SDValue();
7270 }
7271
7272 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
7273 // not enforce equality of the chain operands.
7274 static bool isConsecutiveLS(LSBaseSDNode *LS, LSBaseSDNode *Base,
7275 unsigned Bytes, int Dist,
7276 SelectionDAG &DAG) {
7277 EVT VT = LS->getMemoryVT();
7278 if (VT.getSizeInBits() / 8 != Bytes)
7279 return false;
7280
7281 SDValue Loc = LS->getBasePtr();
7282 SDValue BaseLoc = Base->getBasePtr();
7283 if (Loc.getOpcode() == ISD::FrameIndex) {
7284 if (BaseLoc.getOpcode() != ISD::FrameIndex)
7285 return false;
7286 const MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
7287 int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
7288 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
7289 int FS = MFI->getObjectSize(FI);
7290 int BFS = MFI->getObjectSize(BFI);
7291 if (FS != BFS || FS != (int)Bytes) return false;
7292 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
7293 }
7294
7295 // Handle X+C
7296 if (DAG.isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc &&
7297 cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes)
7298 return true;
7299
7300 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7301 const GlobalValue *GV1 = nullptr;
7302 const GlobalValue *GV2 = nullptr;
7303 int64_t Offset1 = 0;
7304 int64_t Offset2 = 0;
7305 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
7306 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
7307 if (isGA1 && isGA2 && GV1 == GV2)
7308 return Offset1 == (Offset2 + Dist*Bytes);
7309 return false;
7310 }
7311
7312 // Return true if there is a nearby consecutive load to the one provided
7313 // (regardless of alignment). We search up and down the chain, looking through
7314 // token factors and other loads (but nothing else). As a result, a true
7315 // result indicates that it is safe to create a new consecutive load adjacent
7316 // to the load provided.
7317 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
7318 SDValue Chain = LD->getChain();
7319 EVT VT = LD->getMemoryVT();
7320
7321 SmallSet<SDNode *, 16> LoadRoots;
7322 SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
7323 SmallSet<SDNode *, 16> Visited;
7324
7325 // First, search up the chain, branching to follow all token-factor operands.
7326 // If we find a consecutive load, then we're done; otherwise, record all
7327 // nodes just above the top-level loads and token factors.
7328 while (!Queue.empty()) { 7329 SDNode *ChainNext = Queue.pop_back_val(); 7330 if (!Visited.insert(ChainNext)) 7331 continue; 7332 7333 if (LoadSDNode *ChainLD = dyn_cast<LoadSDNode>(ChainNext)) { 7334 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG)) 7335 return true; 7336 7337 if (!Visited.count(ChainLD->getChain().getNode())) 7338 Queue.push_back(ChainLD->getChain().getNode()); 7339 } else if (ChainNext->getOpcode() == ISD::TokenFactor) { 7340 for (SDNode::op_iterator O = ChainNext->op_begin(), 7341 OE = ChainNext->op_end(); O != OE; ++O) 7342 if (!Visited.count(O->getNode())) 7343 Queue.push_back(O->getNode()); 7344 } else 7345 LoadRoots.insert(ChainNext); 7346 } 7347 7348 // Second, search down the chain, starting from the top-level nodes recorded 7349 // in the first phase. These top-level nodes are the nodes just above all 7350 // loads and token factors. Starting with their uses, recursively look through 7351 // all loads (just the chain uses) and token factors to find a consecutive 7352 // load. 7353 Visited.clear(); 7354 Queue.clear(); 7355 7356 for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(), 7357 IE = LoadRoots.end(); I != IE; ++I) { 7358 Queue.push_back(*I); 7359 7360 while (!Queue.empty()) { 7361 SDNode *LoadRoot = Queue.pop_back_val(); 7362 if (!Visited.insert(LoadRoot)) 7363 continue; 7364 7365 if (LoadSDNode *ChainLD = dyn_cast<LoadSDNode>(LoadRoot)) 7366 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG)) 7367 return true; 7368 7369 for (SDNode::use_iterator UI = LoadRoot->use_begin(), 7370 UE = LoadRoot->use_end(); UI != UE; ++UI) 7371 if (((isa<LoadSDNode>(*UI) && 7372 cast<LoadSDNode>(*UI)->getChain().getNode() == LoadRoot) || 7373 UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI)) 7374 Queue.push_back(*UI); 7375 } 7376 } 7377 7378 return false; 7379 } 7380 7381 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N, 7382 DAGCombinerInfo &DCI) const { 7383 SelectionDAG &DAG = DCI.DAG; 7384 SDLoc dl(N); 7385 7386 assert(Subtarget.useCRBits() && 7387 "Expecting to be tracking CR bits"); 7388 // If we're tracking CR bits, we need to be careful that we don't have: 7389 // trunc(binary-ops(zext(x), zext(y))) 7390 // or 7391 // trunc(binary-ops(binary-ops(zext(x), zext(y)), ...) 7392 // such that we're unnecessarily moving things into GPRs when it would be 7393 // better to keep them in CR bits. 7394 7395 // Note that trunc here can be an actual i1 trunc, or can be the effective 7396 // truncation that comes from a setcc or select_cc. 7397 if (N->getOpcode() == ISD::TRUNCATE && 7398 N->getValueType(0) != MVT::i1) 7399 return SDValue(); 7400 7401 if (N->getOperand(0).getValueType() != MVT::i32 && 7402 N->getOperand(0).getValueType() != MVT::i64) 7403 return SDValue(); 7404 7405 if (N->getOpcode() == ISD::SETCC || 7406 N->getOpcode() == ISD::SELECT_CC) { 7407 // If we're looking at a comparison, then we need to make sure that the 7408 // high bits (all except for the first) don't affect the result. 7409 ISD::CondCode CC = 7410 cast<CondCodeSDNode>(N->getOperand( 7411 N->getOpcode() == ISD::SETCC ?
2 : 4))->get(); 7412 unsigned OpBits = N->getOperand(0).getValueSizeInBits(); 7413 7414 if (ISD::isSignedIntSetCC(CC)) { 7415 if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits || 7416 DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits) 7417 return SDValue(); 7418 } else if (ISD::isUnsignedIntSetCC(CC)) { 7419 if (!DAG.MaskedValueIsZero(N->getOperand(0), 7420 APInt::getHighBitsSet(OpBits, OpBits-1)) || 7421 !DAG.MaskedValueIsZero(N->getOperand(1), 7422 APInt::getHighBitsSet(OpBits, OpBits-1))) 7423 return SDValue(); 7424 } else { 7425 // This is neither a signed nor an unsigned comparison; just make sure 7426 // that the high bits are equal. 7427 APInt Op1Zero, Op1One; 7428 APInt Op2Zero, Op2One; 7429 DAG.computeKnownBits(N->getOperand(0), Op1Zero, Op1One); 7430 DAG.computeKnownBits(N->getOperand(1), Op2Zero, Op2One); 7431 7432 // We don't really care about what is known about the first bit (if 7433 // anything), so clear it in all masks prior to comparing them. 7434 Op1Zero.clearBit(0); Op1One.clearBit(0); 7435 Op2Zero.clearBit(0); Op2One.clearBit(0); 7436 7437 if (Op1Zero != Op2Zero || Op1One != Op2One) 7438 return SDValue(); 7439 } 7440 } 7441 7442 // We now know that the higher-order bits are irrelevant; we just need to 7443 // make sure that all of the intermediate operations are bit operations, and 7444 // all inputs are extensions. 7445 if (N->getOperand(0).getOpcode() != ISD::AND && 7446 N->getOperand(0).getOpcode() != ISD::OR && 7447 N->getOperand(0).getOpcode() != ISD::XOR && 7448 N->getOperand(0).getOpcode() != ISD::SELECT && 7449 N->getOperand(0).getOpcode() != ISD::SELECT_CC && 7450 N->getOperand(0).getOpcode() != ISD::TRUNCATE && 7451 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND && 7452 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND && 7453 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND) 7454 return SDValue(); 7455 7456 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) && 7457 N->getOperand(1).getOpcode() != ISD::AND && 7458 N->getOperand(1).getOpcode() != ISD::OR && 7459 N->getOperand(1).getOpcode() != ISD::XOR && 7460 N->getOperand(1).getOpcode() != ISD::SELECT && 7461 N->getOperand(1).getOpcode() != ISD::SELECT_CC && 7462 N->getOperand(1).getOpcode() != ISD::TRUNCATE && 7463 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND && 7464 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND && 7465 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND) 7466 return SDValue(); 7467 7468 SmallVector<SDValue, 4> Inputs; 7469 SmallVector<SDValue, 8> BinOps, PromOps; 7470 SmallPtrSet<SDNode *, 16> Visited; 7471 7472 for (unsigned i = 0; i < 2; ++i) { 7473 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 7474 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 7475 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 7476 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) || 7477 isa<ConstantSDNode>(N->getOperand(i))) 7478 Inputs.push_back(N->getOperand(i)); 7479 else 7480 BinOps.push_back(N->getOperand(i)); 7481 7482 if (N->getOpcode() == ISD::TRUNCATE) 7483 break; 7484 } 7485 7486 // Visit all inputs, collect all binary operations (and, or, xor and 7487 // select) that are all fed by extensions. 7488 while (!BinOps.empty()) { 7489 SDValue BinOp = BinOps.back(); 7490 BinOps.pop_back(); 7491 7492 if (!Visited.insert(BinOp.getNode())) 7493 continue; 7494 7495 PromOps.push_back(BinOp); 7496 7497 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 7498 // The condition of the select is not promoted.
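// (For SELECT, operand 0 is the condition; for SELECT_CC, operands 0 and 1
// are the values being compared, so only operands 2 and 3, the possible
// results, are considered for promotion.)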
7499 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 7500 continue; 7501 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 7502 continue; 7503 7504 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 7505 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 7506 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 7507 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) || 7508 isa<ConstantSDNode>(BinOp.getOperand(i))) { 7509 Inputs.push_back(BinOp.getOperand(i)); 7510 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 7511 BinOp.getOperand(i).getOpcode() == ISD::OR || 7512 BinOp.getOperand(i).getOpcode() == ISD::XOR || 7513 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 7514 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC || 7515 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 7516 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 7517 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 7518 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) { 7519 BinOps.push_back(BinOp.getOperand(i)); 7520 } else { 7521 // We have an input that is not an extension or another binary 7522 // operation; we'll abort this transformation. 7523 return SDValue(); 7524 } 7525 } 7526 } 7527 7528 // Make sure that this is a self-contained cluster of operations (which 7529 // is not quite the same thing as saying that everything has only one 7530 // use). 7531 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 7532 if (isa<ConstantSDNode>(Inputs[i])) 7533 continue; 7534 7535 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 7536 UE = Inputs[i].getNode()->use_end(); 7537 UI != UE; ++UI) { 7538 SDNode *User = *UI; 7539 if (User != N && !Visited.count(User)) 7540 return SDValue(); 7541 7542 // Make sure that we're not going to promote the non-output-value 7543 // operand(s) of SELECT or SELECT_CC. 7544 // FIXME: Although we could sometimes handle this, and it does occur in 7545 // practice that one of the condition inputs to the select is also one of 7546 // the outputs, we currently can't deal with this. 7547 if (User->getOpcode() == ISD::SELECT) { 7548 if (User->getOperand(0) == Inputs[i]) 7549 return SDValue(); 7550 } else if (User->getOpcode() == ISD::SELECT_CC) { 7551 if (User->getOperand(0) == Inputs[i] || 7552 User->getOperand(1) == Inputs[i]) 7553 return SDValue(); 7554 } 7555 } 7556 } 7557 7558 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 7559 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 7560 UE = PromOps[i].getNode()->use_end(); 7561 UI != UE; ++UI) { 7562 SDNode *User = *UI; 7563 if (User != N && !Visited.count(User)) 7564 return SDValue(); 7565 7566 // Make sure that we're not going to promote the non-output-value 7567 // operand(s) of SELECT or SELECT_CC. 7568 // FIXME: Although we could sometimes handle this, and it does occur in 7569 // practice that one of the condition inputs to the select is also one of 7570 // the outputs, we currently can't deal with this. 7571 if (User->getOpcode() == ISD::SELECT) { 7572 if (User->getOperand(0) == PromOps[i]) 7573 return SDValue(); 7574 } else if (User->getOpcode() == ISD::SELECT_CC) { 7575 if (User->getOperand(0) == PromOps[i] || 7576 User->getOperand(1) == PromOps[i]) 7577 return SDValue(); 7578 } 7579 } 7580 } 7581 7582 // Replace all inputs with the extension operand.
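// For example, in a self-contained cluster like
//   trunc i1 (xor i32 (zext(a:i1), zext(b:i1))),
// a and b replace their extensions here, the xor is recreated below with an
// i1 result type, and the final truncation then disappears.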
7583 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 7584 // Constants may have users outside the cluster of to-be-promoted nodes, 7585 // and so we need to replace those as we do the promotions. 7586 if (isa<ConstantSDNode>(Inputs[i])) 7587 continue; 7588 else 7589 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0)); 7590 } 7591 7592 // Replace all operations (these are all the same, but have a different 7593 // (i1) return type). DAG.getNode will validate that the types of 7594 // a binary operator match, so go through the list in reverse so that 7595 // we've likely promoted both operands first. Any intermediate truncations or 7596 // extensions disappear. 7597 while (!PromOps.empty()) { 7598 SDValue PromOp = PromOps.back(); 7599 PromOps.pop_back(); 7600 7601 if (PromOp.getOpcode() == ISD::TRUNCATE || 7602 PromOp.getOpcode() == ISD::SIGN_EXTEND || 7603 PromOp.getOpcode() == ISD::ZERO_EXTEND || 7604 PromOp.getOpcode() == ISD::ANY_EXTEND) { 7605 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) && 7606 PromOp.getOperand(0).getValueType() != MVT::i1) { 7607 // The operand is not yet ready (see comment below). 7608 PromOps.insert(PromOps.begin(), PromOp); 7609 continue; 7610 } 7611 7612 SDValue RepValue = PromOp.getOperand(0); 7613 if (isa<ConstantSDNode>(RepValue)) 7614 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue); 7615 7616 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue); 7617 continue; 7618 } 7619 7620 unsigned C; 7621 switch (PromOp.getOpcode()) { 7622 default: C = 0; break; 7623 case ISD::SELECT: C = 1; break; 7624 case ISD::SELECT_CC: C = 2; break; 7625 } 7626 7627 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 7628 PromOp.getOperand(C).getValueType() != MVT::i1) || 7629 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 7630 PromOp.getOperand(C+1).getValueType() != MVT::i1)) { 7631 // The to-be-promoted operands of this node have not yet been 7632 // promoted (this should be rare because we're going through the 7633 // list backward, but if one of the operands has several users in 7634 // this cluster of to-be-promoted nodes, it is possible). 7635 PromOps.insert(PromOps.begin(), PromOp); 7636 continue; 7637 } 7638 7639 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 7640 PromOp.getNode()->op_end()); 7641 7642 // If there are any constant inputs, make sure they're replaced now. 7643 for (unsigned i = 0; i < 2; ++i) 7644 if (isa<ConstantSDNode>(Ops[C+i])) 7645 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]); 7646 7647 DAG.ReplaceAllUsesOfValueWith(PromOp, 7648 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops)); 7649 } 7650 7651 // Now we're left with the initial truncation itself. 7652 if (N->getOpcode() == ISD::TRUNCATE) 7653 return N->getOperand(0); 7654 7655 // Otherwise, this is a comparison. The operands to be compared have just 7656 // changed type (to i1), but everything else is the same. 7657 return SDValue(N, 0); 7658 } 7659 7660 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N, 7661 DAGCombinerInfo &DCI) const { 7662 SelectionDAG &DAG = DCI.DAG; 7663 SDLoc dl(N); 7664 7665 // If we're tracking CR bits, we need to be careful that we don't have: 7666 // zext(binary-ops(trunc(x), trunc(y))) 7667 // or 7668 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...) 7669 // such that we're unnecessarily moving things into CR bits that can more 7670 // efficiently stay in GPRs. 
Note that if we're not certain that the high 7671 // bits are set as required by the final extension, we still may need to do 7672 // some masking to get the proper behavior. 7673 7674 // This same functionality is important on PPC64 when dealing with 7675 // 32-to-64-bit extensions; these occur often when 32-bit values are used as 7676 // the return values of functions. Because it is so similar, it is handled 7677 // here as well. 7678 7679 if (N->getValueType(0) != MVT::i32 && 7680 N->getValueType(0) != MVT::i64) 7681 return SDValue(); 7682 7683 if (!((N->getOperand(0).getValueType() == MVT::i1 && 7684 Subtarget.useCRBits()) || 7685 (N->getOperand(0).getValueType() == MVT::i32 && 7686 Subtarget.isPPC64()))) 7687 return SDValue(); 7688 7689 if (N->getOperand(0).getOpcode() != ISD::AND && 7690 N->getOperand(0).getOpcode() != ISD::OR && 7691 N->getOperand(0).getOpcode() != ISD::XOR && 7692 N->getOperand(0).getOpcode() != ISD::SELECT && 7693 N->getOperand(0).getOpcode() != ISD::SELECT_CC) 7694 return SDValue(); 7695 7696 SmallVector<SDValue, 4> Inputs; 7697 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps; 7698 SmallPtrSet<SDNode *, 16> Visited; 7699 7700 // Visit all inputs, collect all binary operations (and, or, xor and 7701 // select) that are all fed by truncations. 7702 while (!BinOps.empty()) { 7703 SDValue BinOp = BinOps.back(); 7704 BinOps.pop_back(); 7705 7706 if (!Visited.insert(BinOp.getNode())) 7707 continue; 7708 7709 PromOps.push_back(BinOp); 7710 7711 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 7712 // The condition of the select is not promoted. 7713 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 7714 continue; 7715 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 7716 continue; 7717 7718 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 7719 isa<ConstantSDNode>(BinOp.getOperand(i))) { 7720 Inputs.push_back(BinOp.getOperand(i)); 7721 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 7722 BinOp.getOperand(i).getOpcode() == ISD::OR || 7723 BinOp.getOperand(i).getOpcode() == ISD::XOR || 7724 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 7725 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) { 7726 BinOps.push_back(BinOp.getOperand(i)); 7727 } else { 7728 // We have an input that is not a truncation or another binary 7729 // operation; we'll abort this transformation. 7730 return SDValue(); 7731 } 7732 } 7733 } 7734 7735 // Make sure that this is a self-contained cluster of operations (which 7736 // is not quite the same thing as saying that everything has only one 7737 // use). 7738 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 7739 if (isa<ConstantSDNode>(Inputs[i])) 7740 continue; 7741 7742 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 7743 UE = Inputs[i].getNode()->use_end(); 7744 UI != UE; ++UI) { 7745 SDNode *User = *UI; 7746 if (User != N && !Visited.count(User)) 7747 return SDValue(); 7748 7749 // Make sure that we're not going to promote the non-output-value 7750 // operand(s) of SELECT or SELECT_CC. 7751 // FIXME: Although we could sometimes handle this, and it does occur in 7752 // practice that one of the condition inputs to the select is also one of 7753 // the outputs, we currently can't deal with this.
7754 if (User->getOpcode() == ISD::SELECT) { 7755 if (User->getOperand(0) == Inputs[i]) 7756 return SDValue(); 7757 } else if (User->getOpcode() == ISD::SELECT_CC) { 7758 if (User->getOperand(0) == Inputs[i] || 7759 User->getOperand(1) == Inputs[i]) 7760 return SDValue(); 7761 } 7762 } 7763 } 7764 7765 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 7766 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 7767 UE = PromOps[i].getNode()->use_end(); 7768 UI != UE; ++UI) { 7769 SDNode *User = *UI; 7770 if (User != N && !Visited.count(User)) 7771 return SDValue(); 7772 7773 // Make sure that we're not going to promote the non-output-value 7774 // operand(s) of SELECT or SELECT_CC. 7775 // FIXME: Although we could sometimes handle this, and it does occur in 7776 // practice that one of the condition inputs to the select is also one of 7777 // the outputs, we currently can't deal with this. 7778 if (User->getOpcode() == ISD::SELECT) { 7779 if (User->getOperand(0) == PromOps[i]) 7780 return SDValue(); 7781 } else if (User->getOpcode() == ISD::SELECT_CC) { 7782 if (User->getOperand(0) == PromOps[i] || 7783 User->getOperand(1) == PromOps[i]) 7784 return SDValue(); 7785 } 7786 } 7787 } 7788 7789 unsigned PromBits = N->getOperand(0).getValueSizeInBits(); 7790 bool ReallyNeedsExt = false; 7791 if (N->getOpcode() != ISD::ANY_EXTEND) { 7792 // If not all of the inputs are already sign/zero extended, then 7793 // we'll still need to do that at the end. 7794 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 7795 if (isa<ConstantSDNode>(Inputs[i])) 7796 continue; 7797 7798 unsigned OpBits = 7799 Inputs[i].getOperand(0).getValueSizeInBits(); 7800 assert(PromBits < OpBits && "Truncation not to a smaller bit count?"); 7801 7802 if ((N->getOpcode() == ISD::ZERO_EXTEND && 7803 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0), 7804 APInt::getHighBitsSet(OpBits, 7805 OpBits-PromBits))) || 7806 (N->getOpcode() == ISD::SIGN_EXTEND && 7807 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) < 7808 (OpBits-(PromBits-1)))) { 7809 ReallyNeedsExt = true; 7810 break; 7811 } 7812 } 7813 } 7814 7815 // Replace all inputs, either with the truncation operand, or a 7816 // truncation or extension to the final output type. 7817 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 7818 // Constant inputs need to be replaced with the to-be-promoted nodes that 7819 // use them because they might have users outside of the cluster of 7820 // promoted nodes. 7821 if (isa<ConstantSDNode>(Inputs[i])) 7822 continue; 7823 7824 SDValue InSrc = Inputs[i].getOperand(0); 7825 if (Inputs[i].getValueType() == N->getValueType(0)) 7826 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc); 7827 else if (N->getOpcode() == ISD::SIGN_EXTEND) 7828 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 7829 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0))); 7830 else if (N->getOpcode() == ISD::ZERO_EXTEND) 7831 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 7832 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0))); 7833 else 7834 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 7835 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0))); 7836 } 7837 7838 // Replace all operations (these are all the same, but have a different 7839 // (promoted) return type). DAG.getNode will validate that the types of 7840 // a binary operator match, so go through the list in reverse so that 7841 // we've likely promoted both operands first.
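// For example, for zext i64 (or i1 (trunc(x:i64), trunc(y:i64))), x and y
// have already replaced their truncations above, the or is recreated here
// with an i64 result type, and ReallyNeedsExt then decides whether the high
// bits must still be cleared (for zext) or sign-filled (for sext) at the end.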
7842 while (!PromOps.empty()) { 7843 SDValue PromOp = PromOps.back(); 7844 PromOps.pop_back(); 7845 7846 unsigned C; 7847 switch (PromOp.getOpcode()) { 7848 default: C = 0; break; 7849 case ISD::SELECT: C = 1; break; 7850 case ISD::SELECT_CC: C = 2; break; 7851 } 7852 7853 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 7854 PromOp.getOperand(C).getValueType() != N->getValueType(0)) || 7855 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 7856 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) { 7857 // The to-be-promoted operands of this node have not yet been 7858 // promoted (this should be rare because we're going through the 7859 // list backward, but if one of the operands has several users in 7860 // this cluster of to-be-promoted nodes, it is possible). 7861 PromOps.insert(PromOps.begin(), PromOp); 7862 continue; 7863 } 7864 7865 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 7866 PromOp.getNode()->op_end()); 7867 7868 // If this node has constant inputs, then they'll need to be promoted here. 7869 for (unsigned i = 0; i < 2; ++i) { 7870 if (!isa<ConstantSDNode>(Ops[C+i])) 7871 continue; 7872 if (Ops[C+i].getValueType() == N->getValueType(0)) 7873 continue; 7874 7875 if (N->getOpcode() == ISD::SIGN_EXTEND) 7876 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 7877 else if (N->getOpcode() == ISD::ZERO_EXTEND) 7878 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 7879 else 7880 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 7881 } 7882 7883 DAG.ReplaceAllUsesOfValueWith(PromOp, 7884 DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops)); 7885 } 7886 7887 // Now we're left with the initial extension itself. 7888 if (!ReallyNeedsExt) 7889 return N->getOperand(0); 7890 7891 // To zero extend, just mask off everything except for the first bit (in the 7892 // i1 case). 7893 if (N->getOpcode() == ISD::ZERO_EXTEND) 7894 return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0), 7895 DAG.getConstant(APInt::getLowBitsSet( 7896 N->getValueSizeInBits(0), PromBits), 7897 N->getValueType(0))); 7898 7899 assert(N->getOpcode() == ISD::SIGN_EXTEND && 7900 "Invalid extension type"); 7901 EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0)); 7902 SDValue ShiftCst = 7903 DAG.getConstant(N->getValueSizeInBits(0)-PromBits, ShiftAmountTy); 7904 return DAG.getNode(ISD::SRA, dl, N->getValueType(0), 7905 DAG.getNode(ISD::SHL, dl, N->getValueType(0), 7906 N->getOperand(0), ShiftCst), ShiftCst); 7907 } 7908 7909 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, 7910 DAGCombinerInfo &DCI) const { 7911 const TargetMachine &TM = getTargetMachine(); 7912 SelectionDAG &DAG = DCI.DAG; 7913 SDLoc dl(N); 7914 switch (N->getOpcode()) { 7915 default: break; 7916 case PPCISD::SHL: 7917 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 7918 if (C->isNullValue()) // 0 << V -> 0. 7919 return N->getOperand(0); 7920 } 7921 break; 7922 case PPCISD::SRL: 7923 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 7924 if (C->isNullValue()) // 0 >>u V -> 0. 7925 return N->getOperand(0); 7926 } 7927 break; 7928 case PPCISD::SRA: 7929 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 7930 if (C->isNullValue() || // 0 >>s V -> 0. 7931 C->isAllOnesValue()) // -1 >>s V -> -1. 
7932 return N->getOperand(0); 7933 } 7934 break; 7935 case ISD::SIGN_EXTEND: 7936 case ISD::ZERO_EXTEND: 7937 case ISD::ANY_EXTEND: 7938 return DAGCombineExtBoolTrunc(N, DCI); 7939 case ISD::TRUNCATE: 7940 case ISD::SETCC: 7941 case ISD::SELECT_CC: 7942 return DAGCombineTruncBoolExt(N, DCI); 7943 case ISD::FDIV: { 7944 assert(TM.Options.UnsafeFPMath && 7945 "Reciprocal estimates require UnsafeFPMath"); 7946 7947 if (N->getOperand(1).getOpcode() == ISD::FSQRT) { 7948 SDValue RV = 7949 DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0), DCI); 7950 if (RV.getNode()) { 7951 DCI.AddToWorklist(RV.getNode()); 7952 return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), 7953 N->getOperand(0), RV); 7954 } 7955 } else if (N->getOperand(1).getOpcode() == ISD::FP_EXTEND && 7956 N->getOperand(1).getOperand(0).getOpcode() == ISD::FSQRT) { 7957 SDValue RV = 7958 DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0).getOperand(0), 7959 DCI); 7960 if (RV.getNode()) { 7961 DCI.AddToWorklist(RV.getNode()); 7962 RV = DAG.getNode(ISD::FP_EXTEND, SDLoc(N->getOperand(1)), 7963 N->getValueType(0), RV); 7964 DCI.AddToWorklist(RV.getNode()); 7965 return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), 7966 N->getOperand(0), RV); 7967 } 7968 } else if (N->getOperand(1).getOpcode() == ISD::FP_ROUND && 7969 N->getOperand(1).getOperand(0).getOpcode() == ISD::FSQRT) { 7970 SDValue RV = 7971 DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0).getOperand(0), 7972 DCI); 7973 if (RV.getNode()) { 7974 DCI.AddToWorklist(RV.getNode()); 7975 RV = DAG.getNode(ISD::FP_ROUND, SDLoc(N->getOperand(1)), 7976 N->getValueType(0), RV, 7977 N->getOperand(1).getOperand(1)); 7978 DCI.AddToWorklist(RV.getNode()); 7979 return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), 7980 N->getOperand(0), RV); 7981 } 7982 } 7983 7984 SDValue RV = DAGCombineFastRecip(N->getOperand(1), DCI); 7985 if (RV.getNode()) { 7986 DCI.AddToWorklist(RV.getNode()); 7987 return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), 7988 N->getOperand(0), RV); 7989 } 7990 7991 } 7992 break; 7993 case ISD::FSQRT: { 7994 assert(TM.Options.UnsafeFPMath && 7995 "Reciprocal estimates require UnsafeFPMath"); 7996 7997 // Compute this as 1/(1/sqrt(X)), which is the reciprocal of the 7998 // reciprocal sqrt. 7999 SDValue RV = DAGCombineFastRecipFSQRT(N->getOperand(0), DCI); 8000 if (RV.getNode()) { 8001 DCI.AddToWorklist(RV.getNode()); 8002 RV = DAGCombineFastRecip(RV, DCI); 8003 if (RV.getNode()) { 8004 // Unfortunately, RV is now NaN if the input was exactly 0. Select out 8005 // this case and force the answer to 0. 8006 8007 EVT VT = RV.getValueType(); 8008 8009 SDValue Zero = DAG.getConstantFP(0.0, VT.getScalarType()); 8010 if (VT.isVector()) { 8011 assert(VT.getVectorNumElements() == 4 && "Unknown vector type"); 8012 Zero = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Zero, Zero, Zero, Zero); 8013 } 8014 8015 SDValue ZeroCmp = 8016 DAG.getSetCC(dl, getSetCCResultType(*DAG.getContext(), VT), 8017 N->getOperand(0), Zero, ISD::SETEQ); 8018 DCI.AddToWorklist(ZeroCmp.getNode()); 8019 DCI.AddToWorklist(RV.getNode()); 8020 8021 RV = DAG.getNode(VT.isVector() ? ISD::VSELECT : ISD::SELECT, dl, VT, 8022 ZeroCmp, Zero, RV); 8023 return RV; 8024 } 8025 } 8026 8027 } 8028 break; 8029 case ISD::SINT_TO_FP: 8030 if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) { 8031 if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) { 8032 // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores. 
8033 // We allow the src/dst to be either f32/f64, but the intermediate 8034 // type must be i64. 8035 if (N->getOperand(0).getValueType() == MVT::i64 && 8036 N->getOperand(0).getOperand(0).getValueType() != MVT::ppcf128) { 8037 SDValue Val = N->getOperand(0).getOperand(0); 8038 if (Val.getValueType() == MVT::f32) { 8039 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 8040 DCI.AddToWorklist(Val.getNode()); 8041 } 8042 8043 Val = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Val); 8044 DCI.AddToWorklist(Val.getNode()); 8045 Val = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Val); 8046 DCI.AddToWorklist(Val.getNode()); 8047 if (N->getValueType(0) == MVT::f32) { 8048 Val = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Val, 8049 DAG.getIntPtrConstant(0)); 8050 DCI.AddToWorklist(Val.getNode()); 8051 } 8052 return Val; 8053 } else if (N->getOperand(0).getValueType() == MVT::i32) { 8054 // If the intermediate type is i32, we can avoid the load/store here 8055 // too. 8056 } 8057 } 8058 } 8059 break; 8060 case ISD::STORE: 8061 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)). 8062 if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() && 8063 !cast<StoreSDNode>(N)->isTruncatingStore() && 8064 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT && 8065 N->getOperand(1).getValueType() == MVT::i32 && 8066 N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) { 8067 SDValue Val = N->getOperand(1).getOperand(0); 8068 if (Val.getValueType() == MVT::f32) { 8069 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 8070 DCI.AddToWorklist(Val.getNode()); 8071 } 8072 Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val); 8073 DCI.AddToWorklist(Val.getNode()); 8074 8075 SDValue Ops[] = { 8076 N->getOperand(0), Val, N->getOperand(2), 8077 DAG.getValueType(N->getOperand(1).getValueType()) 8078 }; 8079 8080 Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 8081 DAG.getVTList(MVT::Other), Ops, 8082 cast<StoreSDNode>(N)->getMemoryVT(), 8083 cast<StoreSDNode>(N)->getMemOperand()); 8084 DCI.AddToWorklist(Val.getNode()); 8085 return Val; 8086 } 8087 8088 // Turn STORE (BSWAP) -> sthbrx/stwbrx. 8089 if (cast<StoreSDNode>(N)->isUnindexed() && 8090 N->getOperand(1).getOpcode() == ISD::BSWAP && 8091 N->getOperand(1).getNode()->hasOneUse() && 8092 (N->getOperand(1).getValueType() == MVT::i32 || 8093 N->getOperand(1).getValueType() == MVT::i16 || 8094 (TM.getSubtarget<PPCSubtarget>().hasLDBRX() && 8095 TM.getSubtarget<PPCSubtarget>().isPPC64() && 8096 N->getOperand(1).getValueType() == MVT::i64))) { 8097 SDValue BSwapOp = N->getOperand(1).getOperand(0); 8098 // Do an any-extend to 32-bits if this is a half-word input. 
8099 if (BSwapOp.getValueType() == MVT::i16) 8100 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp); 8101 8102 SDValue Ops[] = { 8103 N->getOperand(0), BSwapOp, N->getOperand(2), 8104 DAG.getValueType(N->getOperand(1).getValueType()) 8105 }; 8106 return 8107 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other), 8108 Ops, cast<StoreSDNode>(N)->getMemoryVT(), 8109 cast<StoreSDNode>(N)->getMemOperand()); 8110 } 8111 break; 8112 case ISD::LOAD: { 8113 LoadSDNode *LD = cast<LoadSDNode>(N); 8114 EVT VT = LD->getValueType(0); 8115 Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); 8116 unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(Ty); 8117 if (ISD::isNON_EXTLoad(N) && VT.isVector() && 8118 TM.getSubtarget<PPCSubtarget>().hasAltivec() && 8119 (VT == MVT::v16i8 || VT == MVT::v8i16 || 8120 VT == MVT::v4i32 || VT == MVT::v4f32) && 8121 LD->getAlignment() < ABIAlignment) { 8122 // This is a type-legal unaligned Altivec load. 8123 SDValue Chain = LD->getChain(); 8124 SDValue Ptr = LD->getBasePtr(); 8125 bool isLittleEndian = Subtarget.isLittleEndian(); 8126 8127 // This implements the loading of unaligned vectors as described in 8128 // the venerable Apple Velocity Engine overview. Specifically: 8129 // https://developer.apple.com/hardwaredrivers/ve/alignment.html 8130 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html 8131 // 8132 // The general idea is to expand a sequence of one or more unaligned 8133 // loads into an alignment-based permutation-control instruction (lvsl 8134 // or lvsr), a series of regular vector loads (which always truncate 8135 // their input address to an aligned address), and a series of 8136 // permutations. The results of these permutations are the requested 8137 // loaded values. The trick is that the last "extra" load is not taken 8138 // from the address you might suspect (sizeof(vector) bytes after the 8139 // last requested load), but rather sizeof(vector) - 1 bytes after the 8140 // last requested vector. The point of this is to avoid a page fault if 8141 // the base address happened to be aligned. This works because if the 8142 // base address is aligned, then adding less than a full vector length 8143 // will cause the last vector in the sequence to be (re)loaded. 8144 // Otherwise, the next vector will be fetched as you might suspect was 8145 // necessary. 8146 8147 // We might be able to reuse the permutation generation from 8148 // a different base address offset from this one by an aligned amount. 8149 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this 8150 // optimization later. 8151 Intrinsic::ID Intr = (isLittleEndian ? 8152 Intrinsic::ppc_altivec_lvsr : 8153 Intrinsic::ppc_altivec_lvsl); 8154 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, MVT::v16i8); 8155 8156 // Refine the alignment of the original load (a "new" load created here 8157 // which was identical to the first except for the alignment would be 8158 // merged with the existing node regardless). 
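// In outline (a sketch; lvx here stands for whatever aligned vector load the
// target uses, since such loads ignore the low-order address bits), the
// sequence built below for an unaligned load from Ptr is:
//   PermCntl  = lvsl(Ptr)                 (lvsr when little endian)
//   BaseLoad  = lvx(Ptr)
//   ExtraLoad = lvx(Ptr + 15 or 16)       (see IncValue below)
//   Result    = vperm(BaseLoad, ExtraLoad, PermCntl)   (operands swapped on LE)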
8159 MachineFunction &MF = DAG.getMachineFunction(); 8160 MachineMemOperand *MMO = 8161 MF.getMachineMemOperand(LD->getPointerInfo(), 8162 LD->getMemOperand()->getFlags(), 8163 LD->getMemoryVT().getStoreSize(), 8164 ABIAlignment); 8165 LD->refineAlignment(MMO); 8166 SDValue BaseLoad = SDValue(LD, 0); 8167 8168 // Note that the value of IncOffset (which is provided to the next 8169 // load's pointer info offset value, and thus used to calculate the 8170 // alignment), and the value of IncValue (which is actually used to 8171 // increment the pointer value) are different! This is because we 8172 // require the next load to appear to be aligned, even though it 8173 // is actually offset from the base pointer by a lesser amount. 8174 int IncOffset = VT.getSizeInBits() / 8; 8175 int IncValue = IncOffset; 8176 8177 // Walk (both up and down) the chain looking for another load at the real 8178 // (aligned) offset (the alignment of the other load does not matter in 8179 // this case). If found, then do not use the offset reduction trick, as 8180 // that will prevent the loads from being later combined (as they would 8181 // otherwise be duplicates). 8182 if (!findConsecutiveLoad(LD, DAG)) 8183 --IncValue; 8184 8185 SDValue Increment = DAG.getConstant(IncValue, getPointerTy()); 8186 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 8187 8188 SDValue ExtraLoad = 8189 DAG.getLoad(VT, dl, Chain, Ptr, 8190 LD->getPointerInfo().getWithOffset(IncOffset), 8191 LD->isVolatile(), LD->isNonTemporal(), 8192 LD->isInvariant(), ABIAlignment); 8193 8194 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 8195 BaseLoad.getValue(1), ExtraLoad.getValue(1)); 8196 8197 if (BaseLoad.getValueType() != MVT::v4i32) 8198 BaseLoad = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, BaseLoad); 8199 8200 if (ExtraLoad.getValueType() != MVT::v4i32) 8201 ExtraLoad = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, ExtraLoad); 8202 8203 // Because vperm has a big-endian bias, we must reverse the order 8204 // of the input vectors and complement the permute control vector 8205 // when generating little endian code. We have already handled the 8206 // latter by using lvsr instead of lvsl, so just reverse BaseLoad 8207 // and ExtraLoad here. 8208 SDValue Perm; 8209 if (isLittleEndian) 8210 Perm = BuildIntrinsicOp(Intrinsic::ppc_altivec_vperm, 8211 ExtraLoad, BaseLoad, PermCntl, DAG, dl); 8212 else 8213 Perm = BuildIntrinsicOp(Intrinsic::ppc_altivec_vperm, 8214 BaseLoad, ExtraLoad, PermCntl, DAG, dl); 8215 8216 if (VT != MVT::v4i32) 8217 Perm = DAG.getNode(ISD::BITCAST, dl, VT, Perm); 8218 8219 // Now we need to be really careful about how we update the users of the 8220 // original load. We cannot just call DCI.CombineTo (or 8221 // DAG.ReplaceAllUsesWith for that matter), because the load still has 8222 // uses created here (the permutation for example) that need to stay. 8223 SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); 8224 while (UI != UE) { 8225 SDUse &Use = UI.getUse(); 8226 SDNode *User = *UI; 8227 // Note: BaseLoad is checked here because it might not be N, but a 8228 // bitcast of N. 8229 if (User == Perm.getNode() || User == BaseLoad.getNode() || 8230 User == TF.getNode() || Use.getResNo() > 1) { 8231 ++UI; 8232 continue; 8233 } 8234 8235 SDValue To = Use.getResNo() ? 
TF : Perm; 8236 ++UI; 8237 8238 SmallVector<SDValue, 8> Ops; 8239 for (SDNode::op_iterator O = User->op_begin(), 8240 OE = User->op_end(); O != OE; ++O) { 8241 if (*O == Use) 8242 Ops.push_back(To); 8243 else 8244 Ops.push_back(*O); 8245 } 8246 8247 DAG.UpdateNodeOperands(User, Ops); 8248 } 8249 8250 return SDValue(N, 0); 8251 } 8252 } 8253 break; 8254 case ISD::INTRINSIC_WO_CHAIN: { 8255 bool isLittleEndian = Subtarget.isLittleEndian(); 8256 Intrinsic::ID Intr = (isLittleEndian ? 8257 Intrinsic::ppc_altivec_lvsr : 8258 Intrinsic::ppc_altivec_lvsl); 8259 if (cast<ConstantSDNode>(N->getOperand(0))->getZExtValue() == Intr && 8260 N->getOperand(1)->getOpcode() == ISD::ADD) { 8261 SDValue Add = N->getOperand(1); 8262 8263 if (DAG.MaskedValueIsZero(Add->getOperand(1), 8264 APInt::getAllOnesValue(4 /* 16 byte alignment */).zext( 8265 Add.getValueType().getScalarType().getSizeInBits()))) { 8266 SDNode *BasePtr = Add->getOperand(0).getNode(); 8267 for (SDNode::use_iterator UI = BasePtr->use_begin(), 8268 UE = BasePtr->use_end(); UI != UE; ++UI) { 8269 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 8270 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == 8271 Intr) { 8272 // We've found another LVSL/LVSR, and this address differs from 8273 // that one by an aligned amount. The results will be the same, so 8274 // use the one we've just found instead. 8275 8276 return SDValue(*UI, 0); 8277 } 8278 } 8279 } 8280 } 8281 } 8282 8283 break; 8284 case ISD::BSWAP: 8285 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 8286 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && 8287 N->getOperand(0).hasOneUse() && 8288 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 || 8289 (TM.getSubtarget<PPCSubtarget>().hasLDBRX() && 8290 TM.getSubtarget<PPCSubtarget>().isPPC64() && 8291 N->getValueType(0) == MVT::i64))) { 8292 SDValue Load = N->getOperand(0); 8293 LoadSDNode *LD = cast<LoadSDNode>(Load); 8294 // Create the byte-swapping load. 8295 SDValue Ops[] = { 8296 LD->getChain(), // Chain 8297 LD->getBasePtr(), // Ptr 8298 DAG.getValueType(N->getValueType(0)) // VT 8299 }; 8300 SDValue BSLoad = 8301 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl, 8302 DAG.getVTList(N->getValueType(0) == MVT::i64 ? 8303 MVT::i64 : MVT::i32, MVT::Other), 8304 Ops, LD->getMemoryVT(), LD->getMemOperand()); 8305 8306 // If this is an i16 load, insert the truncate. 8307 SDValue ResVal = BSLoad; 8308 if (N->getValueType(0) == MVT::i16) 8309 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad); 8310 8311 // First, combine the bswap away. This makes the value produced by the 8312 // load dead. 8313 DCI.CombineTo(N, ResVal); 8314 8315 // Next, combine the load away; we give it a bogus result value but a real 8316 // chain result. The result value is dead because the bswap is dead. 8317 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); 8318 8319 // Return N so it doesn't get rechecked! 8320 return SDValue(N, 0); 8321 } 8322 8323 break; 8324 case PPCISD::VCMP: { 8325 // If a VCMPo node already exists with exactly the same operands as this 8326 // node, use its result instead of this node (VCMPo computes both a CR6 and 8327 // a normal output). 8328 // 8329 if (!N->getOperand(0).hasOneUse() && 8330 !N->getOperand(1).hasOneUse() && 8331 !N->getOperand(2).hasOneUse()) { 8332 8333 // Scan all of the users of the LHS, looking for VCMPo's that match.
8334 SDNode *VCMPoNode = nullptr; 8335 8336 SDNode *LHSN = N->getOperand(0).getNode(); 8337 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 8338 UI != E; ++UI) 8339 if (UI->getOpcode() == PPCISD::VCMPo && 8340 UI->getOperand(1) == N->getOperand(1) && 8341 UI->getOperand(2) == N->getOperand(2) && 8342 UI->getOperand(0) == N->getOperand(0)) { 8343 VCMPoNode = *UI; 8344 break; 8345 } 8346 8347 // If there is no VCMPo node, or if its flag value is unused, don't 8348 // transform this. 8349 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 8350 break; 8351 8352 // Look at the (necessarily single) use of the flag value. If it has a 8353 // chain, this transformation is more complex. Note that multiple things 8354 // could use the value result, which we should ignore. 8355 SDNode *FlagUser = nullptr; 8356 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 8357 FlagUser == nullptr; ++UI) { 8358 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 8359 SDNode *User = *UI; 8360 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 8361 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) { 8362 FlagUser = User; 8363 break; 8364 } 8365 } 8366 } 8367 8368 // If the user is a MFOCRF instruction, we know this is safe. 8369 // Otherwise we give up for right now. 8370 if (FlagUser->getOpcode() == PPCISD::MFOCRF) 8371 return SDValue(VCMPoNode, 0); 8372 } 8373 break; 8374 } 8375 case ISD::BRCOND: { 8376 SDValue Cond = N->getOperand(1); 8377 SDValue Target = N->getOperand(2); 8378 8379 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN && 8380 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() == 8381 Intrinsic::ppc_is_decremented_ctr_nonzero) { 8382 8383 // We now need to make the intrinsic dead (it cannot be instruction 8384 // selected). 8385 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0)); 8386 assert(Cond.getNode()->hasOneUse() && 8387 "Counter decrement has more than one use"); 8388 8389 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other, 8390 N->getOperand(0), Target); 8391 } 8392 } 8393 break; 8394 case ISD::BR_CC: { 8395 // If this is a branch on an altivec predicate comparison, lower this so 8396 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This 8397 // lowering is done pre-legalize, because the legalizer lowers the predicate 8398 // compare down to code that is difficult to reassemble. 8399 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 8400 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 8401 8402 // Sometimes the promoted value of the intrinsic is ANDed with some non-zero 8403 // value. If so, look through the AND to get to the intrinsic.
8404 if (LHS.getOpcode() == ISD::AND && 8405 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN && 8406 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() == 8407 Intrinsic::ppc_is_decremented_ctr_nonzero && 8408 isa<ConstantSDNode>(LHS.getOperand(1)) && 8409 !cast<ConstantSDNode>(LHS.getOperand(1))->getConstantIntValue()-> 8410 isZero()) 8411 LHS = LHS.getOperand(0); 8412 8413 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN && 8414 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 8415 Intrinsic::ppc_is_decremented_ctr_nonzero && 8416 isa<ConstantSDNode>(RHS)) { 8417 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && 8418 "Counter decrement comparison is not EQ or NE"); 8419 8420 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 8421 bool isBDNZ = (CC == ISD::SETEQ && Val) || 8422 (CC == ISD::SETNE && !Val); 8423 8424 // We now need to make the intrinsic dead (it cannot be instruction 8425 // selected). 8426 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0)); 8427 assert(LHS.getNode()->hasOneUse() && 8428 "Counter decrement has more than one use"); 8429 8430 return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other, 8431 N->getOperand(0), N->getOperand(4)); 8432 } 8433 8434 int CompareOpc; 8435 bool isDot; 8436 8437 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 8438 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 8439 getAltivecCompareInfo(LHS, CompareOpc, isDot)) { 8440 assert(isDot && "Can't compare against a vector result!"); 8441 8442 // If this is a comparison against something other than 0/1, then we know 8443 // that the condition is never/always true. 8444 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 8445 if (Val != 0 && Val != 1) { 8446 if (CC == ISD::SETEQ) // Cond never true, remove branch. 8447 return N->getOperand(0); 8448 // Always !=, turn it into an unconditional branch. 8449 return DAG.getNode(ISD::BR, dl, MVT::Other, 8450 N->getOperand(0), N->getOperand(4)); 8451 } 8452 8453 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 8454 8455 // Create the PPCISD altivec 'dot' comparison node. 8456 SDValue Ops[] = { 8457 LHS.getOperand(2), // LHS of compare 8458 LHS.getOperand(3), // RHS of compare 8459 DAG.getConstant(CompareOpc, MVT::i32) 8460 }; 8461 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue }; 8462 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 8463 8464 // Unpack the result based on how the target uses it. 8465 PPC::Predicate CompOpc; 8466 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 8467 default: // Can't happen, don't crash on invalid number though. 8468 case 0: // Branch on the value of the EQ bit of CR6. 8469 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 8470 break; 8471 case 1: // Branch on the inverted value of the EQ bit of CR6. 8472 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 8473 break; 8474 case 2: // Branch on the value of the LT bit of CR6. 8475 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 8476 break; 8477 case 3: // Branch on the inverted value of the LT bit of CR6. 8478 CompOpc = BranchOnWhenPredTrue ? 
PPC::PRED_GE : PPC::PRED_LT; 8479 break; 8480 } 8481 8482 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 8483 DAG.getConstant(CompOpc, MVT::i32), 8484 DAG.getRegister(PPC::CR6, MVT::i32), 8485 N->getOperand(4), CompNode.getValue(1)); 8486 } 8487 break; 8488 } 8489 } 8490 8491 return SDValue(); 8492 } 8493 8494 //===----------------------------------------------------------------------===// 8495 // Inline Assembly Support 8496 //===----------------------------------------------------------------------===// 8497 8498 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 8499 APInt &KnownZero, 8500 APInt &KnownOne, 8501 const SelectionDAG &DAG, 8502 unsigned Depth) const { 8503 KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0); 8504 switch (Op.getOpcode()) { 8505 default: break; 8506 case PPCISD::LBRX: { 8507 // lhbrx is known to have the top bits cleared out. 8508 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16) 8509 KnownZero = 0xFFFF0000; 8510 break; 8511 } 8512 case ISD::INTRINSIC_WO_CHAIN: { 8513 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) { 8514 default: break; 8515 case Intrinsic::ppc_altivec_vcmpbfp_p: 8516 case Intrinsic::ppc_altivec_vcmpeqfp_p: 8517 case Intrinsic::ppc_altivec_vcmpequb_p: 8518 case Intrinsic::ppc_altivec_vcmpequh_p: 8519 case Intrinsic::ppc_altivec_vcmpequw_p: 8520 case Intrinsic::ppc_altivec_vcmpgefp_p: 8521 case Intrinsic::ppc_altivec_vcmpgtfp_p: 8522 case Intrinsic::ppc_altivec_vcmpgtsb_p: 8523 case Intrinsic::ppc_altivec_vcmpgtsh_p: 8524 case Intrinsic::ppc_altivec_vcmpgtsw_p: 8525 case Intrinsic::ppc_altivec_vcmpgtub_p: 8526 case Intrinsic::ppc_altivec_vcmpgtuh_p: 8527 case Intrinsic::ppc_altivec_vcmpgtuw_p: 8528 KnownZero = ~1U; // All bits but the low one are known to be zero. 8529 break; 8530 } 8531 } 8532 } 8533 } 8534 8535 8536 /// getConstraintType - Given a constraint, return the type of 8537 /// constraint it is for this target. 8538 PPCTargetLowering::ConstraintType 8539 PPCTargetLowering::getConstraintType(const std::string &Constraint) const { 8540 if (Constraint.size() == 1) { 8541 switch (Constraint[0]) { 8542 default: break; 8543 case 'b': 8544 case 'r': 8545 case 'f': 8546 case 'v': 8547 case 'y': 8548 return C_RegisterClass; 8549 case 'Z': 8550 // FIXME: While Z does indicate a memory constraint, it specifically 8551 // indicates an r+r address (used in conjunction with the 'y' modifier 8552 // in the replacement string). Currently, we're forcing the base 8553 // register to be r0 in the asm printer (which is interpreted as zero) 8554 // and forming the complete address in the second register. This is 8555 // suboptimal. 8556 return C_Memory; 8557 } 8558 } else if (Constraint == "wc") { // individual CR bits. 8559 return C_RegisterClass; 8560 } else if (Constraint == "wa" || Constraint == "wd" || 8561 Constraint == "wf" || Constraint == "ws") { 8562 return C_RegisterClass; // VSX registers. 8563 } 8564 return TargetLowering::getConstraintType(Constraint); 8565 } 8566 8567 /// Examine constraint type and operand type and determine a weight value. 8568 /// This object must already have been set up with the operand type 8569 /// and the current alternative constraint selected. 
8570 TargetLowering::ConstraintWeight 8571 PPCTargetLowering::getSingleConstraintMatchWeight( 8572 AsmOperandInfo &info, const char *constraint) const { 8573 ConstraintWeight weight = CW_Invalid; 8574 Value *CallOperandVal = info.CallOperandVal; 8575 // If we don't have a value, we can't do a match, 8576 // but allow it at the lowest weight. 8577 if (!CallOperandVal) 8578 return CW_Default; 8579 Type *type = CallOperandVal->getType(); 8580 8581 // Look at the constraint type. 8582 if (StringRef(constraint) == "wc" && type->isIntegerTy(1)) 8583 return CW_Register; // an individual CR bit. 8584 else if ((StringRef(constraint) == "wa" || 8585 StringRef(constraint) == "wd" || 8586 StringRef(constraint) == "wf") && 8587 type->isVectorTy()) 8588 return CW_Register; 8589 else if (StringRef(constraint) == "ws" && type->isDoubleTy()) 8590 return CW_Register; 8591 8592 switch (*constraint) { 8593 default: 8594 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 8595 break; 8596 case 'b': 8597 if (type->isIntegerTy()) 8598 weight = CW_Register; 8599 break; 8600 case 'f': 8601 if (type->isFloatTy()) 8602 weight = CW_Register; 8603 break; 8604 case 'd': 8605 if (type->isDoubleTy()) 8606 weight = CW_Register; 8607 break; 8608 case 'v': 8609 if (type->isVectorTy()) 8610 weight = CW_Register; 8611 break; 8612 case 'y': 8613 weight = CW_Register; 8614 break; 8615 case 'Z': 8616 weight = CW_Memory; 8617 break; 8618 } 8619 return weight; 8620 } 8621 8622 std::pair<unsigned, const TargetRegisterClass*> 8623 PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 8624 MVT VT) const { 8625 if (Constraint.size() == 1) { 8626 // GCC RS6000 Constraint Letters 8627 switch (Constraint[0]) { 8628 case 'b': // R1-R31 8629 if (VT == MVT::i64 && Subtarget.isPPC64()) 8630 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass); 8631 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass); 8632 case 'r': // R0-R31 8633 if (VT == MVT::i64 && Subtarget.isPPC64()) 8634 return std::make_pair(0U, &PPC::G8RCRegClass); 8635 return std::make_pair(0U, &PPC::GPRCRegClass); 8636 case 'f': 8637 if (VT == MVT::f32 || VT == MVT::i32) 8638 return std::make_pair(0U, &PPC::F4RCRegClass); 8639 if (VT == MVT::f64 || VT == MVT::i64) 8640 return std::make_pair(0U, &PPC::F8RCRegClass); 8641 break; 8642 case 'v': 8643 return std::make_pair(0U, &PPC::VRRCRegClass); 8644 case 'y': // crrc 8645 return std::make_pair(0U, &PPC::CRRCRegClass); 8646 } 8647 } else if (Constraint == "wc") { // an individual CR bit. 8648 return std::make_pair(0U, &PPC::CRBITRCRegClass); 8649 } else if (Constraint == "wa" || Constraint == "wd" || 8650 Constraint == "wf") { 8651 return std::make_pair(0U, &PPC::VSRCRegClass); 8652 } else if (Constraint == "ws") { 8653 return std::make_pair(0U, &PPC::VSFRCRegClass); 8654 } 8655 8656 std::pair<unsigned, const TargetRegisterClass*> R = 8657 TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 8658 8659 // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers 8660 // (which we call X[0-9]+). If a 64-bit value has been requested, and a 8661 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent 8662 // register. 8663 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use 8664 // the AsmName field from *RegisterInfo.td, then this would not be necessary. 
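// For example (assuming the generic resolver matched an explicit "{r5}"
// constraint to the 32-bit register R5): if VT is MVT::i64 on PPC64, the code
// below upgrades R5 to its 64-bit super-register X5 via the sub_32 index.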
8665 if (R.first && VT == MVT::i64 && Subtarget.isPPC64() && 8666 PPC::GPRCRegClass.contains(R.first)) { 8667 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); 8668 return std::make_pair(TRI->getMatchingSuperReg(R.first, 8669 PPC::sub_32, &PPC::G8RCRegClass), 8670 &PPC::G8RCRegClass); 8671 } 8672 8673 return R; 8674 } 8675 8676 8677 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 8678 /// vector. If it is invalid, don't add anything to Ops. 8679 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 8680 std::string &Constraint, 8681 std::vector<SDValue>&Ops, 8682 SelectionDAG &DAG) const { 8683 SDValue Result; 8684 8685 // Only support length 1 constraints. 8686 if (Constraint.length() > 1) return; 8687 8688 char Letter = Constraint[0]; 8689 switch (Letter) { 8690 default: break; 8691 case 'I': 8692 case 'J': 8693 case 'K': 8694 case 'L': 8695 case 'M': 8696 case 'N': 8697 case 'O': 8698 case 'P': { 8699 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op); 8700 if (!CST) return; // Must be an immediate to match. 8701 unsigned Value = CST->getZExtValue(); 8702 switch (Letter) { 8703 default: llvm_unreachable("Unknown constraint letter!"); 8704 case 'I': // "I" is a signed 16-bit constant. 8705 if ((short)Value == (int)Value) 8706 Result = DAG.getTargetConstant(Value, Op.getValueType()); 8707 break; 8708 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 8709 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. 8710 if ((short)Value == 0) 8711 Result = DAG.getTargetConstant(Value, Op.getValueType()); 8712 break; 8713 case 'K': // "K" is a constant with only the low-order 16 bits nonzero. 8714 if ((Value >> 16) == 0) 8715 Result = DAG.getTargetConstant(Value, Op.getValueType()); 8716 break; 8717 case 'M': // "M" is a constant that is greater than 31. 8718 if (Value > 31) 8719 Result = DAG.getTargetConstant(Value, Op.getValueType()); 8720 break; 8721 case 'N': // "N" is a positive constant that is an exact power of two. 8722 if ((int)Value > 0 && isPowerOf2_32(Value)) 8723 Result = DAG.getTargetConstant(Value, Op.getValueType()); 8724 break; 8725 case 'O': // "O" is the constant zero. 8726 if (Value == 0) 8727 Result = DAG.getTargetConstant(Value, Op.getValueType()); 8728 break; 8729 case 'P': // "P" is a constant whose negation is a signed 16-bit constant. 8730 if ((short)-Value == (int)-Value) 8731 Result = DAG.getTargetConstant(Value, Op.getValueType()); 8732 break; 8733 } 8734 break; 8735 } 8736 } 8737 8738 if (Result.getNode()) { 8739 Ops.push_back(Result); 8740 return; 8741 } 8742 8743 // Handle standard constraint letters. 8744 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 8745 } 8746 8747 // isLegalAddressingMode - Return true if the addressing mode represented 8748 // by AM is legal for this target, for a load/store of the specified type. 8749 bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM, 8750 Type *Ty) const { 8751 // FIXME: PPC does not allow r+i addressing modes for vectors! 8752 8753 // PPC allows a sign-extended 16-bit immediate field. 8754 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1) 8755 return false; 8756 8757 // No global is ever allowed as a base. 8758 if (AM.BaseGV) 8759 return false; 8760 8761 // PPC only supports r+r, 8762 switch (AM.Scale) { 8763 case 0: // "r+i" or just "i", depending on HasBaseReg. 8764 break; 8765 case 1: 8766 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
8767 return false; 8768 // Otherwise we have r+r or r+i. 8769 break; 8770 case 2: 8771 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed. 8772 return false; 8773 // Allow 2*r as r+r. 8774 break; 8775 default: 8776 // No other scales are supported. 8777 return false; 8778 } 8779 8780 return true; 8781 } 8782 8783 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op, 8784 SelectionDAG &DAG) const { 8785 MachineFunction &MF = DAG.getMachineFunction(); 8786 MachineFrameInfo *MFI = MF.getFrameInfo(); 8787 MFI->setReturnAddressIsTaken(true); 8788 8789 if (verifyReturnAddressArgumentIsConstant(Op, DAG)) 8790 return SDValue(); 8791 8792 SDLoc dl(Op); 8793 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 8794 8795 // Make sure the function does not optimize away the store of the RA to 8796 // the stack. 8797 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 8798 FuncInfo->setLRStoreRequired(); 8799 bool isPPC64 = Subtarget.isPPC64(); 8800 bool isDarwinABI = Subtarget.isDarwinABI(); 8801 8802 if (Depth > 0) { 8803 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 8804 SDValue Offset = 8805 8806 DAG.getConstant(PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI), 8807 isPPC64? MVT::i64 : MVT::i32); 8808 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), 8809 DAG.getNode(ISD::ADD, dl, getPointerTy(), 8810 FrameAddr, Offset), 8811 MachinePointerInfo(), false, false, false, 0); 8812 } 8813 8814 // Just load the return address off the stack. 8815 SDValue RetAddrFI = getReturnAddrFrameIndex(DAG); 8816 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), 8817 RetAddrFI, MachinePointerInfo(), false, false, false, 0); 8818 } 8819 8820 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, 8821 SelectionDAG &DAG) const { 8822 SDLoc dl(Op); 8823 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 8824 8825 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 8826 bool isPPC64 = PtrVT == MVT::i64; 8827 8828 MachineFunction &MF = DAG.getMachineFunction(); 8829 MachineFrameInfo *MFI = MF.getFrameInfo(); 8830 MFI->setFrameAddressIsTaken(true); 8831 8832 // Naked functions never have a frame pointer, and so we use r1. For all 8833 // other functions, this decision must be delayed until during PEI. 8834 unsigned FrameReg; 8835 if (MF.getFunction()->getAttributes().hasAttribute( 8836 AttributeSet::FunctionIndex, Attribute::Naked)) 8837 FrameReg = isPPC64 ? PPC::X1 : PPC::R1; 8838 else 8839 FrameReg = isPPC64 ? PPC::FP8 : PPC::FP; 8840 8841 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, 8842 PtrVT); 8843 while (Depth--) 8844 FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(), 8845 FrameAddr, MachinePointerInfo(), false, false, 8846 false, 0); 8847 return FrameAddr; 8848 } 8849 8850 // FIXME? Maybe this could be a TableGen attribute on some registers and 8851 // this table could be generated automatically from RegInfo. 8852 unsigned PPCTargetLowering::getRegisterByName(const char* RegName, 8853 EVT VT) const { 8854 bool isPPC64 = Subtarget.isPPC64(); 8855 bool isDarwinABI = Subtarget.isDarwinABI(); 8856 8857 if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) || 8858 (!isPPC64 && VT != MVT::i32)) 8859 report_fatal_error("Invalid register global variable type"); 8860 8861 bool is64Bit = isPPC64 && VT == MVT::i64; 8862 unsigned Reg = StringSwitch<unsigned>(RegName) 8863 .Case("r1", is64Bit ? PPC::X1 : PPC::R1) 8864 .Case("r2", isDarwinABI ? 0 : (is64Bit ? 
bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
  return false;
}

/// getOptimalMemOpType - Returns the target-specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero, the destination alignment can satisfy any
/// constraint. Similarly, if SrcAlign is zero there is no need to check it
/// against the alignment requirement, probably because the source does not
/// need to be loaded. If 'IsMemset' is true, this is expanding a memset. If
/// 'ZeroMemset' is true, it is a memset of zero. 'MemcpyStrSrc' indicates
/// whether the memcpy source is constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
                                           unsigned DstAlign, unsigned SrcAlign,
                                           bool IsMemset, bool ZeroMemset,
                                           bool MemcpyStrSrc,
                                           MachineFunction &MF) const {
  if (Subtarget.isPPC64()) {
    return MVT::i64;
  } else {
    return MVT::i32;
  }
}

/// \brief Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0 || BitSize > 64)
    return false;
  return true;
}

bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  // Truncating i64 to i32 is free: it is simply a use of the low 32 bits
  // of the 64-bit value.
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  // PPC compare instructions take a 16-bit immediate, either signed
  // (cmpwi/cmpdi) or unsigned (cmplwi/cmpldi).
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
                                                      unsigned,
                                                      bool *Fast) const {
  if (DisablePPCUnaligned)
    return false;

  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual expansion,
  // and generally only traps for software emulation when crossing page
  // boundaries.

  if (!VT.isSimple())
    return false;

  if (VT.getSimpleVT().isVector()) {
    if (Subtarget.hasVSX()) {
      if (VT != MVT::v2f64 && VT != MVT::v2i64)
        return false;
    } else {
      return false;
    }
  }

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}
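// For illustration: a misaligned i32 or f64 access is reported as legal
// here (with *Fast set), whereas a misaligned Altivec v4f32 access is not;
// with VSX, only the v2f64 and v2i64 vector types are allowed misaligned
// access.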
bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  default:
    break;
  }

  return false;
}

bool
PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
                        EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return false;

  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref || Subtarget.enableMachineScheduler())
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}

// Create a fast isel object.
FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) const {
  return PPC::createFastISel(FuncInfo, LibInfo);
}
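// Note: PPC::createFastISel is implemented in PPCFastISel.cpp; the selector
// returned here is used when fast instruction selection is enabled (e.g. at
// -O0) and falls back to SelectionDAG for anything it cannot handle.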