//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPCCallingConv.h"
#include "PPCCCState.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCTargetMachine.h"
#include "PPCTargetObjectFile.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include <list>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!Subtarget.useSoftFloat()) {
    addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
    addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
  }

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
                         isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // This is used in the ppcf128->int sequence. Note it has different semantics
  // from FP_ROUND: that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
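  // (With those expanded as well, the legalizer typically lowers a remainder
  // as a divide followed by a multiply and subtract: rem = a - (a / b) * b.)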
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FMA  , MVT::f64, Legal);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);
  setOperationAction(ISD::FMA  , MVT::f32, Legal);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Expand FSQRT unless the subtarget has a hardware square-root instruction,
  // or unsafe FP math allows a reciprocal-square-root-estimate expansion.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP or CTTZ; CTPOP is only fast with POPCNTD.
  setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
  setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
  setOperationAction(ISD::CTTZ , MVT::i64  , Expand);

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
    setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
    setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32   , Expand);
  setOperationAction(ISD::ROTR, MVT::i64   , Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have SELECT.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
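  // (fsel computes FRT = (FRA >= 0.0) ? FRC : FRB, so an eligible FP
  // select_cc can be rewritten as a nonnegativity test, e.g. by selecting
  // on the sign of a subtraction.)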
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP as supported here is NOT intended to support
  // SjLj exception handling; it is a lightweight setjmp/longjmp replacement
  // used for continuations, user-level threading, and the like. As a result,
  // no other SjLj exception interfaces are implemented, so please don't build
  // your own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
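      // (For example, va_arg(ap, int) reads the argument's full i64 slot and
      // truncates the result to 32 bits.)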
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  if (Subtarget.isSVR4ABI() && !isPPC64)
    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  // Comparisons that require checking two conditions.
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // 64-bit-capable implementations also have instructions for converting
    // between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
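  // (FPCVT covers the unsigned and single-precision conversion forms, e.g.
  // fctiwuz/fctiduz and fcfids/fcfidus.)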
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // Vector instructions introduced in P8.
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND   , VT, Promote);
      AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
      setOperationAction(ISD::OR    , VT, Promote);
      AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , VT, Promote);
      AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , VT, Promote);
      AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL , VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FPOWI, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; some we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ?
                           Legal : Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
      setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Vector operation legalization checks the result type of
      // SIGN_EXTEND_INREG, overall legalization checks the inner type.
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD , MVT::v4f64, Custom);
    setOperationAction(ISD::STORE , MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT , MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT , MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND , MVT::v4f32, Legal);
    setOperationAction(ISD::FP_ROUND_INREG , MVT::v4f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG , MVT::v4f64, Legal);
    setOperationAction(ISD::FABS , MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN , MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS , MVT::v4f64, Expand);
    setOperationAction(ISD::FPOWI , MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2 , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10 , MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP , MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2 , MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD , MVT::v4f32, Custom);
    setOperationAction(ISD::STORE , MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT , MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT , MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG , MVT::v4f32, Legal);
    setOperationAction(ISD::FABS , MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN , MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS , MVT::v4f32, Expand);
    setOperationAction(ISD::FPOWI , MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2 , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10 , MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP , MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2 , MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND , MVT::v4i1, Legal);
    setOperationAction(ISD::OR , MVT::v4i1, Legal);
    setOperationAction(ISD::XOR , MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD , MVT::v4i1, Custom);
    setOperationAction(ISD::STORE , MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(2);
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(4);

  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
    setPrefFunctionAlignment(4);
    setPrefLoopAlignment(4);
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary. Everything
  // else is passed on an 8-byte boundary on PPC64 and a 4-byte boundary on
  // PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ?
                                     32 : 16);
  return Align;
}

bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER: break;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
  case PPCISD::FRE: return "PPCISD::FRE";
  case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::XXSPLT: return "PPCISD::XXSPLT";
  case PPCISD::CMPB: return "PPCISD::CMPB";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
  case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
  case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL: return "PPCISD::SRL";
  case PPCISD::SRA: return "PPCISD::SRA";
  case PPCISD::SHL: return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL: return "PPCISD::CALL";
  case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR: return "PPCISD::MTCTR";
  case PPCISD::BCTRL: return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
  case PPCISD::MFVSR: return "PPCISD::MFVSR";
  case PPCISD::MTVSRA: return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ";
  case PPCISD::ANDIo_1_EQ_BIT: return "PPCISD::ANDIo_1_EQ_BIT";
  case PPCISD::ANDIo_1_GT_BIT: return "PPCISD::ANDIo_1_GT_BIT";
  case PPCISD::VCMP: return "PPCISD::VCMP";
  case PPCISD::VCMPo: return "PPCISD::VCMPo";
  case PPCISD::LBRX: return "PPCISD::LBRX";
  case PPCISD::STBRX: return "PPCISD::STBRX";
  case PPCISD::LFIWAX: return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX: return "PPCISD::LFIWZX";
  case PPCISD::LXVD2X: return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X: return "PPCISD::STXVD2X";
  case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ: return "PPCISD::BDNZ";
  case PPCISD::BDZ: return "PPCISD::BDZ";
  case PPCISD::MFFS: return "PPCISD::MFFS";
  case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET: return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
  case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT";
  case PPCISD::SC: return "PPCISD::SC";
  case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB";
  case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE";
  case PPCISD::RFEBB: return "PPCISD::RFEBB";
  case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD";
  case PPCISD::QVFPERM: return "PPCISD::QVFPERM";
  case PPCISD::QVGPCI: return "PPCISD::QVGPCI";
  case PPCISD::QVALIGNI: return "PPCISD::QVALIGNI";
  case PPCISD::QVESPLATI: return "PPCISD::QVESPLATI";
  case PPCISD::QBFLT: return "PPCISD::QBFLT";
  case PPCISD::QVLFSb: return "PPCISD::QVLFSb";
  }
  return nullptr;
}

EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
                                          EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

  if (Subtarget.hasQPX())
    return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());

  return VT.changeVectorElementTypeToInteger();
}

bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
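/// For example, with two different inputs on a big-endian target
/// (ShuffleKind 0), the expected mask is <1,3,5,...,31>: the odd-numbered
/// (low-order) byte of each halfword from both inputs, as the loop below
/// checks.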
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
        return false;
  }
  return true;
}

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
/// current subtarget.
///
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
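/// For example, with two different inputs on a big-endian target
/// (ShuffleKind 0), the expected mask is <4,5,6,7, 12,13,14,15, 20,21,22,23,
/// 28,29,30,31>: the low-order word of each doubleword from both inputs,
/// matching the checks in the loop below.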
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  const PPCSubtarget& Subtarget =
      static_cast<const PPCSubtarget&>(DAG.getSubtarget());
  if (!Subtarget.hasP8Vector())
    return false;

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
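/// For example, a big-endian VMRGLB with two different inputs (ShuffleKind 0)
/// interleaves the low halves byte by byte, which corresponds to
/// isVMerge(N, 1, 8, 24) and the mask <8,24, 9,25, 10,26, 11,27, 12,28,
/// 13,29, 14,30, 15,31>.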
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}

/**
 * \brief Common function used to match vmrgew and vmrgow shuffles
 *
 * The IndexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target machine.
 *   - Little Endian:
 *     - Use offset of 0 to check for odd elements
 *     - Use offset of 4 to check for even elements
 *   - Big Endian:
 *     - Use offset of 0 to check for even elements
 *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 * Targeting your applications - what little endian and big endian IBM XL C/C++
 * compiler differences mean to you
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements
 * are numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, thus each vector will contain 16 elements
 * of 8 bits each. More info on the shuffle vector can be found in the
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
 * Language Reference.
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 *   - If the instruction uses the same vector for both inputs, the range of
 *     the indices will be 0 to 15. In this case, the RHSStart value passed
 *     should be 0.
 *   - If the instruction has two different vectors then the range of the
 *     indices will be 0 to 31.
 *     In this case, the RHSStart value passed should be 16 (indices 0-15
 *     specify elements in the first vector while indices 16 to 31 specify
 *     elements in the second vector).
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] IndexOffset Specifies whether to look for even or odd elements
 * \param[in] RHSStartValue Specifies the starting index for the righthand
 * input vector to the shuffle_vector instruction
 * \return true iff this shuffle vector represents an even or odd word merge
 */
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
      if (!isConstantOrUndef(N->getMaskElt(i*4+j),
                             i*RHSStartValue+j+IndexOffset) ||
          !isConstantOrUndef(N->getMaskElt(i*4+j+8),
                             i*RHSStartValue+j+IndexOffset+8))
        return false;
  return true;
}

/**
 * \brief Determine if the specified shuffle mask is suitable for the vmrgew
 * or vmrgow instructions.
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
 * \param[in] ShuffleKind Identify the type of merge:
 *   - 0 = big-endian merge with two different inputs;
 *   - 1 = either-endian merge with two identical inputs;
 *   - 2 = little-endian merge with two different inputs (inputs are swapped
 *     for little-endian merges).
 * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask is suitable for a vmrgew or vmrgow
 * instruction
 */
bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                              unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    unsigned indexOffset = CheckEven ? 4 : 0;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  } else {
    unsigned indexOffset = CheckEven ? 0 : 4;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 0) // Normal
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  }
}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
/// The ShuffleKind distinguishes between big-endian operations with two
/// different inputs (0), either-endian operations with two identical inputs
/// (1), and little-endian operations with two different inputs (2). For the
/// latter, the input operands are swapped (see PPCInstrAltivec.td).
int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                             SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::v16i8)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1; // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
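  // For example, with ShuffleKind 0 on big-endian, the mask <3,4,5,...,18>
  // yields a shift amount of 3 bytes.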
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;

  ShiftAmt -= i;
  bool isLE = DAG.getDataLayout().isLittleEndian();

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else if (ShuffleKind == 1) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  } else
    return -1;

  if (isLE)
    ShiftAmt = 16 - ShiftAmt;

  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // The consecutive indices need to specify an element, not part of two
  // different elements. So abandon ship early if this isn't the case.
  if (N->getMaskElt(0) % EltSize != 0)
    return false;

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte
  // element splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
                                SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  if (DAG.getDataLayout().isLittleEndian())
    return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  else
    return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted. The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(nullptr, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
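    // E.g. checking a v8i16 build_vector for ByteSize 4 (vspltisw) gives
    // EltSize 2 and Multiple 2: each adjacent pair of i16 elements must agree
    // to form one 32-bit splat value.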
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across each chunk.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).isUndef()) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue(); // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk. See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1. If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue; // Must have been undefs.

      LeadingZero &= isNullConstant(UniquedVals[i]);
      LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16) // 0,0,0,4 -> vspltisw(4)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }
    if (LeadingOnes) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its
  // elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).isUndef()) continue;
    if (!OpVal.getNode())
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat. The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, check if it consists
  // of a repeated bit pattern of size ByteSize.
  if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
    return SDValue();

  // Properly sign extend the value.
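  // E.g. for ByteSize 2, a repeated 16-bit pattern 0xFFFE sign extends to -2,
  // which fits the 5-bit vspltish immediate checked below.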
  int MaskVal = SignExtend32(Value, ByteSize * 8);

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5 bit sext field, return it.
  if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
  return SDValue();
}

/// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the
/// shift amount, otherwise return -1.
int PPC::isQVALIGNIShuffleMask(SDNode *N) {
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 4) return -1; // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  // Check the rest of the elements to see if they are consecutive.
  for (++i; i != 4; ++i)
    if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
      return -1;

  return ShiftAmt;
}

//===----------------------------------------------------------------------===//
//  Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value. If so, this returns true and the
/// immediate.
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
static bool isIntS16Immediate(SDValue Op, short &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}

/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation. Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index,
                                            SelectionDAG &DAG) const {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false; // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false; // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false; // r+i can fold it if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
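    // E.g. in (x << 4) | 7 the low four bits of the LHS are known zero, so
    // the OR cannot produce a carry and behaves exactly like an ADD.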
    APInt LHSKnownZero, LHSKnownOne;
    APInt RHSKnownZero, RHSKnownOne;
    DAG.computeKnownBits(N.getOperand(0),
                         LHSKnownZero, LHSKnownOne);

    if (LHSKnownZero.getBoolValue()) {
      DAG.computeKnownBits(N.getOperand(1),
                           RHSKnownZero, RHSKnownOne);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnownZero | RHSKnownZero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

// If we happen to be doing an i64 load or store into a stack slot that has
// less than a 4-byte alignment, then the frame-index elimination may need to
// use an indexed load or store instruction (because the offset may not be a
// multiple of 4). The extra register needed to hold the offset comes from the
// register scavenger, and it is possible that the scavenger will need to use
// an emergency spill slot. As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
  // FIXME: This does not handle the LWA case.
  if (VT != MVT::i64)
    return;

  // NOTE: We'll exclude negative FIs here, which come from argument
  // lowering, because there are no known test cases triggering this problem
  // using packed structures (or similar). We can remove this exclusion if
  // we find such a test case. The reason why this is so test-case driven is
  // because this entire 'fixup' is only to prevent crashes (from the
  // register scavenger) on not-really-valid inputs. For example, if we have:
  //   %a = alloca i1
  //   %b = bitcast i1* %a to i64*
  //   store i64 0, i64* %b
  // then the store should really be marked as 'align 1', but is not. If it
  // were marked as 'align 1' then the indexed form would have been
  // instruction-selected initially, and the problem this 'fixup' is preventing
  // won't happen regardless.
  if (FrameIdx < 0)
    return;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  unsigned Align = MFI->getObjectAlignment(FrameIdx);
  if (Align >= 4)
    return;

  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setHasNonRISpills();
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg. If Aligned is true, only accept displacements
/// suitable for STD and friends, i.e. multiples of 4.
bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
                                            SDValue &Base,
                                            SelectionDAG &DAG,
                                            bool Aligned) const {
  // FIXME dl should come from parent load or store, not from address
  SDLoc dl(N);
  // If this can be more profitably realized as r+r, fail.
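  // (A displacement form such as lwz r3, 8(r31) is preferred when the offset
  // fits in a signed 16-bit field; otherwise an indexed form such as
  // lwzx r3, r31, r4 must be used.)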
1759 if (SelectAddressRegReg(N, Disp, Base, DAG)) 1760 return false; 1761 1762 if (N.getOpcode() == ISD::ADD) { 1763 short imm = 0; 1764 if (isIntS16Immediate(N.getOperand(1), imm) && 1765 (!Aligned || (imm & 3) == 0)) { 1766 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 1767 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 1768 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1769 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 1770 } else { 1771 Base = N.getOperand(0); 1772 } 1773 return true; // [r+i] 1774 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 1775 // Match LOAD (ADD (X, Lo(G))). 1776 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() 1777 && "Cannot handle constant offsets yet!"); 1778 Disp = N.getOperand(1).getOperand(0); // The global address. 1779 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 1780 Disp.getOpcode() == ISD::TargetGlobalTLSAddress || 1781 Disp.getOpcode() == ISD::TargetConstantPool || 1782 Disp.getOpcode() == ISD::TargetJumpTable); 1783 Base = N.getOperand(0); 1784 return true; // [&g+r] 1785 } 1786 } else if (N.getOpcode() == ISD::OR) { 1787 short imm = 0; 1788 if (isIntS16Immediate(N.getOperand(1), imm) && 1789 (!Aligned || (imm & 3) == 0)) { 1790 // If this is an or of disjoint bitfields, we can codegen this as an add 1791 // (for better address arithmetic) if the LHS and RHS of the OR are 1792 // provably disjoint. 1793 APInt LHSKnownZero, LHSKnownOne; 1794 DAG.computeKnownBits(N.getOperand(0), LHSKnownZero, LHSKnownOne); 1795 1796 if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 1797 // If all of the bits are known zero on the LHS or RHS, the add won't 1798 // carry. 1799 if (FrameIndexSDNode *FI = 1800 dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 1801 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1802 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 1803 } else { 1804 Base = N.getOperand(0); 1805 } 1806 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 1807 return true; 1808 } 1809 } 1810 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 1811 // Loading from a constant address. 1812 1813 // If this address fits entirely in a 16-bit sext immediate field, codegen 1814 // this as "d, 0" 1815 short Imm; 1816 if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) { 1817 Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0)); 1818 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, 1819 CN->getValueType(0)); 1820 return true; 1821 } 1822 1823 // Handle 32-bit sext immediates with LIS + addr mode. 1824 if ((CN->getValueType(0) == MVT::i32 || 1825 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) && 1826 (!Aligned || (CN->getZExtValue() & 3) == 0)) { 1827 int Addr = (int)CN->getZExtValue(); 1828 1829 // Otherwise, break this down into an LIS + disp. 1830 Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32); 1831 1832 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl, 1833 MVT::i32); 1834 unsigned Opc = CN->getValueType(0) == MVT::i32 ? 
                                                   PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
    fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
  } else
    Base = N;
  return true; // [r+0]
}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address. This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the operand is an addition, always emit this as [r+r], since this is
  // better (for code size, and execution, as the memop does the add for free)
  // than emitting an explicit add.
  if (N.getOpcode() == ISD::ADD) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Index = N;
  return true;
}

/// getPreIndexedAddressParts - Returns true, along with the base pointer,
/// the offset pointer, and the addressing mode (by reference), if the node's
/// address can be legally represented as a pre-indexed load/store address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  unsigned Alignment;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlignment();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlignment();
    isLoad = false;
  } else
    return false;

  // PowerPC doesn't have preinc load/store instructions for vectors (except
  // for QPX, which does have preinc r+r forms).
  if (VT.isVector()) {
    if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
      return false;
    } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
      AM = ISD::PRE_INC;
      return true;
    }
  }

  if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {

    // Common code will reject creating a pre-inc form if the base pointer
    // is a frame index, or if N is a store and the base pointer is either
    // the same as or a predecessor of the value being stored. Check for
    // those situations here, and try with swapped Base/Offset instead.
1918 bool Swap = false; 1919 1920 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base)) 1921 Swap = true; 1922 else if (!isLoad) { 1923 SDValue Val = cast<StoreSDNode>(N)->getValue(); 1924 if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode())) 1925 Swap = true; 1926 } 1927 1928 if (Swap) 1929 std::swap(Base, Offset); 1930 1931 AM = ISD::PRE_INC; 1932 return true; 1933 } 1934 1935 // LDU/STU can only handle immediates that are a multiple of 4. 1936 if (VT != MVT::i64) { 1937 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, false)) 1938 return false; 1939 } else { 1940 // LDU/STU need an address with at least 4-byte alignment. 1941 if (Alignment < 4) 1942 return false; 1943 1944 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, true)) 1945 return false; 1946 } 1947 1948 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1949 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 1950 // sext i32 to i64 when addr mode is r+i. 1951 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 && 1952 LD->getExtensionType() == ISD::SEXTLOAD && 1953 isa<ConstantSDNode>(Offset)) 1954 return false; 1955 } 1956 1957 AM = ISD::PRE_INC; 1958 return true; 1959 } 1960 1961 //===----------------------------------------------------------------------===// 1962 // LowerOperation implementation 1963 //===----------------------------------------------------------------------===// 1964 1965 /// GetLabelAccessInfo - Return true if we should reference labels using a 1966 /// PICBase, set the HiOpFlags and LoOpFlags to the target MO flags. 1967 static bool GetLabelAccessInfo(const TargetMachine &TM, 1968 const PPCSubtarget &Subtarget, 1969 unsigned &HiOpFlags, unsigned &LoOpFlags, 1970 const GlobalValue *GV = nullptr) { 1971 HiOpFlags = PPCII::MO_HA; 1972 LoOpFlags = PPCII::MO_LO; 1973 1974 // Don't use the pic base if not in PIC relocation model. 1975 bool isPIC = TM.getRelocationModel() == Reloc::PIC_; 1976 1977 if (isPIC) { 1978 HiOpFlags |= PPCII::MO_PIC_FLAG; 1979 LoOpFlags |= PPCII::MO_PIC_FLAG; 1980 } 1981 1982 // If this is a reference to a global value that requires a non-lazy-ptr, make 1983 // sure that instruction lowering adds it. 1984 if (GV && Subtarget.hasLazyResolverStub(GV)) { 1985 HiOpFlags |= PPCII::MO_NLP_FLAG; 1986 LoOpFlags |= PPCII::MO_NLP_FLAG; 1987 1988 if (GV->hasHiddenVisibility()) { 1989 HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 1990 LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 1991 } 1992 } 1993 1994 return isPIC; 1995 } 1996 1997 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, 1998 SelectionDAG &DAG) { 1999 SDLoc DL(HiPart); 2000 EVT PtrVT = HiPart.getValueType(); 2001 SDValue Zero = DAG.getConstant(0, DL, PtrVT); 2002 2003 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); 2004 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); 2005 2006 // With PIC, the first instruction is actually "GR+hi(&G)". 2007 if (isPIC) 2008 Hi = DAG.getNode(ISD::ADD, DL, PtrVT, 2009 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); 2010 2011 // Generate non-pic code that has direct accesses to the constant pool. 2012 // The address of the global is just (hi(&g)+lo(&g)). 
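  // (For 32-bit ELF this typically materializes as lis/addi with @ha and @l
  // relocations, e.g. lis r3, g@ha; addi r3, r3, g@l.)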
2013 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); 2014 } 2015 2016 static void setUsesTOCBasePtr(MachineFunction &MF) { 2017 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2018 FuncInfo->setUsesTOCBasePtr(); 2019 } 2020 2021 static void setUsesTOCBasePtr(SelectionDAG &DAG) { 2022 setUsesTOCBasePtr(DAG.getMachineFunction()); 2023 } 2024 2025 static SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, bool Is64Bit, 2026 SDValue GA) { 2027 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 2028 SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) : 2029 DAG.getNode(PPCISD::GlobalBaseReg, dl, VT); 2030 2031 SDValue Ops[] = { GA, Reg }; 2032 return DAG.getMemIntrinsicNode( 2033 PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT, 2034 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0, false, true, 2035 false, 0); 2036 } 2037 2038 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 2039 SelectionDAG &DAG) const { 2040 EVT PtrVT = Op.getValueType(); 2041 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 2042 const Constant *C = CP->getConstVal(); 2043 2044 // 64-bit SVR4 ABI code is always position-independent. 2045 // The actual address of the GlobalValue is stored in the TOC. 2046 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2047 setUsesTOCBasePtr(DAG); 2048 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0); 2049 return getTOCEntry(DAG, SDLoc(CP), true, GA); 2050 } 2051 2052 unsigned MOHiFlag, MOLoFlag; 2053 bool isPIC = 2054 GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag); 2055 2056 if (isPIC && Subtarget.isSVR4ABI()) { 2057 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 2058 PPCII::MO_PIC_FLAG); 2059 return getTOCEntry(DAG, SDLoc(CP), false, GA); 2060 } 2061 2062 SDValue CPIHi = 2063 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag); 2064 SDValue CPILo = 2065 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag); 2066 return LowerLabelRef(CPIHi, CPILo, isPIC, DAG); 2067 } 2068 2069 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 2070 EVT PtrVT = Op.getValueType(); 2071 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 2072 2073 // 64-bit SVR4 ABI code is always position-independent. 2074 // The actual address of the GlobalValue is stored in the TOC. 2075 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2076 setUsesTOCBasePtr(DAG); 2077 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 2078 return getTOCEntry(DAG, SDLoc(JT), true, GA); 2079 } 2080 2081 unsigned MOHiFlag, MOLoFlag; 2082 bool isPIC = 2083 GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag); 2084 2085 if (isPIC && Subtarget.isSVR4ABI()) { 2086 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 2087 PPCII::MO_PIC_FLAG); 2088 return getTOCEntry(DAG, SDLoc(GA), false, GA); 2089 } 2090 2091 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 2092 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 2093 return LowerLabelRef(JTIHi, JTILo, isPIC, DAG); 2094 } 2095 2096 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 2097 SelectionDAG &DAG) const { 2098 EVT PtrVT = Op.getValueType(); 2099 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); 2100 const BlockAddress *BA = BASDN->getBlockAddress(); 2101 2102 // 64-bit SVR4 ABI code is always position-independent. 2103 // The actual BlockAddress is stored in the TOC. 
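  // (The TOC entry is reached via the TOC base pointer in r2/X2; for example,
  // a small-code-model access is simply ld r3, sym@toc(r2).)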
2104 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2105 setUsesTOCBasePtr(DAG); 2106 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); 2107 return getTOCEntry(DAG, SDLoc(BASDN), true, GA); 2108 } 2109 2110 unsigned MOHiFlag, MOLoFlag; 2111 bool isPIC = 2112 GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag); 2113 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 2114 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 2115 return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG); 2116 } 2117 2118 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 2119 SelectionDAG &DAG) const { 2120 2121 // FIXME: TLS addresses currently use medium model code sequences, 2122 // which is the most useful form. Eventually support for small and 2123 // large models could be added if users need it, at the cost of 2124 // additional complexity. 2125 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2126 if (DAG.getTarget().Options.EmulatedTLS) 2127 return LowerToTLSEmulatedModel(GA, DAG); 2128 2129 SDLoc dl(GA); 2130 const GlobalValue *GV = GA->getGlobal(); 2131 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2132 bool is64bit = Subtarget.isPPC64(); 2133 const Module *M = DAG.getMachineFunction().getFunction()->getParent(); 2134 PICLevel::Level picLevel = M->getPICLevel(); 2135 2136 TLSModel::Model Model = getTargetMachine().getTLSModel(GV); 2137 2138 if (Model == TLSModel::LocalExec) { 2139 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2140 PPCII::MO_TPREL_HA); 2141 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2142 PPCII::MO_TPREL_LO); 2143 SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2, 2144 is64bit ? MVT::i64 : MVT::i32); 2145 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 2146 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 2147 } 2148 2149 if (Model == TLSModel::InitialExec) { 2150 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2151 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2152 PPCII::MO_TLS); 2153 SDValue GOTPtr; 2154 if (is64bit) { 2155 setUsesTOCBasePtr(DAG); 2156 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2157 GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, 2158 PtrVT, GOTReg, TGA); 2159 } else 2160 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); 2161 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, 2162 PtrVT, TGA, GOTPtr); 2163 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 2164 } 2165 2166 if (Model == TLSModel::GeneralDynamic) { 2167 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2168 SDValue GOTPtr; 2169 if (is64bit) { 2170 setUsesTOCBasePtr(DAG); 2171 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2172 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 2173 GOTReg, TGA); 2174 } else { 2175 if (picLevel == PICLevel::Small) 2176 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2177 else 2178 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2179 } 2180 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT, 2181 GOTPtr, TGA, TGA); 2182 } 2183 2184 if (Model == TLSModel::LocalDynamic) { 2185 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2186 SDValue GOTPtr; 2187 if (is64bit) { 2188 setUsesTOCBasePtr(DAG); 2189 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2190 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 2191 GOTReg, TGA); 2192 } else { 2193 if (picLevel == PICLevel::Small) 2194 GOTPtr = 
          DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
      else
        GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
    }
    SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
                                  PtrVT, GOTPtr, TGA, TGA);
    SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
                                      PtrVT, TLSAddr, TGA);
    return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
  }

  llvm_unreachable("Unknown TLS model!");
}

SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  SDLoc DL(GSDN);
  const GlobalValue *GV = GSDN->getGlobal();

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
    return getTOCEntry(DAG, DL, true, GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC =
      GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag, GV);

  if (isPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
                                            GSDN->getOffset(),
                                            PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, DL, false, GA);
  }

  SDValue GAHi =
    DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
  SDValue GALo =
    DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);

  SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG);

  // If the global reference is actually to a non-lazy-pointer, we have to do
  // an extra load to get the address of the global.
  if (MOHiFlag & PPCII::MO_NLP_FLAG)
    Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(),
                      false, false, false, 0);
  return Ptr;
}

SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc dl(Op);

  if (Op.getValueType() == MVT::v2i64) {
    // When the operands themselves are v2i64 values, we need to do something
    // special because VSX has no underlying comparison operations for these.
    if (Op.getOperand(0).getValueType() == MVT::v2i64) {
      // Equality can be handled by casting to the legal type for Altivec
      // comparisons, everything else needs to be expanded.
      if (CC == ISD::SETEQ || CC == ISD::SETNE) {
        return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
                 DAG.getSetCC(dl, MVT::v4i32,
                   DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
                   DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
                   CC));
      }

      return SDValue();
    }

    // We handle most of these in the usual way.
    return Op;
  }

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
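  // E.g. for i32, (x == 0) lowers to (cntlzw x) >> 5: cntlzw returns 32 only
  // when x is zero, and 32 is the only count with bit 5 set.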
2277 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2278 if (C->isNullValue() && CC == ISD::SETEQ) { 2279 EVT VT = Op.getOperand(0).getValueType(); 2280 SDValue Zext = Op.getOperand(0); 2281 if (VT.bitsLT(MVT::i32)) { 2282 VT = MVT::i32; 2283 Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0)); 2284 } 2285 unsigned Log2b = Log2_32(VT.getSizeInBits()); 2286 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext); 2287 SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz, 2288 DAG.getConstant(Log2b, dl, MVT::i32)); 2289 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc); 2290 } 2291 // Leave comparisons against 0 and -1 alone for now, since they're usually 2292 // optimized. FIXME: revisit this when we can custom lower all setcc 2293 // optimizations. 2294 if (C->isAllOnesValue() || C->isNullValue()) 2295 return SDValue(); 2296 } 2297 2298 // If we have an integer seteq/setne, turn it into a compare against zero 2299 // by xor'ing the rhs with the lhs, which is faster than setting a 2300 // condition register, reading it back out, and masking the correct bit. The 2301 // normal approach here uses sub to do this instead of xor. Using xor exposes 2302 // the result to other bit-twiddling opportunities. 2303 EVT LHSVT = Op.getOperand(0).getValueType(); 2304 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 2305 EVT VT = Op.getValueType(); 2306 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0), 2307 Op.getOperand(1)); 2308 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC); 2309 } 2310 return SDValue(); 2311 } 2312 2313 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG, 2314 const PPCSubtarget &Subtarget) const { 2315 SDNode *Node = Op.getNode(); 2316 EVT VT = Node->getValueType(0); 2317 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 2318 SDValue InChain = Node->getOperand(0); 2319 SDValue VAListPtr = Node->getOperand(1); 2320 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 2321 SDLoc dl(Node); 2322 2323 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only"); 2324 2325 // gpr_index 2326 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 2327 VAListPtr, MachinePointerInfo(SV), MVT::i8, 2328 false, false, false, 0); 2329 InChain = GprIndex.getValue(1); 2330 2331 if (VT == MVT::i64) { 2332 // Check if GprIndex is even 2333 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex, 2334 DAG.getConstant(1, dl, MVT::i32)); 2335 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd, 2336 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE); 2337 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex, 2338 DAG.getConstant(1, dl, MVT::i32)); 2339 // Align GprIndex to be even if it isn't 2340 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne, 2341 GprIndex); 2342 } 2343 2344 // fpr index is 1 byte after gpr 2345 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2346 DAG.getConstant(1, dl, MVT::i32)); 2347 2348 // fpr 2349 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 2350 FprPtr, MachinePointerInfo(SV), MVT::i8, 2351 false, false, false, 0); 2352 InChain = FprIndex.getValue(1); 2353 2354 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2355 DAG.getConstant(8, dl, MVT::i32)); 2356 2357 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2358 DAG.getConstant(4, dl, MVT::i32)); 2359 2360 // areas 2361 SDValue OverflowArea = 
      DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr,
                  MachinePointerInfo(), false, false,
                  false, 0);
  InChain = OverflowArea.getValue(1);

  SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr,
                                    MachinePointerInfo(), false, false,
                                    false, 0);
  InChain = RegSaveArea.getValue(1);

  // select overflow_area if index >= 8
  SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
                            DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);

  // adjustment constant gpr_index * 4/8
  SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
                                    VT.isInteger() ? GprIndex : FprIndex,
                                    DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
                                                    MVT::i32));

  // OurReg = RegSaveArea + RegConstant
  SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
                               RegConstant);

  // Floating types are 32 bytes into RegSaveArea
  if (VT.isFloatingPoint())
    OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
                         DAG.getConstant(32, dl, MVT::i32));

  // increase {f,g}pr_index by 1 (or 2 if VT is i64)
  SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
                                   VT.isInteger() ? GprIndex : FprIndex,
                                   DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
                                                   MVT::i32));

  InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
                              VT.isInteger() ? VAListPtr : FprPtr,
                              MachinePointerInfo(SV),
                              MVT::i8, false, false, 0);

  // determine if we should load from reg_save_area or overflow_area
  SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg,
                               OverflowArea);

  // increase overflow_area by 4/8 if gpr/fpr index >= 8
  SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
                                          DAG.getConstant(VT.isInteger() ?
4 : 8, 2407 dl, MVT::i32)); 2408 2409 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea, 2410 OverflowAreaPlusN); 2411 2412 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, 2413 OverflowAreaPtr, 2414 MachinePointerInfo(), 2415 MVT::i32, false, false, 0); 2416 2417 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(), 2418 false, false, false, 0); 2419 } 2420 2421 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG, 2422 const PPCSubtarget &Subtarget) const { 2423 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only"); 2424 2425 // We have to copy the entire va_list struct: 2426 // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte 2427 return DAG.getMemcpy(Op.getOperand(0), Op, 2428 Op.getOperand(1), Op.getOperand(2), 2429 DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true, 2430 false, MachinePointerInfo(), MachinePointerInfo()); 2431 } 2432 2433 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 2434 SelectionDAG &DAG) const { 2435 return Op.getOperand(0); 2436 } 2437 2438 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 2439 SelectionDAG &DAG) const { 2440 SDValue Chain = Op.getOperand(0); 2441 SDValue Trmp = Op.getOperand(1); // trampoline 2442 SDValue FPtr = Op.getOperand(2); // nested function 2443 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 2444 SDLoc dl(Op); 2445 2446 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 2447 bool isPPC64 = (PtrVT == MVT::i64); 2448 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext()); 2449 2450 TargetLowering::ArgListTy Args; 2451 TargetLowering::ArgListEntry Entry; 2452 2453 Entry.Ty = IntPtrTy; 2454 Entry.Node = Trmp; Args.push_back(Entry); 2455 2456 // TrampSize == (isPPC64 ? 48 : 40); 2457 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl, 2458 isPPC64 ? MVT::i64 : MVT::i32); 2459 Args.push_back(Entry); 2460 2461 Entry.Node = FPtr; Args.push_back(Entry); 2462 Entry.Node = Nest; Args.push_back(Entry); 2463 2464 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 2465 TargetLowering::CallLoweringInfo CLI(DAG); 2466 CLI.setDebugLoc(dl).setChain(Chain) 2467 .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()), 2468 DAG.getExternalSymbol("__trampoline_setup", PtrVT), 2469 std::move(Args), 0); 2470 2471 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 2472 return CallResult.second; 2473 } 2474 2475 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG, 2476 const PPCSubtarget &Subtarget) const { 2477 MachineFunction &MF = DAG.getMachineFunction(); 2478 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2479 2480 SDLoc dl(Op); 2481 2482 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) { 2483 // vastart just stores the address of the VarArgsFrameIndex slot into the 2484 // memory location argument. 2485 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 2486 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2487 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2488 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2489 MachinePointerInfo(SV), 2490 false, false, 0); 2491 } 2492 2493 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 2494 // We suppose the given va_list is already allocated. 
2495 // 2496 // typedef struct { 2497 // char gpr; /* index into the array of 8 GPRs 2498 // * stored in the register save area 2499 // * gpr=0 corresponds to r3, 2500 // * gpr=1 to r4, etc. 2501 // */ 2502 // char fpr; /* index into the array of 8 FPRs 2503 // * stored in the register save area 2504 // * fpr=0 corresponds to f1, 2505 // * fpr=1 to f2, etc. 2506 // */ 2507 // char *overflow_arg_area; 2508 // /* location on stack that holds 2509 // * the next overflow argument 2510 // */ 2511 // char *reg_save_area; 2512 // /* where r3:r10 and f1:f8 (if saved) 2513 // * are stored 2514 // */ 2515 // } va_list[1]; 2516 2517 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32); 2518 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32); 2519 2520 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 2521 2522 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 2523 PtrVT); 2524 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 2525 PtrVT); 2526 2527 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 2528 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT); 2529 2530 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 2531 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT); 2532 2533 uint64_t FPROffset = 1; 2534 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT); 2535 2536 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2537 2538 // Store first byte : number of int regs 2539 SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, 2540 Op.getOperand(1), 2541 MachinePointerInfo(SV), 2542 MVT::i8, false, false, 0); 2543 uint64_t nextOffset = FPROffset; 2544 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 2545 ConstFPROffset); 2546 2547 // Store second byte : number of float regs 2548 SDValue secondStore = 2549 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 2550 MachinePointerInfo(SV, nextOffset), MVT::i8, 2551 false, false, 0); 2552 nextOffset += StackOffset; 2553 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 2554 2555 // Store second word : arguments given on stack 2556 SDValue thirdStore = 2557 DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 2558 MachinePointerInfo(SV, nextOffset), 2559 false, false, 0); 2560 nextOffset += FrameOffset; 2561 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 2562 2563 // Store third word : arguments given in registers 2564 return DAG.getStore(thirdStore, dl, FR, nextPtr, 2565 MachinePointerInfo(SV, nextOffset), 2566 false, false, 0); 2567 2568 } 2569 2570 #include "PPCGenCallingConv.inc" 2571 2572 // Function whose sole purpose is to kill compiler warnings 2573 // stemming from unused functions included from PPCGenCallingConv.inc. 2574 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const { 2575 return Flag ? 
CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS; 2576 } 2577 2578 bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 2579 CCValAssign::LocInfo &LocInfo, 2580 ISD::ArgFlagsTy &ArgFlags, 2581 CCState &State) { 2582 return true; 2583 } 2584 2585 bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, 2586 MVT &LocVT, 2587 CCValAssign::LocInfo &LocInfo, 2588 ISD::ArgFlagsTy &ArgFlags, 2589 CCState &State) { 2590 static const MCPhysReg ArgRegs[] = { 2591 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2592 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2593 }; 2594 const unsigned NumArgRegs = array_lengthof(ArgRegs); 2595 2596 unsigned RegNum = State.getFirstUnallocated(ArgRegs); 2597 2598 // Skip one register if the first unallocated register has an even register 2599 // number and there are still argument registers available which have not been 2600 // allocated yet. RegNum is actually an index into ArgRegs, which means we 2601 // need to skip a register if RegNum is odd. 2602 if (RegNum != NumArgRegs && RegNum % 2 == 1) { 2603 State.AllocateReg(ArgRegs[RegNum]); 2604 } 2605 2606 // Always return false here, as this function only makes sure that the first 2607 // unallocated register has an odd register number and does not actually 2608 // allocate a register for the current argument. 2609 return false; 2610 } 2611 2612 bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT, 2613 MVT &LocVT, 2614 CCValAssign::LocInfo &LocInfo, 2615 ISD::ArgFlagsTy &ArgFlags, 2616 CCState &State) { 2617 static const MCPhysReg ArgRegs[] = { 2618 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 2619 PPC::F8 2620 }; 2621 2622 const unsigned NumArgRegs = array_lengthof(ArgRegs); 2623 2624 unsigned RegNum = State.getFirstUnallocated(ArgRegs); 2625 2626 // If there is only one Floating-point register left we need to put both f64 2627 // values of a split ppc_fp128 value on the stack. 2628 if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) { 2629 State.AllocateReg(ArgRegs[RegNum]); 2630 } 2631 2632 // Always return false here, as this function only makes sure that the two f64 2633 // values a ppc_fp128 value is split into are both passed in registers or both 2634 // passed on the stack and does not actually allocate a register for the 2635 // current argument. 2636 return false; 2637 } 2638 2639 /// FPR - The set of FP registers that should be allocated for arguments, 2640 /// on Darwin. 2641 static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, 2642 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10, 2643 PPC::F11, PPC::F12, PPC::F13}; 2644 2645 /// QFPR - The set of QPX registers that should be allocated for arguments. 2646 static const MCPhysReg QFPR[] = { 2647 PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7, 2648 PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13}; 2649 2650 /// CalculateStackSlotSize - Calculates the size reserved for this argument on 2651 /// the stack. 2652 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 2653 unsigned PtrByteSize) { 2654 unsigned ArgSize = ArgVT.getStoreSize(); 2655 if (Flags.isByVal()) 2656 ArgSize = Flags.getByValSize(); 2657 2658 // Round up to multiples of the pointer size, except for array members, 2659 // which are always packed. 
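  // E.g. a 10-byte byval argument occupies a 16-byte slot when PtrByteSize
  // is 8.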
2660 if (!Flags.isInConsecutiveRegs()) 2661 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2662 2663 return ArgSize; 2664 } 2665 2666 /// CalculateStackSlotAlignment - Calculates the alignment of this argument 2667 /// on the stack. 2668 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, 2669 ISD::ArgFlagsTy Flags, 2670 unsigned PtrByteSize) { 2671 unsigned Align = PtrByteSize; 2672 2673 // Altivec parameters are padded to a 16 byte boundary. 2674 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 2675 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 2676 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 2677 ArgVT == MVT::v1i128) 2678 Align = 16; 2679 // QPX vector types stored in double-precision are padded to a 32 byte 2680 // boundary. 2681 else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1) 2682 Align = 32; 2683 2684 // ByVal parameters are aligned as requested. 2685 if (Flags.isByVal()) { 2686 unsigned BVAlign = Flags.getByValAlign(); 2687 if (BVAlign > PtrByteSize) { 2688 if (BVAlign % PtrByteSize != 0) 2689 llvm_unreachable( 2690 "ByVal alignment is not a multiple of the pointer size"); 2691 2692 Align = BVAlign; 2693 } 2694 } 2695 2696 // Array members are always packed to their original alignment. 2697 if (Flags.isInConsecutiveRegs()) { 2698 // If the array member was split into multiple registers, the first 2699 // needs to be aligned to the size of the full type. (Except for 2700 // ppcf128, which is only aligned as its f64 components.) 2701 if (Flags.isSplit() && OrigVT != MVT::ppcf128) 2702 Align = OrigVT.getStoreSize(); 2703 else 2704 Align = ArgVT.getStoreSize(); 2705 } 2706 2707 return Align; 2708 } 2709 2710 /// CalculateStackSlotUsed - Return whether this argument will use its 2711 /// stack slot (instead of being passed in registers). ArgOffset, 2712 /// AvailableFPRs, and AvailableVRs must hold the current argument 2713 /// position, and will be updated to account for this argument. 2714 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, 2715 ISD::ArgFlagsTy Flags, 2716 unsigned PtrByteSize, 2717 unsigned LinkageSize, 2718 unsigned ParamAreaSize, 2719 unsigned &ArgOffset, 2720 unsigned &AvailableFPRs, 2721 unsigned &AvailableVRs, bool HasQPX) { 2722 bool UseMemory = false; 2723 2724 // Respect alignment of argument on the stack. 2725 unsigned Align = 2726 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 2727 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 2728 // If there's no space left in the argument save area, we must 2729 // use memory (this check also catches zero-sized arguments). 2730 if (ArgOffset >= LinkageSize + ParamAreaSize) 2731 UseMemory = true; 2732 2733 // Allocate argument on the stack. 2734 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 2735 if (Flags.isInConsecutiveRegsLast()) 2736 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2737 // If we overran the argument save area, we must use memory 2738 // (this check catches arguments passed partially in memory) 2739 if (ArgOffset > LinkageSize + ParamAreaSize) 2740 UseMemory = true; 2741 2742 // However, if the argument is actually passed in an FPR or a VR, 2743 // we don't use memory after all. 2744 if (!Flags.isByVal()) { 2745 if (ArgVT == MVT::f32 || ArgVT == MVT::f64 || 2746 // QPX registers overlap with the scalar FP registers. 
2747 (HasQPX && (ArgVT == MVT::v4f32 || 2748 ArgVT == MVT::v4f64 || 2749 ArgVT == MVT::v4i1))) 2750 if (AvailableFPRs > 0) { 2751 --AvailableFPRs; 2752 return false; 2753 } 2754 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 2755 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 2756 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 2757 ArgVT == MVT::v1i128) 2758 if (AvailableVRs > 0) { 2759 --AvailableVRs; 2760 return false; 2761 } 2762 } 2763 2764 return UseMemory; 2765 } 2766 2767 /// EnsureStackAlignment - Round stack frame size up from NumBytes to 2768 /// ensure minimum alignment required for target. 2769 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, 2770 unsigned NumBytes) { 2771 unsigned TargetAlign = Lowering->getStackAlignment(); 2772 unsigned AlignMask = TargetAlign - 1; 2773 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 2774 return NumBytes; 2775 } 2776 2777 SDValue PPCTargetLowering::LowerFormalArguments( 2778 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 2779 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 2780 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 2781 if (Subtarget.isSVR4ABI()) { 2782 if (Subtarget.isPPC64()) 2783 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, 2784 dl, DAG, InVals); 2785 else 2786 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, 2787 dl, DAG, InVals); 2788 } else { 2789 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, 2790 dl, DAG, InVals); 2791 } 2792 } 2793 2794 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4( 2795 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 2796 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 2797 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 2798 2799 // 32-bit SVR4 ABI Stack Frame Layout: 2800 // +-----------------------------------+ 2801 // +--> | Back chain | 2802 // | +-----------------------------------+ 2803 // | | Floating-point register save area | 2804 // | +-----------------------------------+ 2805 // | | General register save area | 2806 // | +-----------------------------------+ 2807 // | | CR save word | 2808 // | +-----------------------------------+ 2809 // | | VRSAVE save word | 2810 // | +-----------------------------------+ 2811 // | | Alignment padding | 2812 // | +-----------------------------------+ 2813 // | | Vector register save area | 2814 // | +-----------------------------------+ 2815 // | | Local variable space | 2816 // | +-----------------------------------+ 2817 // | | Parameter list area | 2818 // | +-----------------------------------+ 2819 // | | LR save word | 2820 // | +-----------------------------------+ 2821 // SP--> +--- | Back chain | 2822 // +-----------------------------------+ 2823 // 2824 // Specifications: 2825 // System V Application Binary Interface PowerPC Processor Supplement 2826 // AltiVec Technology Programming Interface Manual 2827 2828 MachineFunction &MF = DAG.getMachineFunction(); 2829 MachineFrameInfo *MFI = MF.getFrameInfo(); 2830 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2831 2832 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 2833 // Potential tail calls could cause overwriting of argument stack slots. 2834 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 2835 (CallConv == CallingConv::Fast)); 2836 unsigned PtrByteSize = 4; 2837 2838 // Assign locations to all of the incoming arguments. 
2839 SmallVector<CCValAssign, 16> ArgLocs; 2840 PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 2841 *DAG.getContext()); 2842 2843 // Reserve space for the linkage area on the stack. 2844 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 2845 CCInfo.AllocateStack(LinkageSize, PtrByteSize); 2846 if (Subtarget.useSoftFloat()) 2847 CCInfo.PreAnalyzeFormalArguments(Ins); 2848 2849 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 2850 CCInfo.clearWasPPCF128(); 2851 2852 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2853 CCValAssign &VA = ArgLocs[i]; 2854 2855 // Arguments stored in registers. 2856 if (VA.isRegLoc()) { 2857 const TargetRegisterClass *RC; 2858 EVT ValVT = VA.getValVT(); 2859 2860 switch (ValVT.getSimpleVT().SimpleTy) { 2861 default: 2862 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 2863 case MVT::i1: 2864 case MVT::i32: 2865 RC = &PPC::GPRCRegClass; 2866 break; 2867 case MVT::f32: 2868 if (Subtarget.hasP8Vector()) 2869 RC = &PPC::VSSRCRegClass; 2870 else 2871 RC = &PPC::F4RCRegClass; 2872 break; 2873 case MVT::f64: 2874 if (Subtarget.hasVSX()) 2875 RC = &PPC::VSFRCRegClass; 2876 else 2877 RC = &PPC::F8RCRegClass; 2878 break; 2879 case MVT::v16i8: 2880 case MVT::v8i16: 2881 case MVT::v4i32: 2882 RC = &PPC::VRRCRegClass; 2883 break; 2884 case MVT::v4f32: 2885 RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass; 2886 break; 2887 case MVT::v2f64: 2888 case MVT::v2i64: 2889 RC = &PPC::VSHRCRegClass; 2890 break; 2891 case MVT::v4f64: 2892 RC = &PPC::QFRCRegClass; 2893 break; 2894 case MVT::v4i1: 2895 RC = &PPC::QBRCRegClass; 2896 break; 2897 } 2898 2899 // Transform the arguments stored in physical registers into virtual ones. 2900 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2901 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, 2902 ValVT == MVT::i1 ? MVT::i32 : ValVT); 2903 2904 if (ValVT == MVT::i1) 2905 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); 2906 2907 InVals.push_back(ArgValue); 2908 } else { 2909 // Argument stored in memory. 2910 assert(VA.isMemLoc()); 2911 2912 unsigned ArgSize = VA.getLocVT().getStoreSize(); 2913 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), 2914 isImmutable); 2915 2916 // Create load nodes to retrieve arguments from the stack. 2917 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2918 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2919 MachinePointerInfo(), 2920 false, false, false, 0)); 2921 } 2922 } 2923 2924 // Assign locations to all of the incoming aggregate by value arguments. 2925 // Aggregates passed by value are stored in the local variable space of the 2926 // caller's stack frame, right above the parameter list area. 2927 SmallVector<CCValAssign, 16> ByValArgLocs; 2928 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 2929 ByValArgLocs, *DAG.getContext()); 2930 2931 // Reserve stack space for the allocations in CCInfo. 2932 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 2933 2934 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 2935 2936 // Area that is at least reserved in the caller of this function. 2937 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 2938 MinReservedArea = std::max(MinReservedArea, LinkageSize); 2939 2940 // Set the size that is at least reserved in caller of this function. 
Tail
2941   // call optimized function's reserved stack space needs to be aligned so that
2942   // taking the difference between two stack areas will result in an aligned
2943   // stack.
2944   MinReservedArea =
2945       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
2946   FuncInfo->setMinReservedArea(MinReservedArea);
2947 
2948   SmallVector<SDValue, 8> MemOps;
2949 
2950   // If the function takes a variable number of arguments, make a frame index
2951   // for the start of the first vararg value... for expansion of llvm.va_start.
2952   if (isVarArg) {
2953     static const MCPhysReg GPArgRegs[] = {
2954       PPC::R3, PPC::R4, PPC::R5, PPC::R6,
2955       PPC::R7, PPC::R8, PPC::R9, PPC::R10,
2956     };
2957     const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
2958 
2959     static const MCPhysReg FPArgRegs[] = {
2960       PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
2961       PPC::F8
2962     };
2963     unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
2964 
2965     if (Subtarget.useSoftFloat())
2966       NumFPArgRegs = 0;
2967 
2968     FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
2969     FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
2970 
2971     // Make room for NumGPArgRegs and NumFPArgRegs.
2972     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
2973                 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
2974 
2975     FuncInfo->setVarArgsStackOffset(
2976       MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
2977                              CCInfo.getNextStackOffset(), true));
2978 
2979     FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false));
2980     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
2981 
2982     // The fixed integer arguments of a variadic function are stored to the
2983     // VarArgsFrameIndex on the stack so that they may be loaded by
2984     // dereferencing the result of va_next.
2985     for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
2986       // Get an existing live-in vreg, or add a new one.
2987       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
2988       if (!VReg)
2989         VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
2990 
2991       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
2992       SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
2993                                    MachinePointerInfo(), false, false, 0);
2994       MemOps.push_back(Store);
2995       // Increment the address by four for the next argument to store.
2996       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
2997       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
2998     }
2999 
3000     // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
3001     // is set.
3002     // The double arguments are stored to the VarArgsFrameIndex
3003     // on the stack.
3004     for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
3005       // Get an existing live-in vreg, or add a new one.
3006 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); 3007 if (!VReg) 3008 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 3009 3010 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 3011 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3012 MachinePointerInfo(), false, false, 0); 3013 MemOps.push_back(Store); 3014 // Increment the address by eight for the next argument to store 3015 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl, 3016 PtrVT); 3017 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3018 } 3019 } 3020 3021 if (!MemOps.empty()) 3022 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3023 3024 return Chain; 3025 } 3026 3027 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3028 // value to MVT::i64 and then truncate to the correct register size. 3029 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, 3030 EVT ObjectVT, SelectionDAG &DAG, 3031 SDValue ArgVal, 3032 const SDLoc &dl) const { 3033 if (Flags.isSExt()) 3034 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 3035 DAG.getValueType(ObjectVT)); 3036 else if (Flags.isZExt()) 3037 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 3038 DAG.getValueType(ObjectVT)); 3039 3040 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal); 3041 } 3042 3043 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4( 3044 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3045 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3046 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3047 // TODO: add description of PPC stack frame format, or at least some docs. 3048 // 3049 bool isELFv2ABI = Subtarget.isELFv2ABI(); 3050 bool isLittleEndian = Subtarget.isLittleEndian(); 3051 MachineFunction &MF = DAG.getMachineFunction(); 3052 MachineFrameInfo *MFI = MF.getFrameInfo(); 3053 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3054 3055 assert(!(CallConv == CallingConv::Fast && isVarArg) && 3056 "fastcc not supported on varargs functions"); 3057 3058 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 3059 // Potential tail calls could cause overwriting of argument stack slots. 3060 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3061 (CallConv == CallingConv::Fast)); 3062 unsigned PtrByteSize = 8; 3063 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3064 3065 static const MCPhysReg GPR[] = { 3066 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3067 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3068 }; 3069 static const MCPhysReg VR[] = { 3070 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3071 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3072 }; 3073 static const MCPhysReg VSRH[] = { 3074 PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8, 3075 PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13 3076 }; 3077 3078 const unsigned Num_GPR_Regs = array_lengthof(GPR); 3079 const unsigned Num_FPR_Regs = 13; 3080 const unsigned Num_VR_Regs = array_lengthof(VR); 3081 const unsigned Num_QFPR_Regs = Num_FPR_Regs; 3082 3083 // Do a first pass over the arguments to determine whether the ABI 3084 // guarantees that our caller has allocated the parameter save area 3085 // on its stack frame. In the ELFv1 ABI, this is always the case; 3086 // in the ELFv2 ABI, it is true if this is a vararg function or if 3087 // any parameter is located in a stack slot. 
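  // [Illustrative note, not from the original source] For example, under
  // ELFv2 a non-variadic callee such as 'long f(long a, long b)' receives
  // both arguments in GPRs, so its caller may omit the parameter save area
  // entirely; a variadic callee such as 'long g(long a, ...)', or any
  // callee with a parameter assigned to a stack slot, obliges the caller
  // to allocate it.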
3088 3089 bool HasParameterArea = !isELFv2ABI || isVarArg; 3090 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize; 3091 unsigned NumBytes = LinkageSize; 3092 unsigned AvailableFPRs = Num_FPR_Regs; 3093 unsigned AvailableVRs = Num_VR_Regs; 3094 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 3095 if (Ins[i].Flags.isNest()) 3096 continue; 3097 3098 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags, 3099 PtrByteSize, LinkageSize, ParamAreaSize, 3100 NumBytes, AvailableFPRs, AvailableVRs, 3101 Subtarget.hasQPX())) 3102 HasParameterArea = true; 3103 } 3104 3105 // Add DAG nodes to load the arguments or copy them out of registers. On 3106 // entry to a function on PPC, the arguments start after the linkage area, 3107 // although the first ones are often in registers. 3108 3109 unsigned ArgOffset = LinkageSize; 3110 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3111 unsigned &QFPR_idx = FPR_idx; 3112 SmallVector<SDValue, 8> MemOps; 3113 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 3114 unsigned CurArgIdx = 0; 3115 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3116 SDValue ArgVal; 3117 bool needsLoad = false; 3118 EVT ObjectVT = Ins[ArgNo].VT; 3119 EVT OrigVT = Ins[ArgNo].ArgVT; 3120 unsigned ObjSize = ObjectVT.getStoreSize(); 3121 unsigned ArgSize = ObjSize; 3122 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3123 if (Ins[ArgNo].isOrigArg()) { 3124 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 3125 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 3126 } 3127 // We re-align the argument offset for each argument, except when using the 3128 // fast calling convention, when we need to make sure we do that only when 3129 // we'll actually use a stack slot. 3130 unsigned CurArgOffset, Align; 3131 auto ComputeArgOffset = [&]() { 3132 /* Respect alignment of argument on the stack. */ 3133 Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize); 3134 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 3135 CurArgOffset = ArgOffset; 3136 }; 3137 3138 if (CallConv != CallingConv::Fast) { 3139 ComputeArgOffset(); 3140 3141 /* Compute GPR index associated with argument offset. */ 3142 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 3143 GPR_idx = std::min(GPR_idx, Num_GPR_Regs); 3144 } 3145 3146 // FIXME the codegen can be much improved in some cases. 3147 // We do not have to keep everything in memory. 3148 if (Flags.isByVal()) { 3149 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 3150 3151 if (CallConv == CallingConv::Fast) 3152 ComputeArgOffset(); 3153 3154 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3155 ObjSize = Flags.getByValSize(); 3156 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3157 // Empty aggregate parameters do not take up registers. Examples: 3158 // struct { } a; 3159 // union { } b; 3160 // int c[0]; 3161 // etc. However, we have to provide a place-holder in InVals, so 3162 // pretend we have an 8-byte item at the current address for that 3163 // purpose. 3164 if (!ObjSize) { 3165 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 3166 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3167 InVals.push_back(FIN); 3168 continue; 3169 } 3170 3171 // Create a stack object covering all stack doublewords occupied 3172 // by the argument. 
If the argument is (fully or partially) on 3173 // the stack, or if the argument is fully in registers but the 3174 // caller has allocated the parameter save anyway, we can refer 3175 // directly to the caller's stack frame. Otherwise, create a 3176 // local copy in our own frame. 3177 int FI; 3178 if (HasParameterArea || 3179 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize) 3180 FI = MFI->CreateFixedObject(ArgSize, ArgOffset, false, true); 3181 else 3182 FI = MFI->CreateStackObject(ArgSize, Align, false); 3183 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3184 3185 // Handle aggregates smaller than 8 bytes. 3186 if (ObjSize < PtrByteSize) { 3187 // The value of the object is its address, which differs from the 3188 // address of the enclosing doubleword on big-endian systems. 3189 SDValue Arg = FIN; 3190 if (!isLittleEndian) { 3191 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT); 3192 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff); 3193 } 3194 InVals.push_back(Arg); 3195 3196 if (GPR_idx != Num_GPR_Regs) { 3197 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3198 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3199 SDValue Store; 3200 3201 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 3202 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 3203 (ObjSize == 2 ? MVT::i16 : MVT::i32)); 3204 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg, 3205 MachinePointerInfo(&*FuncArg), ObjType, 3206 false, false, 0); 3207 } else { 3208 // For sizes that don't fit a truncating store (3, 5, 6, 7), 3209 // store the whole register as-is to the parameter save area 3210 // slot. 3211 Store = 3212 DAG.getStore(Val.getValue(1), dl, Val, FIN, 3213 MachinePointerInfo(&*FuncArg), false, false, 0); 3214 } 3215 3216 MemOps.push_back(Store); 3217 } 3218 // Whether we copied from a register or not, advance the offset 3219 // into the parameter save area by a full doubleword. 3220 ArgOffset += PtrByteSize; 3221 continue; 3222 } 3223 3224 // The value of the object is its address, which is the address of 3225 // its first stack doubleword. 3226 InVals.push_back(FIN); 3227 3228 // Store whatever pieces of the object are in registers to memory. 3229 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3230 if (GPR_idx == Num_GPR_Regs) 3231 break; 3232 3233 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3234 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3235 SDValue Addr = FIN; 3236 if (j) { 3237 SDValue Off = DAG.getConstant(j, dl, PtrVT); 3238 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off); 3239 } 3240 SDValue Store = 3241 DAG.getStore(Val.getValue(1), dl, Val, Addr, 3242 MachinePointerInfo(&*FuncArg, j), false, false, 0); 3243 MemOps.push_back(Store); 3244 ++GPR_idx; 3245 } 3246 ArgOffset += ArgSize; 3247 continue; 3248 } 3249 3250 switch (ObjectVT.getSimpleVT().SimpleTy) { 3251 default: llvm_unreachable("Unhandled argument type!"); 3252 case MVT::i1: 3253 case MVT::i32: 3254 case MVT::i64: 3255 if (Flags.isNest()) { 3256 // The 'nest' parameter, if any, is passed in R11. 3257 unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass); 3258 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3259 3260 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3261 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3262 3263 break; 3264 } 3265 3266 // These can be scalar arguments or elements of an integer array type 3267 // passed directly. 
Clang may use those instead of "byval" aggregate
3268       // types to avoid forcing arguments to memory unnecessarily.
3269       if (GPR_idx != Num_GPR_Regs) {
3270         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3271         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3272 
3273         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
3274           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3275           // value to MVT::i64 and then truncate to the correct register size.
3276           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3277       } else {
3278         if (CallConv == CallingConv::Fast)
3279           ComputeArgOffset();
3280 
3281         needsLoad = true;
3282         ArgSize = PtrByteSize;
3283       }
3284       if (CallConv != CallingConv::Fast || needsLoad)
3285         ArgOffset += 8;
3286       break;
3287 
3288     case MVT::f32:
3289     case MVT::f64:
3290       // These can be scalar arguments or elements of a float array type
3291       // passed directly. The latter are used to implement ELFv2 homogeneous
3292       // float aggregates.
3293       if (FPR_idx != Num_FPR_Regs) {
3294         unsigned VReg;
3295 
3296         if (ObjectVT == MVT::f32)
3297           VReg = MF.addLiveIn(FPR[FPR_idx],
3298                               Subtarget.hasP8Vector()
3299                                 ? &PPC::VSSRCRegClass
3300                                 : &PPC::F4RCRegClass);
3301         else
3302           VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
3303                                               ? &PPC::VSFRCRegClass
3304                                               : &PPC::F8RCRegClass);
3305 
3306         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3307         ++FPR_idx;
3308       } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
3309         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
3310         // once we support fp <-> gpr moves.
3311 
3312         // This can only ever happen in the presence of f32 array types,
3313         // since otherwise we never run out of FPRs before running out
3314         // of GPRs.
3315         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3316         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3317 
3318         if (ObjectVT == MVT::f32) {
3319           if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
3320             ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
3321                                  DAG.getConstant(32, dl, MVT::i32));
3322           ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
3323         }
3324 
3325         ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
3326       } else {
3327         if (CallConv == CallingConv::Fast)
3328           ComputeArgOffset();
3329 
3330         needsLoad = true;
3331       }
3332 
3333       // When passing an array of floats, the array occupies consecutive
3334       // space in the argument area; only round up to the next doubleword
3335       // at the end of the array. Otherwise, each float takes 8 bytes.
3336       if (CallConv != CallingConv::Fast || needsLoad) {
3337         ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
3338         ArgOffset += ArgSize;
3339         if (Flags.isInConsecutiveRegsLast())
3340           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3341       }
3342       break;
3343     case MVT::v4f32:
3344     case MVT::v4i32:
3345     case MVT::v8i16:
3346     case MVT::v16i8:
3347     case MVT::v2f64:
3348     case MVT::v2i64:
3349     case MVT::v1i128:
3350       if (!Subtarget.hasQPX()) {
3351       // These can be scalar arguments or elements of a vector array type
3352       // passed directly. The latter are used to implement ELFv2 homogeneous
3353       // vector aggregates.
3354       if (VR_idx != Num_VR_Regs) {
3355         unsigned VReg = (ObjectVT == MVT::v2f64 || ObjectVT == MVT::v2i64) ?
3356                         MF.addLiveIn(VSRH[VR_idx], &PPC::VSHRCRegClass) :
3357                         MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
3358         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3359         ++VR_idx;
3360       } else {
3361         if (CallConv == CallingConv::Fast)
3362           ComputeArgOffset();
3363 
3364         needsLoad = true;
3365       }
3366       if (CallConv != CallingConv::Fast || needsLoad)
3367         ArgOffset += 16;
3368       break;
3369       } // not QPX
3370 
3371       assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 &&
3372              "Invalid QPX parameter type");
3373       /* fall through */
3374 
3375     case MVT::v4f64:
3376     case MVT::v4i1:
3377       // QPX vectors are treated like their scalar floating-point subregisters
3378       // (except that they're larger).
3379       unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32;
3380       if (QFPR_idx != Num_QFPR_Regs) {
3381         const TargetRegisterClass *RC;
3382         switch (ObjectVT.getSimpleVT().SimpleTy) {
3383         case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
3384         case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
3385         default:         RC = &PPC::QBRCRegClass; break;
3386         }
3387 
3388         unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
3389         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3390         ++QFPR_idx;
3391       } else {
3392         if (CallConv == CallingConv::Fast)
3393           ComputeArgOffset();
3394         needsLoad = true;
3395       }
3396       if (CallConv != CallingConv::Fast || needsLoad)
3397         ArgOffset += Sz;
3398       break;
3399     }
3400 
3401     // We need to load the argument to a virtual register if we determined
3402     // above that we ran out of physical registers of the appropriate type.
3403     if (needsLoad) {
3404       if (ObjSize < ArgSize && !isLittleEndian)
3405         CurArgOffset += ArgSize - ObjSize;
3406       int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
3407       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3408       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
3409                            false, false, false, 0);
3410     }
3411 
3412     InVals.push_back(ArgVal);
3413   }
3414 
3415   // Area that is at least reserved in the caller of this function.
3416   unsigned MinReservedArea;
3417   if (HasParameterArea)
3418     MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
3419   else
3420     MinReservedArea = LinkageSize;
3421 
3422   // Set the size that is at least reserved in caller of this function. Tail
3423   // call optimized functions' reserved stack space needs to be aligned so that
3424   // taking the difference between two stack areas will result in an aligned
3425   // stack.
3426   MinReservedArea =
3427       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3428   FuncInfo->setMinReservedArea(MinReservedArea);
3429 
3430   // If the function takes a variable number of arguments, make a frame index
3431   // for the start of the first vararg value... for expansion of llvm.va_start.
3432   if (isVarArg) {
3433     int Depth = ArgOffset;
3434 
3435     FuncInfo->setVarArgsFrameIndex(
3436       MFI->CreateFixedObject(PtrByteSize, Depth, true));
3437     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3438 
3439     // If this function is vararg, store any remaining integer argument regs
3440     // to their spots on the stack so that they may be loaded by dereferencing
3441     // the result of va_next.
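    // [Illustrative sketch, assumed example] For a callee declared as
    // 'long f(long a, ...)', 'a' occupies X3 and leaves ArgOffset at
    // LinkageSize + 8, so the loop below starts at GPR_idx == 1 and spills
    // X4..X10 into consecutive doublewords for later retrieval via va_arg.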
3442     for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
3443          GPR_idx < Num_GPR_Regs; ++GPR_idx) {
3444       unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3445       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3446       SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
3447                                    MachinePointerInfo(), false, false, 0);
3448       MemOps.push_back(Store);
3449       // Increment the address by PtrByteSize for the next argument to store.
3450       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
3451       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3452     }
3453   }
3454 
3455   if (!MemOps.empty())
3456     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3457 
3458   return Chain;
3459 }
3460 
3461 SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
3462     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3463     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3464     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3465   // TODO: add description of PPC stack frame format, or at least some docs.
3466   //
3467   MachineFunction &MF = DAG.getMachineFunction();
3468   MachineFrameInfo *MFI = MF.getFrameInfo();
3469   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3470 
3471   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
3472   bool isPPC64 = PtrVT == MVT::i64;
3473   // Potential tail calls could cause overwriting of argument stack slots.
3474   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3475                        (CallConv == CallingConv::Fast));
3476   unsigned PtrByteSize = isPPC64 ? 8 : 4;
3477   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3478   unsigned ArgOffset = LinkageSize;
3479   // Area that is at least reserved in caller of this function.
3480   unsigned MinReservedArea = ArgOffset;
3481 
3482   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
3483     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3484     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3485   };
3486   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
3487     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3488     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3489   };
3490   static const MCPhysReg VR[] = {
3491     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3492     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3493   };
3494 
3495   const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
3496   const unsigned Num_FPR_Regs = 13;
3497   const unsigned Num_VR_Regs  = array_lengthof(VR);
3498 
3499   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3500 
3501   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
3502 
3503   // In 32-bit non-varargs functions, the stack space for vectors is after the
3504   // stack space for non-vectors. We do not use this space unless we have
3505   // too many vectors to fit in registers, something that only occurs in
3506   // constructed examples, but we have to walk the arglist to figure
3507   // that out; for the pathological case, compute VecArgOffset as the
3508   // start of the vector parameter area. Computing VecArgOffset is the
3509   // entire point of the following loop.
3510   unsigned VecArgOffset = ArgOffset;
3511   if (!isVarArg && !isPPC64) {
3512     for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
3513          ++ArgNo) {
3514       EVT ObjectVT = Ins[ArgNo].VT;
3515       ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
3516 
3517       if (Flags.isByVal()) {
3518         // ObjSize is the true size, ArgSize rounded up to multiple of regs.
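        // [Illustrative example, not in the original] A 10-byte struct passed
        // byval on this 32-bit path gives ObjSize == 10 and ArgSize == 12,
        // i.e. three 4-byte registers' worth of space.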
3519 unsigned ObjSize = Flags.getByValSize(); 3520 unsigned ArgSize = 3521 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3522 VecArgOffset += ArgSize; 3523 continue; 3524 } 3525 3526 switch(ObjectVT.getSimpleVT().SimpleTy) { 3527 default: llvm_unreachable("Unhandled argument type!"); 3528 case MVT::i1: 3529 case MVT::i32: 3530 case MVT::f32: 3531 VecArgOffset += 4; 3532 break; 3533 case MVT::i64: // PPC64 3534 case MVT::f64: 3535 // FIXME: We are guaranteed to be !isPPC64 at this point. 3536 // Does MVT::i64 apply? 3537 VecArgOffset += 8; 3538 break; 3539 case MVT::v4f32: 3540 case MVT::v4i32: 3541 case MVT::v8i16: 3542 case MVT::v16i8: 3543 // Nothing to do, we're only looking at Nonvector args here. 3544 break; 3545 } 3546 } 3547 } 3548 // We've found where the vector parameter area in memory is. Skip the 3549 // first 12 parameters; these don't use that memory. 3550 VecArgOffset = ((VecArgOffset+15)/16)*16; 3551 VecArgOffset += 12*16; 3552 3553 // Add DAG nodes to load the arguments or copy them out of registers. On 3554 // entry to a function on PPC, the arguments start after the linkage area, 3555 // although the first ones are often in registers. 3556 3557 SmallVector<SDValue, 8> MemOps; 3558 unsigned nAltivecParamsAtEnd = 0; 3559 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 3560 unsigned CurArgIdx = 0; 3561 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3562 SDValue ArgVal; 3563 bool needsLoad = false; 3564 EVT ObjectVT = Ins[ArgNo].VT; 3565 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 3566 unsigned ArgSize = ObjSize; 3567 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3568 if (Ins[ArgNo].isOrigArg()) { 3569 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 3570 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 3571 } 3572 unsigned CurArgOffset = ArgOffset; 3573 3574 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 3575 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 3576 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 3577 if (isVarArg || isPPC64) { 3578 MinReservedArea = ((MinReservedArea+15)/16)*16; 3579 MinReservedArea += CalculateStackSlotSize(ObjectVT, 3580 Flags, 3581 PtrByteSize); 3582 } else nAltivecParamsAtEnd++; 3583 } else 3584 // Calculate min reserved area. 3585 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 3586 Flags, 3587 PtrByteSize); 3588 3589 // FIXME the codegen can be much improved in some cases. 3590 // We do not have to keep everything in memory. 3591 if (Flags.isByVal()) { 3592 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 3593 3594 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3595 ObjSize = Flags.getByValSize(); 3596 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3597 // Objects of size 1 and 2 are right justified, everything else is 3598 // left justified. This means the memory address is adjusted forwards. 3599 if (ObjSize==1 || ObjSize==2) { 3600 CurArgOffset = CurArgOffset + (4 - ObjSize); 3601 } 3602 // The value of the object is its address. 
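      // [Illustrative note, assumed example] On this big-endian ABI a 1-byte
      // aggregate is right justified within its 4-byte slot, so CurArgOffset
      // was bumped by 3 above and the address computed here points at the
      // byte that actually holds the data.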
3603 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, false, true); 3604 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3605 InVals.push_back(FIN); 3606 if (ObjSize==1 || ObjSize==2) { 3607 if (GPR_idx != Num_GPR_Regs) { 3608 unsigned VReg; 3609 if (isPPC64) 3610 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3611 else 3612 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3613 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3614 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16; 3615 SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 3616 MachinePointerInfo(&*FuncArg), 3617 ObjType, false, false, 0); 3618 MemOps.push_back(Store); 3619 ++GPR_idx; 3620 } 3621 3622 ArgOffset += PtrByteSize; 3623 3624 continue; 3625 } 3626 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3627 // Store whatever pieces of the object are in registers 3628 // to memory. ArgOffset will be the address of the beginning 3629 // of the object. 3630 if (GPR_idx != Num_GPR_Regs) { 3631 unsigned VReg; 3632 if (isPPC64) 3633 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3634 else 3635 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3636 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 3637 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3638 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3639 SDValue Store = 3640 DAG.getStore(Val.getValue(1), dl, Val, FIN, 3641 MachinePointerInfo(&*FuncArg, j), false, false, 0); 3642 MemOps.push_back(Store); 3643 ++GPR_idx; 3644 ArgOffset += PtrByteSize; 3645 } else { 3646 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 3647 break; 3648 } 3649 } 3650 continue; 3651 } 3652 3653 switch (ObjectVT.getSimpleVT().SimpleTy) { 3654 default: llvm_unreachable("Unhandled argument type!"); 3655 case MVT::i1: 3656 case MVT::i32: 3657 if (!isPPC64) { 3658 if (GPR_idx != Num_GPR_Regs) { 3659 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3660 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 3661 3662 if (ObjectVT == MVT::i1) 3663 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal); 3664 3665 ++GPR_idx; 3666 } else { 3667 needsLoad = true; 3668 ArgSize = PtrByteSize; 3669 } 3670 // All int arguments reserve stack space in the Darwin ABI. 3671 ArgOffset += PtrByteSize; 3672 break; 3673 } 3674 // FALLTHROUGH 3675 case MVT::i64: // PPC64 3676 if (GPR_idx != Num_GPR_Regs) { 3677 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3678 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3679 3680 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3681 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3682 // value to MVT::i64 and then truncate to the correct register size. 3683 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3684 3685 ++GPR_idx; 3686 } else { 3687 needsLoad = true; 3688 ArgSize = PtrByteSize; 3689 } 3690 // All int arguments reserve stack space in the Darwin ABI. 3691 ArgOffset += 8; 3692 break; 3693 3694 case MVT::f32: 3695 case MVT::f64: 3696 // Every 4 bytes of argument space consumes one of the GPRs available for 3697 // argument passing. 
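      // [Illustrative example, not from the original source] On 32-bit
      // Darwin, 'void f(float a, double b, int c)' passes 'a' in F1
      // (shadowing R3) and 'b' in F2 (shadowing R4 and R5), so 'c' ends up
      // in R6; the increments below implement exactly that shadowing.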
3698       if (GPR_idx != Num_GPR_Regs) {
3699         ++GPR_idx;
3700         if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
3701           ++GPR_idx;
3702       }
3703       if (FPR_idx != Num_FPR_Regs) {
3704         unsigned VReg;
3705 
3706         if (ObjectVT == MVT::f32)
3707           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
3708         else
3709           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
3710 
3711         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3712         ++FPR_idx;
3713       } else {
3714         needsLoad = true;
3715       }
3716 
3717       // All FP arguments reserve stack space in the Darwin ABI.
3718       ArgOffset += isPPC64 ? 8 : ObjSize;
3719       break;
3720     case MVT::v4f32:
3721     case MVT::v4i32:
3722     case MVT::v8i16:
3723     case MVT::v16i8:
3724       // Note that vector arguments in registers don't reserve stack space,
3725       // except in varargs functions.
3726       if (VR_idx != Num_VR_Regs) {
3727         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
3728         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3729         if (isVarArg) {
3730           while ((ArgOffset % 16) != 0) {
3731             ArgOffset += PtrByteSize;
3732             if (GPR_idx != Num_GPR_Regs)
3733               GPR_idx++;
3734           }
3735           ArgOffset += 16;
3736           GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
3737         }
3738         ++VR_idx;
3739       } else {
3740         if (!isVarArg && !isPPC64) {
3741           // Vectors go after all the non-vectors.
3742           CurArgOffset = VecArgOffset;
3743           VecArgOffset += 16;
3744         } else {
3745           // Vectors are aligned.
3746           ArgOffset = ((ArgOffset+15)/16)*16;
3747           CurArgOffset = ArgOffset;
3748           ArgOffset += 16;
3749         }
3750         needsLoad = true;
3751       }
3752       break;
3753     }
3754 
3755     // We need to load the argument to a virtual register if we determined above
3756     // that we ran out of physical registers of the appropriate type.
3757     if (needsLoad) {
3758       int FI = MFI->CreateFixedObject(ObjSize,
3759                                       CurArgOffset + (ArgSize - ObjSize),
3760                                       isImmutable);
3761       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3762       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
3763                            false, false, false, 0);
3764     }
3765 
3766     InVals.push_back(ArgVal);
3767   }
3768 
3769   // Allow for Altivec parameters at the end, if needed.
3770   if (nAltivecParamsAtEnd) {
3771     MinReservedArea = ((MinReservedArea+15)/16)*16;
3772     MinReservedArea += 16*nAltivecParamsAtEnd;
3773   }
3774 
3775   // Area that is at least reserved in the caller of this function.
3776   MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
3777 
3778   // Set the size that is at least reserved in caller of this function. Tail
3779   // call optimized functions' reserved stack space needs to be aligned so that
3780   // taking the difference between two stack areas will result in an aligned
3781   // stack.
3782   MinReservedArea =
3783       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3784   FuncInfo->setMinReservedArea(MinReservedArea);
3785 
3786   // If the function takes a variable number of arguments, make a frame index
3787   // for the start of the first vararg value... for expansion of llvm.va_start.
3788   if (isVarArg) {
3789     int Depth = ArgOffset;
3790 
3791     FuncInfo->setVarArgsFrameIndex(
3792       MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
3793                              Depth, true));
3794     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3795 
3796     // If this function is vararg, store any remaining integer argument regs
3797     // to their spots on the stack so that they may be loaded by dereferencing
3798     // the result of va_next.
3799     for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
3800       unsigned VReg;
3801 
3802       if (isPPC64)
3803         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3804       else
3805         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
3806 
3807       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3808       SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
3809                                    MachinePointerInfo(), false, false, 0);
3810       MemOps.push_back(Store);
3811       // Increment the address by the pointer size for the next argument to
3812       // store.
SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
3813       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3814     }
3815   }
3816 
3817   if (!MemOps.empty())
3818     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3819 
3820   return Chain;
3821 }
3822 
3823 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
3824 /// adjusted to accommodate the arguments for the tailcall.
3825 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
3826                                    unsigned ParamSize) {
3827 
3828   if (!isTailCall) return 0;
3829 
3830   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
3831   unsigned CallerMinReservedArea = FI->getMinReservedArea();
3832   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
3833   // Remember only if the new adjustment is bigger.
3834   if (SPDiff < FI->getTailCallSPDelta())
3835     FI->setTailCallSPDelta(SPDiff);
3836 
3837   return SPDiff;
3838 }
3839 
3840 static bool isFunctionGlobalAddress(SDValue Callee);
3841 
3842 static bool
3843 resideInSameModule(SDValue Callee, Reloc::Model RelMod) {
3844   // If !G, Callee can be an external symbol.
3845   GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
3846   if (!G) return false;
3847 
3848   const GlobalValue *GV = G->getGlobal();
3849 
3850   if (GV->isDeclaration()) return false;
3851 
3852   switch(GV->getLinkage()) {
3853   default: llvm_unreachable("unknown linkage type");
3854   case GlobalValue::AvailableExternallyLinkage:
3855   case GlobalValue::ExternalWeakLinkage:
3856     return false;
3857 
3858   // A callee with weak linkage is allowed if it has hidden or protected
3859   // visibility.
3860   case GlobalValue::LinkOnceAnyLinkage:
3861   case GlobalValue::LinkOnceODRLinkage: // e.g. c++ inline functions
3862   case GlobalValue::WeakAnyLinkage:
3863   case GlobalValue::WeakODRLinkage: // e.g. c++ template instantiation
3864     if (GV->hasDefaultVisibility())
3865       return false;
3866 
3867   case GlobalValue::ExternalLinkage:
3868   case GlobalValue::InternalLinkage:
3869   case GlobalValue::PrivateLinkage:
3870     break;
3871   }
3872 
3873   // With '-fPIC', calling a default-visibility function requires a 'nop' to
3874   // be inserted after the call, regardless of whether the function resides in
3875   // the same module, so we treat it as residing in a different module.
3876   if (RelMod == Reloc::PIC_ && GV->hasDefaultVisibility())
3877     return false;
3878 
3879   return true;
3880 }
3881 
3882 static bool
3883 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
3884                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
3885   assert(Subtarget.isSVR4ABI() && Subtarget.isPPC64());
3886 
3887   const unsigned PtrByteSize = 8;
3888   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3889 
3890   static const MCPhysReg GPR[] = {
3891     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3892     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3893   };
3894   static const MCPhysReg VR[] = {
3895     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3896     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3897   };
3898 
3899   const unsigned NumGPRs = array_lengthof(GPR);
3900   const unsigned NumFPRs = 13;
3901   const unsigned NumVRs = array_lengthof(VR);
3902   const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
3903 
3904   unsigned NumBytes = LinkageSize;
3905   unsigned AvailableFPRs = NumFPRs;
3906   unsigned AvailableVRs = NumVRs;
3907 
3908   for (const ISD::OutputArg& Param : Outs) {
3909     if (Param.Flags.isNest()) continue;
3910 
3911     if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
3912                                PtrByteSize, LinkageSize, ParamAreaSize,
3913                                NumBytes, AvailableFPRs, AvailableVRs,
3914                                Subtarget.hasQPX()))
3915       return true;
3916   }
3917   return false;
3918 }
3919 
3920 static bool
3921 hasSameArgumentList(const Function *CallerFn, ImmutableCallSite *CS) {
3922   if (CS->arg_size() != CallerFn->getArgumentList().size())
3923     return false;
3924 
3925   ImmutableCallSite::arg_iterator CalleeArgIter = CS->arg_begin();
3926   ImmutableCallSite::arg_iterator CalleeArgEnd = CS->arg_end();
3927   Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
3928 
3929   for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
3930     const Value* CalleeArg = *CalleeArgIter;
3931     const Value* CallerArg = &(*CallerArgIter);
3932     if (CalleeArg == CallerArg)
3933       continue;
3934 
3935     // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
3936     //        tail call @callee([4 x i64] undef, [4 x i64] %b)
3937     //      }
3938     // The callee's first argument is undef and has the same type as the caller's.
3939     if (CalleeArg->getType() == CallerArg->getType() &&
3940         isa<UndefValue>(CalleeArg))
3941       continue;
3942 
3943     return false;
3944   }
3945 
3946   return true;
3947 }
3948 
3949 bool
3950 PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
3951                                     SDValue Callee,
3952                                     CallingConv::ID CalleeCC,
3953                                     ImmutableCallSite *CS,
3954                                     bool isVarArg,
3955                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
3956                                     const SmallVectorImpl<ISD::InputArg> &Ins,
3957                                     SelectionDAG& DAG) const {
3958   bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
3959 
3960   if (DisableSCO && !TailCallOpt) return false;
3961 
3962   // Variadic argument functions are not supported.
3963   if (isVarArg) return false;
3964 
3965   MachineFunction &MF = DAG.getMachineFunction();
3966   CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
3967 
3968   // Tail or sibling call optimization (TCO/SCO) requires that the callee and
3969   // caller have the same calling convention.
3970   if (CallerCC != CalleeCC) return false;
3971 
3972   // SCO is supported only for the C and Fast calling conventions.
3973   if (CalleeCC != CallingConv::Fast && CalleeCC != CallingConv::C)
3974     return false;
3975 
3976   // Functions containing byval parameters are not supported.
3977   if (std::any_of(Ins.begin(), Ins.end(),
3978                   [](const ISD::InputArg& IA) { return IA.Flags.isByVal(); }))
3979     return false;
3980 
3981   // No TCO/SCO on an indirect call, because the caller has to restore its TOC.
3982   if (!isFunctionGlobalAddress(Callee) &&
3983       !isa<ExternalSymbolSDNode>(Callee))
3984     return false;
3985 
3986   // Check if Callee resides in the same module, because for now the PPC64 SVR4
3987   // ABI (ELFv1/ELFv2) doesn't allow tail calls to a symbol that resides in
3988   // another module.
3989   // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
3990   if (!resideInSameModule(Callee, getTargetMachine().getRelocationModel()))
3991     return false;
3992 
3993   // TCO allows altering callee ABI, so we don't have to check further.
3994   if (CalleeCC == CallingConv::Fast && TailCallOpt)
3995     return true;
3996 
3997   if (DisableSCO) return false;
3998 
3999   // If the callee uses the same argument list as the caller, we can apply SCO
4000   // in this case. If not, we need to check whether the callee needs stack
4001   // slots for passing arguments.
4002   if (!hasSameArgumentList(MF.getFunction(), CS) &&
4003       needStackSlotPassParameters(Subtarget, Outs)) {
4004     return false;
4005   }
4006 
4007   return true;
4008 }
4009 
4010 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4011 /// for tail call optimization. Targets which want to do tail call
4012 /// optimization should implement this function.
4013 bool
4014 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4015                                                      CallingConv::ID CalleeCC,
4016                                                      bool isVarArg,
4017                                       const SmallVectorImpl<ISD::InputArg> &Ins,
4018                                                      SelectionDAG& DAG) const {
4019   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4020     return false;
4021 
4022   // Variable argument functions are not supported.
4023   if (isVarArg)
4024     return false;
4025 
4026   MachineFunction &MF = DAG.getMachineFunction();
4027   CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
4028   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4029     // Functions containing byval parameters are not supported.
4030     for (unsigned i = 0; i != Ins.size(); i++) {
4031       ISD::ArgFlagsTy Flags = Ins[i].Flags;
4032       if (Flags.isByVal()) return false;
4033     }
4034 
4035     // Non-PIC/GOT tail calls are supported.
4036     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4037       return true;
4038 
4039     // At the moment we can only do local tail calls (in same module, hidden
4040     // or protected) if we are generating PIC.
4041     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4042       return G->getGlobal()->hasHiddenVisibility()
4043           || G->getGlobal()->hasProtectedVisibility();
4044   }
4045 
4046   return false;
4047 }
4048 
4049 /// isBLACompatibleAddress - Return the immediate to use if the specified
4050 /// 32-bit value is representable in the immediate field of a BxA instruction.
4051 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4052   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4053   if (!C) return nullptr;
4054 
4055   int Addr = C->getZExtValue();
4056   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
4057       SignExtend32<26>(Addr) != Addr)
4058     return nullptr;  // Top 6 bits have to be sext of immediate.
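  // [Worked example, illustrative] Addr == 0x1000 is word aligned and is
  // unchanged by SignExtend32<26>, so it is accepted and encoded below as
  // 0x1000 >> 2 == 0x400; Addr == 0x02000000 has bit 25 set, fails the
  // sign-extension check, and is rejected.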
4059 4060 return DAG.getConstant((int)C->getZExtValue() >> 2, SDLoc(Op), 4061 DAG.getTargetLoweringInfo().getPointerTy( 4062 DAG.getDataLayout())).getNode(); 4063 } 4064 4065 namespace { 4066 4067 struct TailCallArgumentInfo { 4068 SDValue Arg; 4069 SDValue FrameIdxOp; 4070 int FrameIdx; 4071 4072 TailCallArgumentInfo() : FrameIdx(0) {} 4073 }; 4074 } 4075 4076 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. 4077 static void StoreTailCallArgumentsToStackSlot( 4078 SelectionDAG &DAG, SDValue Chain, 4079 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs, 4080 SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) { 4081 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 4082 SDValue Arg = TailCallArgs[i].Arg; 4083 SDValue FIN = TailCallArgs[i].FrameIdxOp; 4084 int FI = TailCallArgs[i].FrameIdx; 4085 // Store relative to framepointer. 4086 MemOpChains.push_back(DAG.getStore( 4087 Chain, dl, Arg, FIN, 4088 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), false, 4089 false, 0)); 4090 } 4091 } 4092 4093 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 4094 /// the appropriate stack slot for the tail call optimized function call. 4095 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, 4096 MachineFunction &MF, SDValue Chain, 4097 SDValue OldRetAddr, SDValue OldFP, 4098 int SPDiff, bool isPPC64, 4099 bool isDarwinABI, 4100 const SDLoc &dl) { 4101 if (SPDiff) { 4102 // Calculate the new stack slot for the return address. 4103 int SlotSize = isPPC64 ? 8 : 4; 4104 const PPCFrameLowering *FL = 4105 MF.getSubtarget<PPCSubtarget>().getFrameLowering(); 4106 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset(); 4107 int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize, 4108 NewRetAddrLoc, true); 4109 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 4110 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 4111 Chain = DAG.getStore( 4112 Chain, dl, OldRetAddr, NewRetAddrFrIdx, 4113 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), NewRetAddr), 4114 false, false, 0); 4115 4116 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack 4117 // slot as the FP is never overwritten. 4118 if (isDarwinABI) { 4119 int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset(); 4120 int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc, 4121 true); 4122 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 4123 Chain = DAG.getStore( 4124 Chain, dl, OldFP, NewFramePtrIdx, 4125 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), NewFPIdx), 4126 false, false, 0); 4127 } 4128 } 4129 return Chain; 4130 } 4131 4132 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate 4133 /// the position of the argument. 4134 static void 4135 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 4136 SDValue Arg, int SPDiff, unsigned ArgOffset, 4137 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) { 4138 int Offset = ArgOffset + SPDiff; 4139 uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8; 4140 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 4141 EVT VT = isPPC64 ? 
MVT::i64 : MVT::i32;
4142   SDValue FIN = DAG.getFrameIndex(FI, VT);
4143   TailCallArgumentInfo Info;
4144   Info.Arg = Arg;
4145   Info.FrameIdxOp = FIN;
4146   Info.FrameIdx = FI;
4147   TailCallArguments.push_back(Info);
4148 }
4149 
4150 /// EmitTailCallLoadFPAndRetAddr - Emit load from frame pointer and return
4151 /// address stack slot. Returns the chain as result and the loaded frame
4152 /// pointers in LROpOut/FPOpOut. Used when tail calling.
4153 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
4154     SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
4155     SDValue &FPOpOut, bool isDarwinABI, const SDLoc &dl) const {
4156   if (SPDiff) {
4157     // Load the LR and FP stack slot for later adjusting.
4158     EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
4159     LROpOut = getReturnAddrFrameIndex(DAG);
4160     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(),
4161                           false, false, false, 0);
4162     Chain = SDValue(LROpOut.getNode(), 1);
4163 
4164     // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
4165     // slot as the FP is never overwritten.
4166     if (isDarwinABI) {
4167       FPOpOut = getFramePointerFrameIndex(DAG);
4168       FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(),
4169                             false, false, false, 0);
4170       Chain = SDValue(FPOpOut.getNode(), 1);
4171     }
4172   }
4173   return Chain;
4174 }
4175 
4176 /// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
4177 /// specified by "Src" to the address "Dst" of size "Size". Alignment
4178 /// information is specified by the parameter attribute. The copy will be
4179 /// passed as a byval function parameter.
4180 /// Sometimes what we are copying is the end of a larger object, the part that
4181 /// does not fit in registers.
4182 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
4183                                          SDValue Chain, ISD::ArgFlagsTy Flags,
4184                                          SelectionDAG &DAG, const SDLoc &dl) {
4185   SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
4186   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
4187                        false, false, false, MachinePointerInfo(),
4188                        MachinePointerInfo());
4189 }
4190 
4191 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
4192 /// tail calls.
4193 static void LowerMemOpCallTo(
4194     SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
4195     SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
4196     bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
4197     SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
4198   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4199   if (!isTailCall) {
4200     if (isVector) {
4201       SDValue StackPtr;
4202       if (isPPC64)
4203         StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
4204       else
4205         StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
4206       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
4207                            DAG.getConstant(ArgOffset, dl, PtrVT));
4208     }
4209     MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
4210                                        MachinePointerInfo(), false, false, 0));
4211   // Calculate and remember argument location.
4212 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 4213 TailCallArguments); 4214 } 4215 4216 static void 4217 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 4218 const SDLoc &dl, bool isPPC64, int SPDiff, unsigned NumBytes, 4219 SDValue LROp, SDValue FPOp, bool isDarwinABI, 4220 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 4221 MachineFunction &MF = DAG.getMachineFunction(); 4222 4223 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 4224 // might overwrite each other in case of tail call optimization. 4225 SmallVector<SDValue, 8> MemOpChains2; 4226 // Do not flag preceding copytoreg stuff together with the following stuff. 4227 InFlag = SDValue(); 4228 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 4229 MemOpChains2, dl); 4230 if (!MemOpChains2.empty()) 4231 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); 4232 4233 // Store the return address to the appropriate stack slot. 4234 Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff, 4235 isPPC64, isDarwinABI, dl); 4236 4237 // Emit callseq_end just before tailcall node. 4238 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4239 DAG.getIntPtrConstant(0, dl, true), InFlag, dl); 4240 InFlag = Chain.getValue(1); 4241 } 4242 4243 // Is this global address that of a function that can be called by name? (as 4244 // opposed to something that must hold a descriptor for an indirect call). 4245 static bool isFunctionGlobalAddress(SDValue Callee) { 4246 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 4247 if (Callee.getOpcode() == ISD::GlobalTLSAddress || 4248 Callee.getOpcode() == ISD::TargetGlobalTLSAddress) 4249 return false; 4250 4251 return G->getGlobal()->getValueType()->isFunctionTy(); 4252 } 4253 4254 return false; 4255 } 4256 4257 static unsigned 4258 PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain, 4259 SDValue CallSeqStart, const SDLoc &dl, int SPDiff, bool isTailCall, 4260 bool IsPatchPoint, bool hasNest, 4261 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, 4262 SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys, 4263 ImmutableCallSite *CS, const PPCSubtarget &Subtarget) { 4264 4265 bool isPPC64 = Subtarget.isPPC64(); 4266 bool isSVR4ABI = Subtarget.isSVR4ABI(); 4267 bool isELFv2ABI = Subtarget.isELFv2ABI(); 4268 4269 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4270 NodeTys.push_back(MVT::Other); // Returns a chain 4271 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. 4272 4273 unsigned CallOpc = PPCISD::CALL; 4274 4275 bool needIndirectCall = true; 4276 if (!isSVR4ABI || !isPPC64) 4277 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { 4278 // If this is an absolute destination address, use the munged value. 4279 Callee = SDValue(Dest, 0); 4280 needIndirectCall = false; 4281 } 4282 4283 if (isFunctionGlobalAddress(Callee)) { 4284 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee); 4285 // A call to a TLS address is actually an indirect call to a 4286 // thread-specific pointer. 
4287 unsigned OpFlags = 0; 4288 if ((DAG.getTarget().getRelocationModel() != Reloc::Static && 4289 (Subtarget.getTargetTriple().isMacOSX() && 4290 Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5)) && 4291 !G->getGlobal()->isStrongDefinitionForLinker()) || 4292 (Subtarget.isTargetELF() && !isPPC64 && 4293 !G->getGlobal()->hasLocalLinkage() && 4294 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) { 4295 // PC-relative references to external symbols should go through $stub, 4296 // unless we're building with the leopard linker or later, which 4297 // automatically synthesizes these stubs. 4298 OpFlags = PPCII::MO_PLT_OR_STUB; 4299 } 4300 4301 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, 4302 // every direct call is) turn it into a TargetGlobalAddress / 4303 // TargetExternalSymbol node so that legalize doesn't hack it. 4304 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, 4305 Callee.getValueType(), 0, OpFlags); 4306 needIndirectCall = false; 4307 } 4308 4309 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 4310 unsigned char OpFlags = 0; 4311 4312 if ((DAG.getTarget().getRelocationModel() != Reloc::Static && 4313 (Subtarget.getTargetTriple().isMacOSX() && 4314 Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5))) || 4315 (Subtarget.isTargetELF() && !isPPC64 && 4316 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) { 4317 // PC-relative references to external symbols should go through $stub, 4318 // unless we're building with the leopard linker or later, which 4319 // automatically synthesizes these stubs. 4320 OpFlags = PPCII::MO_PLT_OR_STUB; 4321 } 4322 4323 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(), 4324 OpFlags); 4325 needIndirectCall = false; 4326 } 4327 4328 if (IsPatchPoint) { 4329 // We'll form an invalid direct call when lowering a patchpoint; the full 4330 // sequence for an indirect call is complicated, and many of the 4331 // instructions introduced might have side effects (and, thus, can't be 4332 // removed later). The call itself will be removed as soon as the 4333 // argument/return lowering is complete, so the fact that it has the wrong 4334 // kind of operands should not really matter. 4335 needIndirectCall = false; 4336 } 4337 4338 if (needIndirectCall) { 4339 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 4340 // to do the call, we can't use PPCISD::CALL. 4341 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 4342 4343 if (isSVR4ABI && isPPC64 && !isELFv2ABI) { 4344 // Function pointers in the 64-bit SVR4 ABI do not point to the function 4345 // entry point, but to the function descriptor (the function entry point 4346 // address is part of the function descriptor though). 4347 // The function descriptor is a three doubleword structure with the 4348 // following fields: function entry point, TOC base address and 4349 // environment pointer. 4350 // Thus for a call through a function pointer, the following actions need 4351 // to be performed: 4352 // 1. Save the TOC of the caller in the TOC save area of its stack 4353 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). 4354 // 2. Load the address of the function entry point from the function 4355 // descriptor. 4356 // 3. Load the TOC of the callee from the function descriptor into r2. 4357 // 4. Load the environment pointer from the function descriptor into 4358 // r11. 4359 // 5. Branch to the function entry point address. 4360 // 6. 
On return of the callee, the TOC of the caller needs to be 4361 // restored (this is done in FinishCall()). 4362 // 4363 // The loads are scheduled at the beginning of the call sequence, and the 4364 // register copies are flagged together to ensure that no other 4365 // operations can be scheduled in between. E.g. without flagging the 4366 // copies together, a TOC access in the caller could be scheduled between 4367 // the assignment of the callee TOC and the branch to the callee, which 4368 // results in the TOC access going through the TOC of the callee instead 4369 // of going through the TOC of the caller, which leads to incorrect code. 4370 4371 // Load the address of the function entry point from the function 4372 // descriptor. 4373 SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1); 4374 if (LDChain.getValueType() == MVT::Glue) 4375 LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2); 4376 4377 bool LoadsInv = Subtarget.hasInvariantFunctionDescriptors(); 4378 4379 MachinePointerInfo MPI(CS ? CS->getCalledValue() : nullptr); 4380 SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI, 4381 false, false, LoadsInv, 8); 4382 4383 // Load environment pointer into r11. 4384 SDValue PtrOff = DAG.getIntPtrConstant(16, dl); 4385 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 4386 SDValue LoadEnvPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, 4387 MPI.getWithOffset(16), false, false, 4388 LoadsInv, 8); 4389 4390 SDValue TOCOff = DAG.getIntPtrConstant(8, dl); 4391 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff); 4392 SDValue TOCPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, 4393 MPI.getWithOffset(8), false, false, 4394 LoadsInv, 8); 4395 4396 setUsesTOCBasePtr(DAG); 4397 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr, 4398 InFlag); 4399 Chain = TOCVal.getValue(0); 4400 InFlag = TOCVal.getValue(1); 4401 4402 // If the function call has an explicit 'nest' parameter, it takes the 4403 // place of the environment pointer. 4404 if (!hasNest) { 4405 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 4406 InFlag); 4407 4408 Chain = EnvVal.getValue(0); 4409 InFlag = EnvVal.getValue(1); 4410 } 4411 4412 MTCTROps[0] = Chain; 4413 MTCTROps[1] = LoadFuncPtr; 4414 MTCTROps[2] = InFlag; 4415 } 4416 4417 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, 4418 makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2)); 4419 InFlag = Chain.getValue(1); 4420 4421 NodeTys.clear(); 4422 NodeTys.push_back(MVT::Other); 4423 NodeTys.push_back(MVT::Glue); 4424 Ops.push_back(Chain); 4425 CallOpc = PPCISD::BCTRL; 4426 Callee.setNode(nullptr); 4427 // Add use of X11 (holding environment pointer) 4428 if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest) 4429 Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); 4430 // Add CTR register as callee so a bctr can be emitted later. 4431 if (isTailCall) 4432 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 4433 } 4434 4435 // If this is a direct call, pass the chain and the callee. 4436 if (Callee.getNode()) { 4437 Ops.push_back(Chain); 4438 Ops.push_back(Callee); 4439 } 4440 // If this is a tail call add stack pointer delta. 4441 if (isTailCall) 4442 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32)); 4443 4444 // Add argument registers to the end of the list so that they are known live 4445 // into the call. 
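// As an illustrative sketch (register assignments follow the usual
// PowerPC conventions, where the first integer argument travels in
// R3/X3 and the first floating-point argument in F1): for a call such
// as foo(1, 2.0) on 64-bit ELF, RegsToPass would typically hold
// {X3, <1>} and {F1, <2.0>}, and the loop below appends each of those
// registers as an operand so the scheduler keeps them live up to the
// call itself.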
4446 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 4447 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 4448 RegsToPass[i].second.getValueType())); 4449 4450 // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live 4451 // into the call. 4452 if (isSVR4ABI && isPPC64 && !IsPatchPoint) { 4453 setUsesTOCBasePtr(DAG); 4454 Ops.push_back(DAG.getRegister(PPC::X2, PtrVT)); 4455 } 4456 4457 return CallOpc; 4458 } 4459 4460 static 4461 bool isLocalCall(const SDValue &Callee) 4462 { 4463 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 4464 return G->getGlobal()->isStrongDefinitionForLinker(); 4465 return false; 4466 } 4467 4468 SDValue PPCTargetLowering::LowerCallResult( 4469 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, 4470 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 4471 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 4472 4473 SmallVector<CCValAssign, 16> RVLocs; 4474 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 4475 *DAG.getContext()); 4476 CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC); 4477 4478 // Copy all of the result registers out of their specified physreg. 4479 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 4480 CCValAssign &VA = RVLocs[i]; 4481 assert(VA.isRegLoc() && "Can only return in registers!"); 4482 4483 SDValue Val = DAG.getCopyFromReg(Chain, dl, 4484 VA.getLocReg(), VA.getLocVT(), InFlag); 4485 Chain = Val.getValue(1); 4486 InFlag = Val.getValue(2); 4487 4488 switch (VA.getLocInfo()) { 4489 default: llvm_unreachable("Unknown loc info!"); 4490 case CCValAssign::Full: break; 4491 case CCValAssign::AExt: 4492 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4493 break; 4494 case CCValAssign::ZExt: 4495 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val, 4496 DAG.getValueType(VA.getValVT())); 4497 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4498 break; 4499 case CCValAssign::SExt: 4500 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val, 4501 DAG.getValueType(VA.getValVT())); 4502 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4503 break; 4504 } 4505 4506 InVals.push_back(Val); 4507 } 4508 4509 return Chain; 4510 } 4511 4512 SDValue PPCTargetLowering::FinishCall( 4513 CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg, 4514 bool IsPatchPoint, bool hasNest, SelectionDAG &DAG, 4515 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue InFlag, 4516 SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff, 4517 unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins, 4518 SmallVectorImpl<SDValue> &InVals, ImmutableCallSite *CS) const { 4519 4520 std::vector<EVT> NodeTys; 4521 SmallVector<SDValue, 8> Ops; 4522 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl, 4523 SPDiff, isTailCall, IsPatchPoint, hasNest, 4524 RegsToPass, Ops, NodeTys, CS, Subtarget); 4525 4526 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls 4527 if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64()) 4528 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); 4529 4530 // When performing tail call optimization the callee pops its arguments off 4531 // the stack. Account for this here so these bytes can be pushed back on in 4532 // PPCFrameLowering::eliminateCallFramePseudoInstr. 4533 int BytesCalleePops = 4534 (CallConv == CallingConv::Fast && 4535 getTargetMachine().Options.GuaranteedTailCallOpt) ? 
NumBytes : 0;
4536
4537 // Add a register mask operand representing the call-preserved registers.
4538 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
4539 const uint32_t *Mask =
4540 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
4541 assert(Mask && "Missing call preserved mask for calling convention");
4542 Ops.push_back(DAG.getRegisterMask(Mask));
4543
4544 if (InFlag.getNode())
4545 Ops.push_back(InFlag);
4546
4547 // Emit tail call.
4548 if (isTailCall) {
4549 assert(((Callee.getOpcode() == ISD::Register &&
4550 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
4551 Callee.getOpcode() == ISD::TargetExternalSymbol ||
4552 Callee.getOpcode() == ISD::TargetGlobalAddress ||
4553 isa<ConstantSDNode>(Callee)) &&
4554 "Expecting a global address, external symbol, absolute value or register");
4555
4556 DAG.getMachineFunction().getFrameInfo()->setHasTailCall();
4557 return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
4558 }
4559
4560 // Add a NOP immediately after the branch instruction when using the 64-bit
4561 // SVR4 ABI. At link time, if the caller and callee are in different modules
4562 // and thus have different TOCs, the call will be replaced with a call to a
4563 // stub function which saves the current TOC, loads the TOC of the callee and
4564 // branches to the callee. The NOP will be replaced with a load instruction
4565 // which restores the TOC of the caller from the TOC save slot of the current
4566 // stack frame. If the caller and callee belong to the same module (and have
4567 // the same TOC), the NOP will remain unchanged.
4568
4569 if (!isTailCall && Subtarget.isSVR4ABI() && Subtarget.isPPC64() &&
4570 !IsPatchPoint) {
4571 if (CallOpc == PPCISD::BCTRL) {
4572 // This is a call through a function pointer.
4573 // Restore the caller TOC from the save area into R2.
4574 // See PrepareCall() for more information about calls through function
4575 // pointers in the 64-bit SVR4 ABI.
4576 // We are using a target-specific load with r2 hard coded, because the
4577 // result of a target-independent load would never go directly into r2,
4578 // since r2 is a reserved register (which prevents the register allocator
4579 // from allocating it), resulting in an additional register being
4580 // allocated and an unnecessary move instruction being generated.
4581 CallOpc = PPCISD::BCTRL_LOAD_TOC;
4582
4583 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4584 SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
4585 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
4586 SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
4587 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);
4588
4589 // The address needs to go after the chain input but before the flag (or
4590 // any other variadic arguments).
4591 Ops.insert(std::next(Ops.begin()), AddTOC);
4592 } else if ((CallOpc == PPCISD::CALL) &&
4593 (!isLocalCall(Callee) ||
4594 DAG.getTarget().getRelocationModel() == Reloc::PIC_))
4595 // Otherwise insert NOP for non-local calls.
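// As a rough illustration of the linker's side of this contract
// (offsets assume the conventional TOC save slot, r1+40 on ELFv1 and
// r1+24 on ELFv2): a call that turns out to be cross-module may be
// rewritten from
//   bl callee
//   nop
// into
//   bl callee_plt_stub
//   ld r2, 40(r1)
// while a call resolved within the same module keeps the plain bl/nop
// pair.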
4596 CallOpc = PPCISD::CALL_NOP; 4597 } 4598 4599 Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops); 4600 InFlag = Chain.getValue(1); 4601 4602 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4603 DAG.getIntPtrConstant(BytesCalleePops, dl, true), 4604 InFlag, dl); 4605 if (!Ins.empty()) 4606 InFlag = Chain.getValue(1); 4607 4608 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 4609 Ins, dl, DAG, InVals); 4610 } 4611 4612 SDValue 4613 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 4614 SmallVectorImpl<SDValue> &InVals) const { 4615 SelectionDAG &DAG = CLI.DAG; 4616 SDLoc &dl = CLI.DL; 4617 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 4618 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 4619 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 4620 SDValue Chain = CLI.Chain; 4621 SDValue Callee = CLI.Callee; 4622 bool &isTailCall = CLI.IsTailCall; 4623 CallingConv::ID CallConv = CLI.CallConv; 4624 bool isVarArg = CLI.IsVarArg; 4625 bool IsPatchPoint = CLI.IsPatchPoint; 4626 ImmutableCallSite *CS = CLI.CS; 4627 4628 if (isTailCall) { 4629 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) 4630 isTailCall = 4631 IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS, 4632 isVarArg, Outs, Ins, DAG); 4633 else 4634 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 4635 Ins, DAG); 4636 if (isTailCall) { 4637 ++NumTailCalls; 4638 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 4639 ++NumSiblingCalls; 4640 4641 assert(isa<GlobalAddressSDNode>(Callee) && 4642 "Callee should be an llvm::Function object."); 4643 DEBUG( 4644 const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal(); 4645 const unsigned Width = 80 - strlen("TCO caller: ") 4646 - strlen(", callee linkage: 0, 0"); 4647 dbgs() << "TCO caller: " 4648 << left_justify(DAG.getMachineFunction().getName(), Width) 4649 << ", callee linkage: " 4650 << GV->getVisibility() << ", " << GV->getLinkage() << "\n" 4651 ); 4652 } 4653 } 4654 4655 if (!isTailCall && CS && CS->isMustTailCall()) 4656 report_fatal_error("failed to perform tail call elimination on a call " 4657 "site marked musttail"); 4658 4659 if (Subtarget.isSVR4ABI()) { 4660 if (Subtarget.isPPC64()) 4661 return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg, 4662 isTailCall, IsPatchPoint, Outs, OutVals, Ins, 4663 dl, DAG, InVals, CS); 4664 else 4665 return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg, 4666 isTailCall, IsPatchPoint, Outs, OutVals, Ins, 4667 dl, DAG, InVals, CS); 4668 } 4669 4670 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg, 4671 isTailCall, IsPatchPoint, Outs, OutVals, Ins, 4672 dl, DAG, InVals, CS); 4673 } 4674 4675 SDValue PPCTargetLowering::LowerCall_32SVR4( 4676 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, 4677 bool isTailCall, bool IsPatchPoint, 4678 const SmallVectorImpl<ISD::OutputArg> &Outs, 4679 const SmallVectorImpl<SDValue> &OutVals, 4680 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 4681 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, 4682 ImmutableCallSite *CS) const { 4683 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description 4684 // of the 32-bit SVR4 ABI stack frame layout. 
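// As a rough sketch (the description in LowerFormalArguments_32SVR4()
// is authoritative; exact sizes come from the frame lowering code), the
// caller's outgoing 32-bit SVR4 frame looks approximately like:
//   0(SP) : back chain word (link to the caller's previous frame)
//   4(SP) : LR save word                         } 8-byte linkage area
//   8(SP)+: parameter list area for outgoing arguments, followed by
//           the local variable space holding by-value aggregate copies.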
4685
4686 assert((CallConv == CallingConv::C ||
4687 CallConv == CallingConv::Fast) && "Unknown calling convention!");
4688
4689 unsigned PtrByteSize = 4;
4690
4691 MachineFunction &MF = DAG.getMachineFunction();
4692
4693 // Mark this function as potentially containing a function that contains a
4694 // tail call. As a consequence, the frame pointer will be used for dynamic
4695 // stack allocation and for restoring the caller's stack pointer in this
4696 // function's epilogue. This is done because the tail-called function might
4697 // overwrite the value in this function's (MF) stack pointer save slot 0(SP).
4698 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
4699 CallConv == CallingConv::Fast)
4700 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
4701
4702 // Count how many bytes are to be pushed on the stack, including the linkage
4703 // area, parameter list area and the part of the local variable space which
4704 // contains copies of aggregates which are passed by value.
4705
4706 // Assign locations to all of the outgoing arguments.
4707 SmallVector<CCValAssign, 16> ArgLocs;
4708 PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
4709 *DAG.getContext());
4710
4711 // Reserve space for the linkage area on the stack.
4712 CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
4713 PtrByteSize);
4714 if (Subtarget.useSoftFloat())
4715 CCInfo.PreAnalyzeCallOperands(Outs);
4716
4717 if (isVarArg) {
4718 // Handle fixed and variable vector arguments differently.
4719 // Fixed vector arguments go into registers as long as registers are
4720 // available. Variable vector arguments always go into memory.
4721 unsigned NumArgs = Outs.size();
4722
4723 for (unsigned i = 0; i != NumArgs; ++i) {
4724 MVT ArgVT = Outs[i].VT;
4725 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
4726 bool Result;
4727
4728 if (Outs[i].IsFixed) {
4729 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
4730 CCInfo);
4731 } else {
4732 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
4733 ArgFlags, CCInfo);
4734 }
4735
4736 if (Result) {
4737 #ifndef NDEBUG
4738 errs() << "Call operand #" << i << " has unhandled type "
4739 << EVT(ArgVT).getEVTString() << "\n";
4740 #endif
4741 llvm_unreachable(nullptr);
4742 }
4743 }
4744 } else {
4745 // All arguments are treated the same.
4746 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
4747 }
4748 CCInfo.clearWasPPCF128();
4749
4750 // Assign locations to all of the outgoing aggregate by value arguments.
4751 SmallVector<CCValAssign, 16> ByValArgLocs;
4752 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
4753 ByValArgLocs, *DAG.getContext());
4754
4755 // Reserve stack space for the allocations in CCInfo.
4756 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
4757
4758 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
4759
4760 // Size of the linkage area, parameter list area and the part of the local
4761 // variable space where copies of aggregates which are passed by value are
4762 // stored.
4763 unsigned NumBytes = CCByValInfo.getNextStackOffset();
4764
4765 // Calculate by how many bytes the stack has to be adjusted in case of tail
4766 // call optimization.
4767 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
4768
4769 // Adjust the stack pointer for the new arguments...
4770 // These operations are automatically eliminated by the prolog/epilog pass
4771 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
4772 dl);
4773 SDValue CallSeqStart = Chain;
4774
4775 // Load the return address and frame pointer so they can be moved somewhere
4776 // else later.
4777 SDValue LROp, FPOp;
4778 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false,
4779 dl);
4780
4781 // Set up a copy of the stack pointer for use loading and storing any
4782 // arguments that may not fit in the registers available for argument
4783 // passing.
4784 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
4785
4786 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
4787 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
4788 SmallVector<SDValue, 8> MemOpChains;
4789
4790 bool seenFloatArg = false;
4791 // Walk the register/memloc assignments, inserting copies/loads.
4792 for (unsigned i = 0, j = 0, e = ArgLocs.size();
4793 i != e;
4794 ++i) {
4795 CCValAssign &VA = ArgLocs[i];
4796 SDValue Arg = OutVals[i];
4797 ISD::ArgFlagsTy Flags = Outs[i].Flags;
4798
4799 if (Flags.isByVal()) {
4800 // Argument is an aggregate which is passed by value, thus we need to
4801 // create a copy of it in the local variable space of the current stack
4802 // frame (which is the stack frame of the caller) and pass the address of
4803 // this copy to the callee.
4804 assert((j < ByValArgLocs.size()) && "Index out of bounds!");
4805 CCValAssign &ByValVA = ByValArgLocs[j++];
4806 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
4807
4808 // Memory reserved in the local variable space of the caller's stack frame.
4809 unsigned LocMemOffset = ByValVA.getLocMemOffset();
4810
4811 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
4812 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
4813 StackPtr, PtrOff);
4814
4815 // Create a copy of the argument in the local area of the current
4816 // stack frame.
4817 SDValue MemcpyCall =
4818 CreateCopyOfByValArgument(Arg, PtrOff,
4819 CallSeqStart.getNode()->getOperand(0),
4820 Flags, DAG, dl);
4821
4822 // This must go outside the CALLSEQ_START..END.
4823 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
4824 CallSeqStart.getNode()->getOperand(1),
4825 SDLoc(MemcpyCall));
4826 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
4827 NewCallSeqStart.getNode());
4828 Chain = CallSeqStart = NewCallSeqStart;
4829
4830 // Pass the address of the aggregate copy on the stack either in a
4831 // physical register or in the parameter list area of the current stack
4832 // frame to the callee.
4833 Arg = PtrOff;
4834 }
4835
4836 if (VA.isRegLoc()) {
4837 if (Arg.getValueType() == MVT::i1)
4838 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg);
4839
4840 seenFloatArg |= VA.getLocVT().isFloatingPoint();
4841 // Put argument in a physical register.
4842 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
4843 } else {
4844 // Put argument in the parameter list area of the current stack frame.
4845 assert(VA.isMemLoc());
4846 unsigned LocMemOffset = VA.getLocMemOffset();
4847
4848 if (!isTailCall) {
4849 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
4850 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
4851 StackPtr, PtrOff);
4852
4853 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
4854 MachinePointerInfo(),
4855 false, false, 0));
4856 } else {
4857 // Calculate and remember argument location.
4858 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
4859 TailCallArguments);
4860 }
4861 }
4862 }
4863
4864 if (!MemOpChains.empty())
4865 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
4866
4867 // Build a sequence of copy-to-reg nodes chained together with token chain
4868 // and flag operands which copy the outgoing args into the appropriate regs.
4869 SDValue InFlag;
4870 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
4871 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
4872 RegsToPass[i].second, InFlag);
4873 InFlag = Chain.getValue(1);
4874 }
4875
4876 // Set CR bit 6 to true if this is a vararg call with floating args passed in
4877 // registers.
4878 if (isVarArg) {
4879 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
4880 SDValue Ops[] = { Chain, InFlag };
4881
4882 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
4883 dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
4884
4885 InFlag = Chain.getValue(1);
4886 }
4887
4888 if (isTailCall)
4889 PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp,
4890 false, TailCallArguments);
4891
4892 return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint,
4893 /* unused except on PPC64 ELFv1 */ false, DAG,
4894 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
4895 NumBytes, Ins, InVals, CS);
4896 }
4897
4898 // Copy an argument into memory, being careful to do this outside the
4899 // call sequence for the call to which the argument belongs.
4900 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
4901 SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
4902 SelectionDAG &DAG, const SDLoc &dl) const {
4903 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
4904 CallSeqStart.getNode()->getOperand(0),
4905 Flags, DAG, dl);
4906 // The MEMCPY must go outside the CALLSEQ_START..END.
4907 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
4908 CallSeqStart.getNode()->getOperand(1),
4909 SDLoc(MemcpyCall));
4910 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
4911 NewCallSeqStart.getNode());
4912 return NewCallSeqStart;
4913 }
4914
4915 SDValue PPCTargetLowering::LowerCall_64SVR4(
4916 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
4917 bool isTailCall, bool IsPatchPoint,
4918 const SmallVectorImpl<ISD::OutputArg> &Outs,
4919 const SmallVectorImpl<SDValue> &OutVals,
4920 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4921 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
4922 ImmutableCallSite *CS) const {
4923
4924 bool isELFv2ABI = Subtarget.isELFv2ABI();
4925 bool isLittleEndian = Subtarget.isLittleEndian();
4926 unsigned NumOps = Outs.size();
4927 bool hasNest = false;
4928 bool IsSibCall = false;
4929
4930 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4931 unsigned PtrByteSize = 8;
4932
4933 MachineFunction &MF = DAG.getMachineFunction();
4934
4935 if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
4936 IsSibCall = true;
4937
4938 // Mark this function as potentially containing a function that contains a
4939 // tail call. As a consequence, the frame pointer will be used for dynamic
4940 // stack allocation and for restoring the caller's stack pointer in this
4941 // function's epilogue. This is done because the tail-called function might
4942 // overwrite the value in this function's (MF) stack pointer save slot 0(SP).
4943 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
4944 CallConv == CallingConv::Fast)
4945 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
4946
4947 assert(!(CallConv == CallingConv::Fast && isVarArg) &&
4948 "fastcc not supported on varargs functions");
4949
4950 // Count how many bytes are to be pushed on the stack, including the linkage
4951 // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes
4952 // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
4953 // area is 32 bytes reserved space for [SP][CR][LR][TOC].
4954 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4955 unsigned NumBytes = LinkageSize;
4956 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4957 unsigned &QFPR_idx = FPR_idx;
4958
4959 static const MCPhysReg GPR[] = {
4960 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4961 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4962 };
4963 static const MCPhysReg VR[] = {
4964 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4965 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4966 };
4967 static const MCPhysReg VSRH[] = {
4968 PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8,
4969 PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13
4970 };
4971
4972 const unsigned NumGPRs = array_lengthof(GPR);
4973 const unsigned NumFPRs = 13;
4974 const unsigned NumVRs = array_lengthof(VR);
4975 const unsigned NumQFPRs = NumFPRs;
4976
4977 // When using the fast calling convention, we don't provide backing for
4978 // arguments that will be in registers.
4979 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
4980
4981 // Add up all the space actually used.
4982 for (unsigned i = 0; i != NumOps; ++i) {
4983 ISD::ArgFlagsTy Flags = Outs[i].Flags;
4984 EVT ArgVT = Outs[i].VT;
4985 EVT OrigVT = Outs[i].ArgVT;
4986
4987 if (Flags.isNest())
4988 continue;
4989
4990 if (CallConv == CallingConv::Fast) {
4991 if (Flags.isByVal())
4992 NumGPRsUsed += (Flags.getByValSize()+7)/8;
4993 else
4994 switch (ArgVT.getSimpleVT().SimpleTy) {
4995 default: llvm_unreachable("Unexpected ValueType for argument!");
4996 case MVT::i1:
4997 case MVT::i32:
4998 case MVT::i64:
4999 if (++NumGPRsUsed <= NumGPRs)
5000 continue;
5001 break;
5002 case MVT::v4i32:
5003 case MVT::v8i16:
5004 case MVT::v16i8:
5005 case MVT::v2f64:
5006 case MVT::v2i64:
5007 case MVT::v1i128:
5008 if (++NumVRsUsed <= NumVRs)
5009 continue;
5010 break;
5011 case MVT::v4f32:
5012 // When using QPX, this is handled like an FP register; otherwise,
5013 // it is an Altivec register.
5014 if (Subtarget.hasQPX()) {
5015 if (++NumFPRsUsed <= NumFPRs)
5016 continue;
5017 } else {
5018 if (++NumVRsUsed <= NumVRs)
5019 continue;
5020 }
5021 break;
5022 case MVT::f32:
5023 case MVT::f64:
5024 case MVT::v4f64: // QPX
5025 case MVT::v4i1: // QPX
5026 if (++NumFPRsUsed <= NumFPRs)
5027 continue;
5028 break;
5029 }
5030 }
5031
5032 /* Respect alignment of argument on the stack. */
5033 unsigned Align =
5034 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5035 NumBytes = ((NumBytes + Align - 1) / Align) * Align;
5036
5037 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5038 if (Flags.isInConsecutiveRegsLast())
5039 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5040 }
5041
5042 unsigned NumBytesActuallyUsed = NumBytes;
5043
5044 // The prolog code of the callee may store up to 8 GPR argument registers to
5045 // the stack, allowing va_start to index over them in memory if it is varargs.
5046 // Because we cannot tell if this is needed on the caller side, we have to
5047 // conservatively assume that it is needed. As such, make sure we have at
5048 // least enough stack space for the caller to store the 8 GPRs.
5049 // FIXME: On ELFv2, it may be unnecessary to allocate the parameter area.
5050 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5051
5052 // Tail call needs the stack to be aligned.
5053 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5054 CallConv == CallingConv::Fast)
5055 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
5056
5057 int SPDiff = 0;
5058
5059 // Calculate by how many bytes the stack has to be adjusted in case of tail
5060 // call optimization.
5061 if (!IsSibCall)
5062 SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
5063
5064 // To protect arguments on the stack from being clobbered in a tail call,
5065 // force all the loads to happen before doing any other lowering.
5066 if (isTailCall)
5067 Chain = DAG.getStackArgumentTokenFactor(Chain);
5068
5069 // Adjust the stack pointer for the new arguments...
5070 // These operations are automatically eliminated by the prolog/epilog pass
5071 if (!IsSibCall)
5072 Chain = DAG.getCALLSEQ_START(Chain,
5073 DAG.getIntPtrConstant(NumBytes, dl, true), dl);
5074 SDValue CallSeqStart = Chain;
5075
5076 // Load the return address and frame pointer so they can be moved somewhere
5077 // else later.
5078 SDValue LROp, FPOp;
5079 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
5080 dl);
5081
5082 // Set up a copy of the stack pointer for use loading and storing any
5083 // arguments that may not fit in the registers available for argument
5084 // passing.
5085 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5086
5087 // Figure out which arguments are going to go in registers, and which in
5088 // memory. Also, if this is a vararg function, floating point operations
5089 // must be stored to our stack, and loaded into integer regs as well, if
5090 // any integer regs are available for argument passing.
5091 unsigned ArgOffset = LinkageSize;
5092
5093 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5094 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5095
5096 SmallVector<SDValue, 8> MemOpChains;
5097 for (unsigned i = 0; i != NumOps; ++i) {
5098 SDValue Arg = OutVals[i];
5099 ISD::ArgFlagsTy Flags = Outs[i].Flags;
5100 EVT ArgVT = Outs[i].VT;
5101 EVT OrigVT = Outs[i].ArgVT;
5102
5103 // PtrOff will be used to store the current argument to the stack if a
5104 // register cannot be found for it.
5105 SDValue PtrOff;
5106
5107 // We re-align the argument offset for each argument, except when using the
5108 // fast calling convention, when we need to make sure we do that only when
5109 // we'll actually use a stack slot.
5110 auto ComputePtrOff = [&]() {
5111 /* Respect alignment of argument on the stack. */
5112 unsigned Align =
5113 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5114 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
5115
5116 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
5117
5118 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
5119 };
5120
5121 if (CallConv != CallingConv::Fast) {
5122 ComputePtrOff();
5123
5124 /* Compute GPR index associated with argument offset. */
5125 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
5126 GPR_idx = std::min(GPR_idx, NumGPRs);
5127 }
5128
5129 // Promote integers to 64-bit values.
5130 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) { 5131 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 5132 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 5133 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 5134 } 5135 5136 // FIXME memcpy is used way more than necessary. Correctness first. 5137 // Note: "by value" is code for passing a structure by value, not 5138 // basic types. 5139 if (Flags.isByVal()) { 5140 // Note: Size includes alignment padding, so 5141 // struct x { short a; char b; } 5142 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 5143 // These are the proper values we need for right-justifying the 5144 // aggregate in a parameter register. 5145 unsigned Size = Flags.getByValSize(); 5146 5147 // An empty aggregate parameter takes up no storage and no 5148 // registers. 5149 if (Size == 0) 5150 continue; 5151 5152 if (CallConv == CallingConv::Fast) 5153 ComputePtrOff(); 5154 5155 // All aggregates smaller than 8 bytes must be passed right-justified. 5156 if (Size==1 || Size==2 || Size==4) { 5157 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 5158 if (GPR_idx != NumGPRs) { 5159 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 5160 MachinePointerInfo(), VT, 5161 false, false, false, 0); 5162 MemOpChains.push_back(Load.getValue(1)); 5163 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5164 5165 ArgOffset += PtrByteSize; 5166 continue; 5167 } 5168 } 5169 5170 if (GPR_idx == NumGPRs && Size < 8) { 5171 SDValue AddPtr = PtrOff; 5172 if (!isLittleEndian) { 5173 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 5174 PtrOff.getValueType()); 5175 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5176 } 5177 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5178 CallSeqStart, 5179 Flags, DAG, dl); 5180 ArgOffset += PtrByteSize; 5181 continue; 5182 } 5183 // Copy entire object into memory. There are cases where gcc-generated 5184 // code assumes it is there, even if it could be put entirely into 5185 // registers. (This is not what the doc says.) 5186 5187 // FIXME: The above statement is likely due to a misunderstanding of the 5188 // documents. All arguments must be copied into the parameter area BY 5189 // THE CALLEE in the event that the callee takes the address of any 5190 // formal argument. That has not yet been implemented. However, it is 5191 // reasonable to use the stack area as a staging area for the register 5192 // load. 5193 5194 // Skip this for small aggregates, as we will use the same slot for a 5195 // right-justified copy, below. 5196 if (Size >= 8) 5197 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5198 CallSeqStart, 5199 Flags, DAG, dl); 5200 5201 // When a register is available, pass a small aggregate right-justified. 5202 if (Size < 8 && GPR_idx != NumGPRs) { 5203 // The easiest way to get this right-justified in a register 5204 // is to copy the structure into the rightmost portion of a 5205 // local variable slot, then load the whole slot into the 5206 // register. 5207 // FIXME: The memcpy seems to produce pretty awful code for 5208 // small aggregates, particularly for packed ones. 5209 // FIXME: It would be preferable to use the slot in the 5210 // parameter save area instead of a new local variable. 
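// Illustrative example (hypothetical type, big-endian target): for
//   struct S { char a, b, c; };   // Size == 3
// the bytes are copied below to AddPtr == PtrOff + (8 - Size), i.e.
// PtrOff + 5, so the full-doubleword load from PtrOff then picks the
// aggregate up right-justified in the low-order bytes of the GPR.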
5211 SDValue AddPtr = PtrOff; 5212 if (!isLittleEndian) { 5213 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType()); 5214 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5215 } 5216 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5217 CallSeqStart, 5218 Flags, DAG, dl); 5219 5220 // Load the slot into the register. 5221 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, PtrOff, 5222 MachinePointerInfo(), 5223 false, false, false, 0); 5224 MemOpChains.push_back(Load.getValue(1)); 5225 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5226 5227 // Done with this argument. 5228 ArgOffset += PtrByteSize; 5229 continue; 5230 } 5231 5232 // For aggregates larger than PtrByteSize, copy the pieces of the 5233 // object that fit into registers from the parameter save area. 5234 for (unsigned j=0; j<Size; j+=PtrByteSize) { 5235 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 5236 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 5237 if (GPR_idx != NumGPRs) { 5238 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 5239 MachinePointerInfo(), 5240 false, false, false, 0); 5241 MemOpChains.push_back(Load.getValue(1)); 5242 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5243 ArgOffset += PtrByteSize; 5244 } else { 5245 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 5246 break; 5247 } 5248 } 5249 continue; 5250 } 5251 5252 switch (Arg.getSimpleValueType().SimpleTy) { 5253 default: llvm_unreachable("Unexpected ValueType for argument!"); 5254 case MVT::i1: 5255 case MVT::i32: 5256 case MVT::i64: 5257 if (Flags.isNest()) { 5258 // The 'nest' parameter, if any, is passed in R11. 5259 RegsToPass.push_back(std::make_pair(PPC::X11, Arg)); 5260 hasNest = true; 5261 break; 5262 } 5263 5264 // These can be scalar arguments or elements of an integer array type 5265 // passed directly. Clang may use those instead of "byval" aggregate 5266 // types to avoid forcing arguments to memory unnecessarily. 5267 if (GPR_idx != NumGPRs) { 5268 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 5269 } else { 5270 if (CallConv == CallingConv::Fast) 5271 ComputePtrOff(); 5272 5273 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5274 true, isTailCall, false, MemOpChains, 5275 TailCallArguments, dl); 5276 if (CallConv == CallingConv::Fast) 5277 ArgOffset += PtrByteSize; 5278 } 5279 if (CallConv != CallingConv::Fast) 5280 ArgOffset += PtrByteSize; 5281 break; 5282 case MVT::f32: 5283 case MVT::f64: { 5284 // These can be scalar arguments or elements of a float array type 5285 // passed directly. The latter are used to implement ELFv2 homogenous 5286 // float aggregates. 5287 5288 // Named arguments go into FPRs first, and once they overflow, the 5289 // remaining arguments go into GPRs and then the parameter save area. 5290 // Unnamed arguments for vararg functions always go to GPRs and 5291 // then the parameter save area. For now, put all arguments to vararg 5292 // routines always in both locations (FPR *and* GPR or stack slot). 5293 bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs; 5294 bool NeededLoad = false; 5295 5296 // First load the argument into the next available FPR. 5297 if (FPR_idx != NumFPRs) 5298 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 5299 5300 // Next, load the argument into GPR or stack slot if needed. 
5301 if (!NeedGPROrStack)
5302 ;
5303 else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) {
5304 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
5305 // once we support fp <-> gpr moves.
5306
5307 // In the non-vararg case, this can only ever happen in the
5308 // presence of f32 array types, since otherwise we never run
5309 // out of FPRs before running out of GPRs.
5310 SDValue ArgVal;
5311
5312 // Double values are always passed in a single GPR.
5313 if (Arg.getValueType() != MVT::f32) {
5314 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
5315
5316 // Non-array float values are extended and passed in a GPR.
5317 } else if (!Flags.isInConsecutiveRegs()) {
5318 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5319 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
5320
5321 // If we have an array of floats, we collect every odd element
5322 // together with its predecessor into one GPR.
5323 } else if (ArgOffset % PtrByteSize != 0) {
5324 SDValue Lo, Hi;
5325 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
5326 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5327 if (!isLittleEndian)
5328 std::swap(Lo, Hi);
5329 ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
5330
5331 // The final element, if even, goes into the first half of a GPR.
5332 } else if (Flags.isInConsecutiveRegsLast()) {
5333 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5334 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
5335 if (!isLittleEndian)
5336 ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
5337 DAG.getConstant(32, dl, MVT::i32));
5338
5339 // Non-final even elements are skipped; they will be handled
5340 // together with the subsequent argument on the next go-around.
5341 } else
5342 ArgVal = SDValue();
5343
5344 if (ArgVal.getNode())
5345 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
5346 } else {
5347 if (CallConv == CallingConv::Fast)
5348 ComputePtrOff();
5349
5350 // Single-precision floating-point values are mapped to the
5351 // second (rightmost) word of the stack doubleword.
5352 if (Arg.getValueType() == MVT::f32 &&
5353 !isLittleEndian && !Flags.isInConsecutiveRegs()) {
5354 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
5355 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
5356 }
5357
5358 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5359 true, isTailCall, false, MemOpChains,
5360 TailCallArguments, dl);
5361
5362 NeededLoad = true;
5363 }
5364 // When passing an array of floats, the array occupies consecutive
5365 // space in the argument area; only round up to the next doubleword
5366 // at the end of the array. Otherwise, each float takes 8 bytes.
5367 if (CallConv != CallingConv::Fast || NeededLoad) {
5368 ArgOffset += (Arg.getValueType() == MVT::f32 &&
5369 Flags.isInConsecutiveRegs()) ? 4 : 8;
5370 if (Flags.isInConsecutiveRegsLast())
5371 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5372 }
5373 break;
5374 }
5375 case MVT::v4f32:
5376 case MVT::v4i32:
5377 case MVT::v8i16:
5378 case MVT::v16i8:
5379 case MVT::v2f64:
5380 case MVT::v2i64:
5381 case MVT::v1i128:
5382 if (!Subtarget.hasQPX()) {
5383 // These can be scalar arguments or elements of a vector array type
5384 // passed directly. The latter are used to implement ELFv2 homogenous
5385 // vector aggregates.
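// As an illustration (assuming a front end that passes such aggregates
// as direct element arrays, as described above):
//   struct HVA { vector int a, b; };
// would arrive here as two direct v4i32 arguments rather than one
// "byval" aggregate, so each element is simply assigned its own VR
// (e.g. V2 and V3) by the code below.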
5386 5387 // For a varargs call, named arguments go into VRs or on the stack as 5388 // usual; unnamed arguments always go to the stack or the corresponding 5389 // GPRs when within range. For now, we always put the value in both 5390 // locations (or even all three). 5391 if (isVarArg) { 5392 // We could elide this store in the case where the object fits 5393 // entirely in R registers. Maybe later. 5394 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5395 MachinePointerInfo(), false, false, 0); 5396 MemOpChains.push_back(Store); 5397 if (VR_idx != NumVRs) { 5398 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 5399 MachinePointerInfo(), 5400 false, false, false, 0); 5401 MemOpChains.push_back(Load.getValue(1)); 5402 5403 unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 || 5404 Arg.getSimpleValueType() == MVT::v2i64) ? 5405 VSRH[VR_idx] : VR[VR_idx]; 5406 ++VR_idx; 5407 5408 RegsToPass.push_back(std::make_pair(VReg, Load)); 5409 } 5410 ArgOffset += 16; 5411 for (unsigned i=0; i<16; i+=PtrByteSize) { 5412 if (GPR_idx == NumGPRs) 5413 break; 5414 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5415 DAG.getConstant(i, dl, PtrVT)); 5416 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 5417 false, false, false, 0); 5418 MemOpChains.push_back(Load.getValue(1)); 5419 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5420 } 5421 break; 5422 } 5423 5424 // Non-varargs Altivec params go into VRs or on the stack. 5425 if (VR_idx != NumVRs) { 5426 unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 || 5427 Arg.getSimpleValueType() == MVT::v2i64) ? 5428 VSRH[VR_idx] : VR[VR_idx]; 5429 ++VR_idx; 5430 5431 RegsToPass.push_back(std::make_pair(VReg, Arg)); 5432 } else { 5433 if (CallConv == CallingConv::Fast) 5434 ComputePtrOff(); 5435 5436 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5437 true, isTailCall, true, MemOpChains, 5438 TailCallArguments, dl); 5439 if (CallConv == CallingConv::Fast) 5440 ArgOffset += 16; 5441 } 5442 5443 if (CallConv != CallingConv::Fast) 5444 ArgOffset += 16; 5445 break; 5446 } // not QPX 5447 5448 assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 && 5449 "Invalid QPX parameter type"); 5450 5451 /* fall through */ 5452 case MVT::v4f64: 5453 case MVT::v4i1: { 5454 bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32; 5455 if (isVarArg) { 5456 // We could elide this store in the case where the object fits 5457 // entirely in R registers. Maybe later. 5458 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5459 MachinePointerInfo(), false, false, 0); 5460 MemOpChains.push_back(Store); 5461 if (QFPR_idx != NumQFPRs) { 5462 SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, 5463 Store, PtrOff, MachinePointerInfo(), 5464 false, false, false, 0); 5465 MemOpChains.push_back(Load.getValue(1)); 5466 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load)); 5467 } 5468 ArgOffset += (IsF32 ? 16 : 32); 5469 for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) { 5470 if (GPR_idx == NumGPRs) 5471 break; 5472 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5473 DAG.getConstant(i, dl, PtrVT)); 5474 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 5475 false, false, false, 0); 5476 MemOpChains.push_back(Load.getValue(1)); 5477 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5478 } 5479 break; 5480 } 5481 5482 // Non-varargs QPX params go into registers or on the stack. 
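// (QPX is the quad-processing vector extension found on the IBM A2 /
// Blue Gene/Q cores; its v4f64/v4i1 parameters travel in the QPX
// floating-point registers and spill to the parameter save area once
// QFPR_idx reaches NumQFPRs, as handled below.)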
5483 if (QFPR_idx != NumQFPRs) { 5484 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg)); 5485 } else { 5486 if (CallConv == CallingConv::Fast) 5487 ComputePtrOff(); 5488 5489 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5490 true, isTailCall, true, MemOpChains, 5491 TailCallArguments, dl); 5492 if (CallConv == CallingConv::Fast) 5493 ArgOffset += (IsF32 ? 16 : 32); 5494 } 5495 5496 if (CallConv != CallingConv::Fast) 5497 ArgOffset += (IsF32 ? 16 : 32); 5498 break; 5499 } 5500 } 5501 } 5502 5503 assert(NumBytesActuallyUsed == ArgOffset); 5504 (void)NumBytesActuallyUsed; 5505 5506 if (!MemOpChains.empty()) 5507 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5508 5509 // Check if this is an indirect call (MTCTR/BCTRL). 5510 // See PrepareCall() for more information about calls through function 5511 // pointers in the 64-bit SVR4 ABI. 5512 if (!isTailCall && !IsPatchPoint && 5513 !isFunctionGlobalAddress(Callee) && 5514 !isa<ExternalSymbolSDNode>(Callee)) { 5515 // Load r2 into a virtual register and store it to the TOC save area. 5516 setUsesTOCBasePtr(DAG); 5517 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64); 5518 // TOC save area offset. 5519 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset(); 5520 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl); 5521 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 5522 Chain = DAG.getStore( 5523 Val.getValue(1), dl, Val, AddPtr, 5524 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset), 5525 false, false, 0); 5526 // In the ELFv2 ABI, R12 must contain the address of an indirect callee. 5527 // This does not mean the MTCTR instruction must use R12; it's easier 5528 // to model this as an extra parameter, so do that. 5529 if (isELFv2ABI && !IsPatchPoint) 5530 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee)); 5531 } 5532 5533 // Build a sequence of copy-to-reg nodes chained together with token chain 5534 // and flag operands which copy the outgoing args into the appropriate regs. 5535 SDValue InFlag; 5536 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 5537 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 5538 RegsToPass[i].second, InFlag); 5539 InFlag = Chain.getValue(1); 5540 } 5541 5542 if (isTailCall && !IsSibCall) 5543 PrepareTailCall(DAG, InFlag, Chain, dl, true, SPDiff, NumBytes, LROp, 5544 FPOp, true, TailCallArguments); 5545 5546 return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint, hasNest, 5547 DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee, 5548 SPDiff, NumBytes, Ins, InVals, CS); 5549 } 5550 5551 SDValue PPCTargetLowering::LowerCall_Darwin( 5552 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, 5553 bool isTailCall, bool IsPatchPoint, 5554 const SmallVectorImpl<ISD::OutputArg> &Outs, 5555 const SmallVectorImpl<SDValue> &OutVals, 5556 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 5557 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, 5558 ImmutableCallSite *CS) const { 5559 5560 unsigned NumOps = Outs.size(); 5561 5562 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 5563 bool isPPC64 = PtrVT == MVT::i64; 5564 unsigned PtrByteSize = isPPC64 ? 8 : 4; 5565 5566 MachineFunction &MF = DAG.getMachineFunction(); 5567 5568 // Mark this function as potentially containing a function that contains a 5569 // tail call. 
As a consequence, the frame pointer will be used for dynamic stack
5570 // allocation and for restoring the caller's stack pointer in this function's
5571 // epilogue. This is done because the tail-called function might overwrite the
5572 // value in this function's (MF) stack pointer save slot 0(SP).
5573 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5574 CallConv == CallingConv::Fast)
5575 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5576
5577 // Count how many bytes are to be pushed on the stack, including the linkage
5578 // area, and parameter passing area. We start with 24/48 bytes, which is
5579 // prereserved space for [SP][CR][LR][3 x unused].
5580 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5581 unsigned NumBytes = LinkageSize;
5582
5583 // Add up all the space actually used.
5584 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
5585 // they all go in registers, but we must reserve stack space for them for
5586 // possible use by the caller. In varargs or 64-bit calls, parameters are
5587 // assigned stack space in order, with padding so Altivec parameters are
5588 // 16-byte aligned.
5589 unsigned nAltivecParamsAtEnd = 0;
5590 for (unsigned i = 0; i != NumOps; ++i) {
5591 ISD::ArgFlagsTy Flags = Outs[i].Flags;
5592 EVT ArgVT = Outs[i].VT;
5593 // Varargs Altivec parameters are padded to a 16 byte boundary.
5594 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
5595 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
5596 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
5597 if (!isVarArg && !isPPC64) {
5598 // Non-varargs Altivec parameters go after all the non-Altivec
5599 // parameters; handle those later so we know how much padding we need.
5600 nAltivecParamsAtEnd++;
5601 continue;
5602 }
5603 // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
5604 NumBytes = ((NumBytes+15)/16)*16;
5605 }
5606 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5607 }
5608
5609 // Allow for Altivec parameters at the end, if needed.
5610 if (nAltivecParamsAtEnd) {
5611 NumBytes = ((NumBytes+15)/16)*16;
5612 NumBytes += 16*nAltivecParamsAtEnd;
5613 }
5614
5615 // The prolog code of the callee may store up to 8 GPR argument registers to
5616 // the stack, allowing va_start to index over them in memory if it is varargs.
5617 // Because we cannot tell if this is needed on the caller side, we have to
5618 // conservatively assume that it is needed. As such, make sure we have at
5619 // least enough stack space for the caller to store the 8 GPRs.
5620 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5621
5622 // Tail call needs the stack to be aligned.
5623 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5624 CallConv == CallingConv::Fast)
5625 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
5626
5627 // Calculate by how many bytes the stack has to be adjusted in case of tail
5628 // call optimization.
5629 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
5630
5631 // To protect arguments on the stack from being clobbered in a tail call,
5632 // force all the loads to happen before doing any other lowering.
5633 if (isTailCall)
5634 Chain = DAG.getStackArgumentTokenFactor(Chain);
5635
5636 // Adjust the stack pointer for the new arguments...
5637 // These operations are automatically eliminated by the prolog/epilog pass
5638 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5639 dl);
5640 SDValue CallSeqStart = Chain;
5641
5642 // Load the return address and frame pointer so they can be moved somewhere
5643 // else later.
5644 SDValue LROp, FPOp;
5645 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
5646 dl);
5647
5648 // Set up a copy of the stack pointer for use loading and storing any
5649 // arguments that may not fit in the registers available for argument
5650 // passing.
5651 SDValue StackPtr;
5652 if (isPPC64)
5653 StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5654 else
5655 StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5656
5657 // Figure out which arguments are going to go in registers, and which in
5658 // memory. Also, if this is a vararg function, floating point operations
5659 // must be stored to our stack, and loaded into integer regs as well, if
5660 // any integer regs are available for argument passing.
5661 unsigned ArgOffset = LinkageSize;
5662 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
5663
5664 static const MCPhysReg GPR_32[] = { // 32-bit registers.
5665 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
5666 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
5667 };
5668 static const MCPhysReg GPR_64[] = { // 64-bit registers.
5669 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5670 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5671 };
5672 static const MCPhysReg VR[] = {
5673 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
5674 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5675 };
5676 const unsigned NumGPRs = array_lengthof(GPR_32);
5677 const unsigned NumFPRs = 13;
5678 const unsigned NumVRs = array_lengthof(VR);
5679
5680 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
5681
5682 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5683 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5684
5685 SmallVector<SDValue, 8> MemOpChains;
5686 for (unsigned i = 0; i != NumOps; ++i) {
5687 SDValue Arg = OutVals[i];
5688 ISD::ArgFlagsTy Flags = Outs[i].Flags;
5689
5690 // PtrOff will be used to store the current argument to the stack if a
5691 // register cannot be found for it.
5692 SDValue PtrOff;
5693
5694 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
5695
5696 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
5697
5698 // On PPC64, promote integers to 64-bit values.
5699 if (isPPC64 && Arg.getValueType() == MVT::i32) {
5700 // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
5701 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
5702 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
5703 }
5704
5705 // FIXME memcpy is used way more than necessary. Correctness first.
5706 // Note: "by value" is code for passing a structure by value, not
5707 // basic types.
5708 if (Flags.isByVal()) {
5709 unsigned Size = Flags.getByValSize();
5710 // Very small objects are passed right-justified. Everything else is
5711 // passed left-justified.
5712 if (Size==1 || Size==2) {
5713 EVT VT = (Size==1) ?
MVT::i8 : MVT::i16; 5714 if (GPR_idx != NumGPRs) { 5715 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 5716 MachinePointerInfo(), VT, 5717 false, false, false, 0); 5718 MemOpChains.push_back(Load.getValue(1)); 5719 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5720 5721 ArgOffset += PtrByteSize; 5722 } else { 5723 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 5724 PtrOff.getValueType()); 5725 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5726 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5727 CallSeqStart, 5728 Flags, DAG, dl); 5729 ArgOffset += PtrByteSize; 5730 } 5731 continue; 5732 } 5733 // Copy entire object into memory. There are cases where gcc-generated 5734 // code assumes it is there, even if it could be put entirely into 5735 // registers. (This is not what the doc says.) 5736 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5737 CallSeqStart, 5738 Flags, DAG, dl); 5739 5740 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 5741 // copy the pieces of the object that fit into registers from the 5742 // parameter save area. 5743 for (unsigned j=0; j<Size; j+=PtrByteSize) { 5744 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 5745 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 5746 if (GPR_idx != NumGPRs) { 5747 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 5748 MachinePointerInfo(), 5749 false, false, false, 0); 5750 MemOpChains.push_back(Load.getValue(1)); 5751 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5752 ArgOffset += PtrByteSize; 5753 } else { 5754 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 5755 break; 5756 } 5757 } 5758 continue; 5759 } 5760 5761 switch (Arg.getSimpleValueType().SimpleTy) { 5762 default: llvm_unreachable("Unexpected ValueType for argument!"); 5763 case MVT::i1: 5764 case MVT::i32: 5765 case MVT::i64: 5766 if (GPR_idx != NumGPRs) { 5767 if (Arg.getValueType() == MVT::i1) 5768 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg); 5769 5770 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 5771 } else { 5772 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5773 isPPC64, isTailCall, false, MemOpChains, 5774 TailCallArguments, dl); 5775 } 5776 ArgOffset += PtrByteSize; 5777 break; 5778 case MVT::f32: 5779 case MVT::f64: 5780 if (FPR_idx != NumFPRs) { 5781 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 5782 5783 if (isVarArg) { 5784 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5785 MachinePointerInfo(), false, false, 0); 5786 MemOpChains.push_back(Store); 5787 5788 // Float varargs are always shadowed in available integer registers 5789 if (GPR_idx != NumGPRs) { 5790 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 5791 MachinePointerInfo(), false, false, 5792 false, 0); 5793 MemOpChains.push_back(Load.getValue(1)); 5794 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5795 } 5796 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 5797 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 5798 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 5799 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 5800 MachinePointerInfo(), 5801 false, false, false, 0); 5802 MemOpChains.push_back(Load.getValue(1)); 5803 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5804 } 5805 } else { 5806 // If we have any FPRs remaining, we may also have GPRs remaining. 
5807 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 5808 // GPRs. 5809 if (GPR_idx != NumGPRs) 5810 ++GPR_idx; 5811 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 5812 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 5813 ++GPR_idx; 5814 } 5815 } else 5816 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5817 isPPC64, isTailCall, false, MemOpChains, 5818 TailCallArguments, dl); 5819 if (isPPC64) 5820 ArgOffset += 8; 5821 else 5822 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 5823 break; 5824 case MVT::v4f32: 5825 case MVT::v4i32: 5826 case MVT::v8i16: 5827 case MVT::v16i8: 5828 if (isVarArg) { 5829 // These go aligned on the stack, or in the corresponding R registers 5830 // when within range. The Darwin PPC ABI doc claims they also go in 5831 // V registers; in fact gcc does this only for arguments that are 5832 // prototyped, not for those that match the ... We do it for all 5833 // arguments, seems to work. 5834 while (ArgOffset % 16 !=0) { 5835 ArgOffset += PtrByteSize; 5836 if (GPR_idx != NumGPRs) 5837 GPR_idx++; 5838 } 5839 // We could elide this store in the case where the object fits 5840 // entirely in R registers. Maybe later. 5841 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 5842 DAG.getConstant(ArgOffset, dl, PtrVT)); 5843 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5844 MachinePointerInfo(), false, false, 0); 5845 MemOpChains.push_back(Store); 5846 if (VR_idx != NumVRs) { 5847 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 5848 MachinePointerInfo(), 5849 false, false, false, 0); 5850 MemOpChains.push_back(Load.getValue(1)); 5851 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 5852 } 5853 ArgOffset += 16; 5854 for (unsigned i=0; i<16; i+=PtrByteSize) { 5855 if (GPR_idx == NumGPRs) 5856 break; 5857 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5858 DAG.getConstant(i, dl, PtrVT)); 5859 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 5860 false, false, false, 0); 5861 MemOpChains.push_back(Load.getValue(1)); 5862 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5863 } 5864 break; 5865 } 5866 5867 // Non-varargs Altivec params generally go in registers, but have 5868 // stack space allocated at the end. 5869 if (VR_idx != NumVRs) { 5870 // Doesn't have GPR space allocated. 5871 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 5872 } else if (nAltivecParamsAtEnd==0) { 5873 // We are emitting Altivec params in order. 5874 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5875 isPPC64, isTailCall, true, MemOpChains, 5876 TailCallArguments, dl); 5877 ArgOffset += 16; 5878 } 5879 break; 5880 } 5881 } 5882 // If all Altivec parameters fit in registers, as they usually do, 5883 // they get stack space following the non-Altivec parameters. We 5884 // don't track this here because nobody below needs it. 5885 // If there are more Altivec parameters than fit in registers emit 5886 // the stores here. 5887 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 5888 unsigned j = 0; 5889 // Offset is aligned; skip 1st 12 params which go in V registers. 5890 ArgOffset = ((ArgOffset+15)/16)*16; 5891 ArgOffset += 12*16; 5892 for (unsigned i = 0; i != NumOps; ++i) { 5893 SDValue Arg = OutVals[i]; 5894 EVT ArgType = Outs[i].VT; 5895 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 5896 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 5897 if (++j > NumVRs) { 5898 SDValue PtrOff; 5899 // We are emitting Altivec params in order. 
5900 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5901 isPPC64, isTailCall, true, MemOpChains, 5902 TailCallArguments, dl); 5903 ArgOffset += 16; 5904 } 5905 } 5906 } 5907 } 5908 5909 if (!MemOpChains.empty()) 5910 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5911 5912 // On Darwin, R12 must contain the address of an indirect callee. This does 5913 // not mean the MTCTR instruction must use R12; it's easier to model this as 5914 // an extra parameter, so do that. 5915 if (!isTailCall && 5916 !isFunctionGlobalAddress(Callee) && 5917 !isa<ExternalSymbolSDNode>(Callee) && 5918 !isBLACompatibleAddress(Callee, DAG)) 5919 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 : 5920 PPC::R12), Callee)); 5921 5922 // Build a sequence of copy-to-reg nodes chained together with token chain 5923 // and flag operands which copy the outgoing args into the appropriate regs. 5924 SDValue InFlag; 5925 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 5926 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 5927 RegsToPass[i].second, InFlag); 5928 InFlag = Chain.getValue(1); 5929 } 5930 5931 if (isTailCall) 5932 PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp, 5933 FPOp, true, TailCallArguments); 5934 5935 return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint, 5936 /* unused except on PPC64 ELFv1 */ false, DAG, 5937 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 5938 NumBytes, Ins, InVals, CS); 5939 } 5940 5941 bool 5942 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 5943 MachineFunction &MF, bool isVarArg, 5944 const SmallVectorImpl<ISD::OutputArg> &Outs, 5945 LLVMContext &Context) const { 5946 SmallVector<CCValAssign, 16> RVLocs; 5947 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 5948 return CCInfo.CheckReturn(Outs, RetCC_PPC); 5949 } 5950 5951 SDValue 5952 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 5953 bool isVarArg, 5954 const SmallVectorImpl<ISD::OutputArg> &Outs, 5955 const SmallVectorImpl<SDValue> &OutVals, 5956 const SDLoc &dl, SelectionDAG &DAG) const { 5957 5958 SmallVector<CCValAssign, 16> RVLocs; 5959 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 5960 *DAG.getContext()); 5961 CCInfo.AnalyzeReturn(Outs, RetCC_PPC); 5962 5963 SDValue Flag; 5964 SmallVector<SDValue, 4> RetOps(1, Chain); 5965 5966 // Copy the result values into the output registers. 
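  // (Illustration added for clarity; it is not part of the original logic.)
  // The loop below widens narrow return values to their assigned location
  // register: returning a value narrower than the location type (e.g. an i8
  // living in a 32- or 64-bit GPR) produces a CCValAssign with SExt/ZExt/AExt
  // LocInfo, and the matching *_EXTEND node is emitted before the CopyToReg.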
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[i];

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *I =
      TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
  if (I) {
    for (; *I; ++I) {
      if (PPC::G8RCRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::i64));
      else if (PPC::F8RCRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
      else if (PPC::CRRCRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::i1));
      else if (PPC::VRRCRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::Other));
      else
        llvm_unreachable("Unexpected register class in CSRsViaCopy!");
    }
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
}

SDValue PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(
    SDValue Op, SelectionDAG &DAG, const PPCSubtarget &Subtarget) const {
  SDLoc dl(Op);

  // Get the correct type for integers.
  EVT IntVT = Op.getValueType();

  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
  // Build a DYNAREAOFFSET node.
  SDValue Ops[2] = {Chain, FPSIdx};
  SDVTList VTs = DAG.getVTList(IntVT);
  return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
}

SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
                                             const PPCSubtarget &Subtarget) const {
  // When we pop the dynamic allocation we need to restore the SP link.
  SDLoc dl(Op);

  // Get the correct type for pointers.
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());

  // Construct the stack pointer operand.
  bool isPPC64 = Subtarget.isPPC64();
  unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
  SDValue StackPtr = DAG.getRegister(SP, PtrVT);

  // Get the operands for the STACKRESTORE.
  SDValue Chain = Op.getOperand(0);
  SDValue SaveSP = Op.getOperand(1);

  // Load the old link SP.
  SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr,
                                   MachinePointerInfo(),
                                   false, false, false, 0);

  // Restore the stack pointer.
  Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);

  // Store the old link SP.
  return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo(),
                      false, false, 0);
}

SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = Subtarget.isPPC64();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());

  // Get the current return address save index.  The users of this index
  // are primarily the tail-call lowering code.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int RASI = FI->getReturnAddrSaveIndex();

  // If the return address save index hasn't been defined yet.
  if (!RASI) {
    // Find out the fixed offset of the return address save area.
    int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
    // Allocate the frame index for the return address save area.
    RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
    // Save the result.
    FI->setReturnAddrSaveIndex(RASI);
  }
  return DAG.getFrameIndex(RASI, PtrVT);
}

SDValue
PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = Subtarget.isPPC64();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());

  // Get the current frame pointer save index.  The users of this index will
  // be primarily DYNALLOC instructions.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int FPSI = FI->getFramePointerSaveIndex();

  // If the frame pointer save index hasn't been defined yet.
  if (!FPSI) {
    // Find out the fixed offset of the frame pointer save area.
    int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
    // Allocate the frame index for the frame pointer save area.
    FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
    // Save the result.
    FI->setFramePointerSaveIndex(FPSI);
  }
  return DAG.getFrameIndex(FPSI, PtrVT);
}

SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                   SelectionDAG &DAG,
                                                   const PPCSubtarget &Subtarget) const {
  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue Size  = Op.getOperand(1);
  SDLoc dl(Op);

  // Get the correct type for pointers.
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  // Negate the size.
  SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
                                DAG.getConstant(0, dl, PtrVT), Size);
  // Construct a node for the frame pointer save index.
  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
  // Build a DYNALLOC node.
6127 SDValue Ops[3] = { Chain, NegSize, FPSIdx }; 6128 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other); 6129 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops); 6130 } 6131 6132 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, 6133 SelectionDAG &DAG) const { 6134 SDLoc DL(Op); 6135 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL, 6136 DAG.getVTList(MVT::i32, MVT::Other), 6137 Op.getOperand(0), Op.getOperand(1)); 6138 } 6139 6140 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, 6141 SelectionDAG &DAG) const { 6142 SDLoc DL(Op); 6143 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other, 6144 Op.getOperand(0), Op.getOperand(1)); 6145 } 6146 6147 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 6148 if (Op.getValueType().isVector()) 6149 return LowerVectorLoad(Op, DAG); 6150 6151 assert(Op.getValueType() == MVT::i1 && 6152 "Custom lowering only for i1 loads"); 6153 6154 // First, load 8 bits into 32 bits, then truncate to 1 bit. 6155 6156 SDLoc dl(Op); 6157 LoadSDNode *LD = cast<LoadSDNode>(Op); 6158 6159 SDValue Chain = LD->getChain(); 6160 SDValue BasePtr = LD->getBasePtr(); 6161 MachineMemOperand *MMO = LD->getMemOperand(); 6162 6163 SDValue NewLD = 6164 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain, 6165 BasePtr, MVT::i8, MMO); 6166 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD); 6167 6168 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) }; 6169 return DAG.getMergeValues(Ops, dl); 6170 } 6171 6172 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 6173 if (Op.getOperand(1).getValueType().isVector()) 6174 return LowerVectorStore(Op, DAG); 6175 6176 assert(Op.getOperand(1).getValueType() == MVT::i1 && 6177 "Custom lowering only for i1 stores"); 6178 6179 // First, zero extend to 32 bits, then use a truncating store to 8 bits. 6180 6181 SDLoc dl(Op); 6182 StoreSDNode *ST = cast<StoreSDNode>(Op); 6183 6184 SDValue Chain = ST->getChain(); 6185 SDValue BasePtr = ST->getBasePtr(); 6186 SDValue Value = ST->getValue(); 6187 MachineMemOperand *MMO = ST->getMemOperand(); 6188 6189 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()), 6190 Value); 6191 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO); 6192 } 6193 6194 // FIXME: Remove this once the ANDI glue bug is fixed: 6195 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 6196 assert(Op.getValueType() == MVT::i1 && 6197 "Custom lowering only for i1 results"); 6198 6199 SDLoc DL(Op); 6200 return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1, 6201 Op.getOperand(0)); 6202 } 6203 6204 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when 6205 /// possible. 6206 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 6207 // Not FP? Not a fsel. 6208 if (!Op.getOperand(0).getValueType().isFloatingPoint() || 6209 !Op.getOperand(2).getValueType().isFloatingPoint()) 6210 return Op; 6211 6212 // We might be able to do better than this under some circumstances, but in 6213 // general, fsel-based lowering of select is a finite-math-only optimization. 6214 // For more information, see section F.3 of the 2.06 ISA specification. 6215 if (!DAG.getTarget().Options.NoInfsFPMath || 6216 !DAG.getTarget().Options.NoNaNsFPMath) 6217 return Op; 6218 // TODO: Propagate flags from the select rather than global settings. 
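  // As a reference for the cases below (a paraphrase of the ISA, added for
  // clarity): fsel computes FRT = (FRA >= 0.0) ? FRC : FRB, so every
  // select_cc is recast as a comparison against zero.  For the SETGE cases,
  // roughly:
  //   select_cc x, 0.0, t, f, ge  -->  fsel x,   t, f
  //   select_cc x, y,   t, f, ge  -->  fsel x-y, t, f
  // The subtraction in the second form is why NaNs and infinities must be
  // excluded.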
  SDNodeFlags Flags;
  Flags.setNoInfs(true);
  Flags.setNoNaNs(true);

  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();

  EVT ResVT = Op.getValueType();
  EVT CmpVT = Op.getOperand(0).getValueType();
  SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
  SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
  SDLoc dl(Op);

  // If the RHS of the comparison is a 0.0, we don't need to do the
  // subtraction at all.
  SDValue Sel1;
  if (isFloatingPointZero(RHS))
    switch (CC) {
    default: break;       // SETUO etc aren't handled by fsel.
    case ISD::SETNE:
      std::swap(TV, FV);
    case ISD::SETEQ:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
      if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
        Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
    case ISD::SETULT:
    case ISD::SETLT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
    case ISD::SETOGE:
    case ISD::SETGE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
    case ISD::SETUGT:
    case ISD::SETGT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setgt
    case ISD::SETOLE:
    case ISD::SETLE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
    }

  SDValue Cmp;
  switch (CC) {
  default: break;       // SETUO etc aren't handled by fsel.
6269 case ISD::SETNE: 6270 std::swap(TV, FV); 6271 case ISD::SETEQ: 6272 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, &Flags); 6273 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6274 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6275 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6276 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 6277 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 6278 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6279 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 6280 case ISD::SETULT: 6281 case ISD::SETLT: 6282 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, &Flags); 6283 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6284 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6285 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6286 case ISD::SETOGE: 6287 case ISD::SETGE: 6288 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, &Flags); 6289 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6290 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6291 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6292 case ISD::SETUGT: 6293 case ISD::SETGT: 6294 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, &Flags); 6295 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6296 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6297 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6298 case ISD::SETOLE: 6299 case ISD::SETLE: 6300 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, &Flags); 6301 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6302 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6303 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6304 } 6305 return Op; 6306 } 6307 6308 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI, 6309 SelectionDAG &DAG, 6310 const SDLoc &dl) const { 6311 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6312 SDValue Src = Op.getOperand(0); 6313 if (Src.getValueType() == MVT::f32) 6314 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6315 6316 SDValue Tmp; 6317 switch (Op.getSimpleValueType().SimpleTy) { 6318 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6319 case MVT::i32: 6320 Tmp = DAG.getNode( 6321 Op.getOpcode() == ISD::FP_TO_SINT 6322 ? PPCISD::FCTIWZ 6323 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6324 dl, MVT::f64, Src); 6325 break; 6326 case MVT::i64: 6327 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6328 "i64 FP_TO_UINT is supported only with FPCVT"); 6329 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 6330 PPCISD::FCTIDUZ, 6331 dl, MVT::f64, Src); 6332 break; 6333 } 6334 6335 // Convert the FP value to an int value through memory. 6336 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 6337 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()); 6338 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 6339 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 6340 MachinePointerInfo MPI = 6341 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); 6342 6343 // Emit a store to the stack slot. 
6344 SDValue Chain; 6345 if (i32Stack) { 6346 MachineFunction &MF = DAG.getMachineFunction(); 6347 MachineMemOperand *MMO = 6348 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 6349 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 6350 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 6351 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 6352 } else 6353 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, 6354 MPI, false, false, 0); 6355 6356 // Result is a load from the stack slot. If loading 4 bytes, make sure to 6357 // add in a bias on big endian. 6358 if (Op.getValueType() == MVT::i32 && !i32Stack) { 6359 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 6360 DAG.getConstant(4, dl, FIPtr.getValueType())); 6361 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4); 6362 } 6363 6364 RLI.Chain = Chain; 6365 RLI.Ptr = FIPtr; 6366 RLI.MPI = MPI; 6367 } 6368 6369 /// \brief Custom lowers floating point to integer conversions to use 6370 /// the direct move instructions available in ISA 2.07 to avoid the 6371 /// need for load/store combinations. 6372 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 6373 SelectionDAG &DAG, 6374 const SDLoc &dl) const { 6375 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6376 SDValue Src = Op.getOperand(0); 6377 6378 if (Src.getValueType() == MVT::f32) 6379 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6380 6381 SDValue Tmp; 6382 switch (Op.getSimpleValueType().SimpleTy) { 6383 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6384 case MVT::i32: 6385 Tmp = DAG.getNode( 6386 Op.getOpcode() == ISD::FP_TO_SINT 6387 ? PPCISD::FCTIWZ 6388 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6389 dl, MVT::f64, Src); 6390 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp); 6391 break; 6392 case MVT::i64: 6393 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6394 "i64 FP_TO_UINT is supported only with FPCVT"); 6395 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 6396 PPCISD::FCTIDUZ, 6397 dl, MVT::f64, Src); 6398 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp); 6399 break; 6400 } 6401 return Tmp; 6402 } 6403 6404 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 6405 const SDLoc &dl) const { 6406 if (Subtarget.hasDirectMove() && Subtarget.isPPC64()) 6407 return LowerFP_TO_INTDirectMove(Op, DAG, dl); 6408 6409 ReuseLoadInfo RLI; 6410 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6411 6412 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, false, 6413 false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo, 6414 RLI.Ranges); 6415 } 6416 6417 // We're trying to insert a regular store, S, and then a load, L. If the 6418 // incoming value, O, is a load, we might just be able to have our load use the 6419 // address used by O. However, we don't know if anything else will store to 6420 // that address before we can load from it. To prevent this situation, we need 6421 // to insert our load, L, into the chain as a peer of O. To do this, we give L 6422 // the same chain operand as O, we create a token factor from the chain results 6423 // of O and L, and we replace all uses of O's chain result with that token 6424 // factor (see spliceIntoChain below for this last part). 
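// Pictorially, the transformation described above (a sketch added for
// clarity, not tied to any concrete input):
//
//   before:  O = load @p   (chain operand Ch); other users of O's chain ...
//   after:   L = load @p   (chain operand Ch, the same as O's)
//            TF = TokenFactor(O.chain, L.chain)
//            former users of O's chain result now use TF
//
// so nothing that stores to @p can be scheduled between O and L.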
6425 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT, 6426 ReuseLoadInfo &RLI, 6427 SelectionDAG &DAG, 6428 ISD::LoadExtType ET) const { 6429 SDLoc dl(Op); 6430 if (ET == ISD::NON_EXTLOAD && 6431 (Op.getOpcode() == ISD::FP_TO_UINT || 6432 Op.getOpcode() == ISD::FP_TO_SINT) && 6433 isOperationLegalOrCustom(Op.getOpcode(), 6434 Op.getOperand(0).getValueType())) { 6435 6436 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6437 return true; 6438 } 6439 6440 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op); 6441 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() || 6442 LD->isNonTemporal()) 6443 return false; 6444 if (LD->getMemoryVT() != MemVT) 6445 return false; 6446 6447 RLI.Ptr = LD->getBasePtr(); 6448 if (LD->isIndexed() && !LD->getOffset().isUndef()) { 6449 assert(LD->getAddressingMode() == ISD::PRE_INC && 6450 "Non-pre-inc AM on PPC?"); 6451 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr, 6452 LD->getOffset()); 6453 } 6454 6455 RLI.Chain = LD->getChain(); 6456 RLI.MPI = LD->getPointerInfo(); 6457 RLI.IsInvariant = LD->isInvariant(); 6458 RLI.Alignment = LD->getAlignment(); 6459 RLI.AAInfo = LD->getAAInfo(); 6460 RLI.Ranges = LD->getRanges(); 6461 6462 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1); 6463 return true; 6464 } 6465 6466 // Given the head of the old chain, ResChain, insert a token factor containing 6467 // it and NewResChain, and make users of ResChain now be users of that token 6468 // factor. 6469 void PPCTargetLowering::spliceIntoChain(SDValue ResChain, 6470 SDValue NewResChain, 6471 SelectionDAG &DAG) const { 6472 if (!ResChain) 6473 return; 6474 6475 SDLoc dl(NewResChain); 6476 6477 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 6478 NewResChain, DAG.getUNDEF(MVT::Other)); 6479 assert(TF.getNode() != NewResChain.getNode() && 6480 "A new TF really is required here"); 6481 6482 DAG.ReplaceAllUsesOfValueWith(ResChain, TF); 6483 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain); 6484 } 6485 6486 /// \brief Analyze profitability of direct move 6487 /// prefer float load to int load plus direct move 6488 /// when there is no integer use of int load 6489 static bool directMoveIsProfitable(const SDValue &Op) { 6490 SDNode *Origin = Op.getOperand(0).getNode(); 6491 if (Origin->getOpcode() != ISD::LOAD) 6492 return true; 6493 6494 for (SDNode::use_iterator UI = Origin->use_begin(), 6495 UE = Origin->use_end(); 6496 UI != UE; ++UI) { 6497 6498 // Only look at the users of the loaded value. 6499 if (UI.getUse().get().getResNo() != 0) 6500 continue; 6501 6502 if (UI->getOpcode() != ISD::SINT_TO_FP && 6503 UI->getOpcode() != ISD::UINT_TO_FP) 6504 return true; 6505 } 6506 6507 return false; 6508 } 6509 6510 /// \brief Custom lowers integer to floating point conversions to use 6511 /// the direct move instructions available in ISA 2.07 to avoid the 6512 /// need for load/store combinations. 6513 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op, 6514 SelectionDAG &DAG, 6515 const SDLoc &dl) const { 6516 assert((Op.getValueType() == MVT::f32 || 6517 Op.getValueType() == MVT::f64) && 6518 "Invalid floating point type as target of conversion"); 6519 assert(Subtarget.hasFPCVT() && 6520 "Int to FP conversions with direct moves require FPCVT"); 6521 SDValue FP; 6522 SDValue Src = Op.getOperand(0); 6523 bool SinglePrec = Op.getValueType() == MVT::f32; 6524 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32; 6525 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP; 6526 unsigned ConvOp = Signed ? (SinglePrec ? 
PPCISD::FCFIDS : PPCISD::FCFID) : 6527 (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU); 6528 6529 if (WordInt) { 6530 FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ, 6531 dl, MVT::f64, Src); 6532 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 6533 } 6534 else { 6535 FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src); 6536 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 6537 } 6538 6539 return FP; 6540 } 6541 6542 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, 6543 SelectionDAG &DAG) const { 6544 SDLoc dl(Op); 6545 6546 if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) { 6547 if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64) 6548 return SDValue(); 6549 6550 SDValue Value = Op.getOperand(0); 6551 // The values are now known to be -1 (false) or 1 (true). To convert this 6552 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 6553 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 6554 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 6555 6556 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 6557 6558 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 6559 6560 if (Op.getValueType() != MVT::v4f64) 6561 Value = DAG.getNode(ISD::FP_ROUND, dl, 6562 Op.getValueType(), Value, 6563 DAG.getIntPtrConstant(1, dl)); 6564 return Value; 6565 } 6566 6567 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 6568 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 6569 return SDValue(); 6570 6571 if (Op.getOperand(0).getValueType() == MVT::i1) 6572 return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0), 6573 DAG.getConstantFP(1.0, dl, Op.getValueType()), 6574 DAG.getConstantFP(0.0, dl, Op.getValueType())); 6575 6576 // If we have direct moves, we can do all the conversion, skip the store/load 6577 // however, without FPCVT we can't do most conversions. 6578 if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) && 6579 Subtarget.isPPC64() && Subtarget.hasFPCVT()) 6580 return LowerINT_TO_FPDirectMove(Op, DAG, dl); 6581 6582 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) && 6583 "UINT_TO_FP is supported only with FPCVT"); 6584 6585 // If we have FCFIDS, then use it when converting to single-precision. 6586 // Otherwise, convert to double-precision and then round. 6587 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 6588 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS 6589 : PPCISD::FCFIDS) 6590 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU 6591 : PPCISD::FCFID); 6592 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 6593 ? MVT::f32 6594 : MVT::f64; 6595 6596 if (Op.getOperand(0).getValueType() == MVT::i64) { 6597 SDValue SINT = Op.getOperand(0); 6598 // When converting to single-precision, we actually need to convert 6599 // to double-precision first and then round to single-precision. 6600 // To avoid double-rounding effects during that operation, we have 6601 // to prepare the input operand. Bits that might be truncated when 6602 // converting to double-precision are replaced by a bit that won't 6603 // be lost at this stage, but is below the single-precision rounding 6604 // position. 6605 // 6606 // However, if -enable-unsafe-fp-math is in effect, accept double 6607 // rounding to avoid the extra overhead. 
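    // Worked example of the twiddling described above (a sketch; the
    // constant is illustrative, not taken from the code): for
    // SINT = 0x0010000000000401 the low 11 bits (0x401) are nonzero, so
    // (SINT & 2047) + 2047 = 0xC00 carries into bit 11; after the OR with
    // SINT and the AND with ~2047 the result is 0x0010000000000800.  The
    // low 11 bits are cleared and bit 11 is left set as a sticky bit, so
    // the final rounding to single-precision still rounds in the correct
    // direction.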
6608 if (Op.getValueType() == MVT::f32 && 6609 !Subtarget.hasFPCVT() && 6610 !DAG.getTarget().Options.UnsafeFPMath) { 6611 6612 // Twiddle input to make sure the low 11 bits are zero. (If this 6613 // is the case, we are guaranteed the value will fit into the 53 bit 6614 // mantissa of an IEEE double-precision value without rounding.) 6615 // If any of those low 11 bits were not zero originally, make sure 6616 // bit 12 (value 2048) is set instead, so that the final rounding 6617 // to single-precision gets the correct result. 6618 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 6619 SINT, DAG.getConstant(2047, dl, MVT::i64)); 6620 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 6621 Round, DAG.getConstant(2047, dl, MVT::i64)); 6622 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 6623 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 6624 Round, DAG.getConstant(-2048, dl, MVT::i64)); 6625 6626 // However, we cannot use that value unconditionally: if the magnitude 6627 // of the input value is small, the bit-twiddling we did above might 6628 // end up visibly changing the output. Fortunately, in that case, we 6629 // don't need to twiddle bits since the original input will convert 6630 // exactly to double-precision floating-point already. Therefore, 6631 // construct a conditional to use the original value if the top 11 6632 // bits are all sign-bit copies, and use the rounded value computed 6633 // above otherwise. 6634 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 6635 SINT, DAG.getConstant(53, dl, MVT::i32)); 6636 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 6637 Cond, DAG.getConstant(1, dl, MVT::i64)); 6638 Cond = DAG.getSetCC(dl, MVT::i32, 6639 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT); 6640 6641 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 6642 } 6643 6644 ReuseLoadInfo RLI; 6645 SDValue Bits; 6646 6647 MachineFunction &MF = DAG.getMachineFunction(); 6648 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 6649 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, false, 6650 false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo, 6651 RLI.Ranges); 6652 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6653 } else if (Subtarget.hasLFIWAX() && 6654 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) { 6655 MachineMemOperand *MMO = 6656 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6657 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6658 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6659 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl, 6660 DAG.getVTList(MVT::f64, MVT::Other), 6661 Ops, MVT::i32, MMO); 6662 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6663 } else if (Subtarget.hasFPCVT() && 6664 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) { 6665 MachineMemOperand *MMO = 6666 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6667 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6668 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6669 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl, 6670 DAG.getVTList(MVT::f64, MVT::Other), 6671 Ops, MVT::i32, MMO); 6672 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6673 } else if (((Subtarget.hasLFIWAX() && 6674 SINT.getOpcode() == ISD::SIGN_EXTEND) || 6675 (Subtarget.hasFPCVT() && 6676 SINT.getOpcode() == ISD::ZERO_EXTEND)) && 6677 SINT.getOperand(0).getValueType() == MVT::i32) { 6678 MachineFrameInfo *FrameInfo = MF.getFrameInfo(); 6679 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 6680 6681 int FrameIdx = 
FrameInfo->CreateStackObject(4, 4, false); 6682 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6683 6684 SDValue Store = DAG.getStore( 6685 DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx, 6686 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx), 6687 false, false, 0); 6688 6689 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 6690 "Expected an i32 store"); 6691 6692 RLI.Ptr = FIdx; 6693 RLI.Chain = Store; 6694 RLI.MPI = 6695 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 6696 RLI.Alignment = 4; 6697 6698 MachineMemOperand *MMO = 6699 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6700 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6701 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6702 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ? 6703 PPCISD::LFIWZX : PPCISD::LFIWAX, 6704 dl, DAG.getVTList(MVT::f64, MVT::Other), 6705 Ops, MVT::i32, MMO); 6706 } else 6707 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 6708 6709 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 6710 6711 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 6712 FP = DAG.getNode(ISD::FP_ROUND, dl, 6713 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 6714 return FP; 6715 } 6716 6717 assert(Op.getOperand(0).getValueType() == MVT::i32 && 6718 "Unhandled INT_TO_FP type in custom expander!"); 6719 // Since we only generate this in 64-bit mode, we can take advantage of 6720 // 64-bit registers. In particular, sign extend the input value into the 6721 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 6722 // then lfd it and fcfid it. 6723 MachineFunction &MF = DAG.getMachineFunction(); 6724 MachineFrameInfo *FrameInfo = MF.getFrameInfo(); 6725 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 6726 6727 SDValue Ld; 6728 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 6729 ReuseLoadInfo RLI; 6730 bool ReusingLoad; 6731 if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI, 6732 DAG))) { 6733 int FrameIdx = FrameInfo->CreateStackObject(4, 4, false); 6734 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6735 6736 SDValue Store = DAG.getStore( 6737 DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 6738 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx), 6739 false, false, 0); 6740 6741 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 6742 "Expected an i32 store"); 6743 6744 RLI.Ptr = FIdx; 6745 RLI.Chain = Store; 6746 RLI.MPI = 6747 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 6748 RLI.Alignment = 4; 6749 } 6750 6751 MachineMemOperand *MMO = 6752 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6753 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6754 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6755 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 6756 PPCISD::LFIWZX : PPCISD::LFIWAX, 6757 dl, DAG.getVTList(MVT::f64, MVT::Other), 6758 Ops, MVT::i32, MMO); 6759 if (ReusingLoad) 6760 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG); 6761 } else { 6762 assert(Subtarget.isPPC64() && 6763 "i32->FP without LFIWAX supported only on PPC64"); 6764 6765 int FrameIdx = FrameInfo->CreateStackObject(8, 8, false); 6766 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6767 6768 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, 6769 Op.getOperand(0)); 6770 6771 // STD the extended value into the stack slot. 
    SDValue Store = DAG.getStore(
        DAG.getEntryNode(), dl, Ext64, FIdx,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx),
        false, false, 0);

    // Load the value as a double.
    Ld = DAG.getLoad(
        MVT::f64, dl, Store, FIdx,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx),
        false, false, false, 0);
  }

  // FCFID it and return it.
  SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
  if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
    FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
                     DAG.getIntPtrConstant(0, dl));
  return FP;
}

SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  /*
   The rounding mode is in bits 30:31 of FPSCR, and has the following
   settings:
     00 Round to nearest
     01 Round to 0
     10 Round to +inf
     11 Round to -inf

   FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

   To perform the conversion, we do:
     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
  */

  MachineFunction &MF = DAG.getMachineFunction();
  EVT VT = Op.getValueType();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());

  // Save FP Control Word to register
  EVT NodeTys[] = {
    MVT::f64,    // return register
    MVT::Glue    // unused in this context
  };
  SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);

  // Save FP register to stack slot
  int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain,
                               StackSlot, MachinePointerInfo(), false, false, 0);

  // Load FP Control Word from low 32 bits of stack slot.
  SDValue Four = DAG.getConstant(4, dl, PtrVT);
  SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
  SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(),
                            false, false, false, 0);

  // Transform as necessary
  SDValue CWD1 =
    DAG.getNode(ISD::AND, dl, MVT::i32,
                CWD, DAG.getConstant(3, dl, MVT::i32));
  SDValue CWD2 =
    DAG.getNode(ISD::SRL, dl, MVT::i32,
                DAG.getNode(ISD::AND, dl, MVT::i32,
                            DAG.getNode(ISD::XOR, dl, MVT::i32,
                                        CWD, DAG.getConstant(3, dl, MVT::i32)),
                            DAG.getConstant(3, dl, MVT::i32)),
                DAG.getConstant(1, dl, MVT::i32));

  SDValue RetVal =
    DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);

  return DAG.getNode((VT.getSizeInBits() < 16 ?
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
}

SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  SDLoc dl(Op);
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SHL!");

  // Expand into a bunch of logical ops.  Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
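  // In pseudo-code, with the input split as Hi:Lo (a sketch added for
  // clarity; it relies on PPCISD::SHL/SRL producing 0 for shift amounts in
  // [BitWidth, 2*BitWidth)):
  //   OutLo = Lo << Amt
  //   OutHi = (Hi << Amt) | (Lo >> (BitWidth-Amt)) | (Lo << (Amt-BitWidth))
  // For Amt < BitWidth the last term is an oversized shift and contributes
  // 0; for Amt >= BitWidth the first two terms are 0 and the last supplies
  // the bits, e.g. BitWidth==32, Amt==40 yields OutHi = Lo << 8, OutLo = 0.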
6866 SDValue Lo = Op.getOperand(0); 6867 SDValue Hi = Op.getOperand(1); 6868 SDValue Amt = Op.getOperand(2); 6869 EVT AmtVT = Amt.getValueType(); 6870 6871 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6872 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6873 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 6874 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 6875 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 6876 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6877 DAG.getConstant(-BitWidth, dl, AmtVT)); 6878 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 6879 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 6880 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 6881 SDValue OutOps[] = { OutLo, OutHi }; 6882 return DAG.getMergeValues(OutOps, dl); 6883 } 6884 6885 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 6886 EVT VT = Op.getValueType(); 6887 SDLoc dl(Op); 6888 unsigned BitWidth = VT.getSizeInBits(); 6889 assert(Op.getNumOperands() == 3 && 6890 VT == Op.getOperand(1).getValueType() && 6891 "Unexpected SRL!"); 6892 6893 // Expand into a bunch of logical ops. Note that these ops 6894 // depend on the PPC behavior for oversized shift amounts. 6895 SDValue Lo = Op.getOperand(0); 6896 SDValue Hi = Op.getOperand(1); 6897 SDValue Amt = Op.getOperand(2); 6898 EVT AmtVT = Amt.getValueType(); 6899 6900 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6901 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6902 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 6903 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 6904 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 6905 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6906 DAG.getConstant(-BitWidth, dl, AmtVT)); 6907 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 6908 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 6909 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 6910 SDValue OutOps[] = { OutLo, OutHi }; 6911 return DAG.getMergeValues(OutOps, dl); 6912 } 6913 6914 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 6915 SDLoc dl(Op); 6916 EVT VT = Op.getValueType(); 6917 unsigned BitWidth = VT.getSizeInBits(); 6918 assert(Op.getNumOperands() == 3 && 6919 VT == Op.getOperand(1).getValueType() && 6920 "Unexpected SRA!"); 6921 6922 // Expand into a bunch of logical ops, followed by a select_cc. 6923 SDValue Lo = Op.getOperand(0); 6924 SDValue Hi = Op.getOperand(1); 6925 SDValue Amt = Op.getOperand(2); 6926 EVT AmtVT = Amt.getValueType(); 6927 6928 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6929 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6930 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 6931 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 6932 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 6933 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6934 DAG.getConstant(-BitWidth, dl, AmtVT)); 6935 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 6936 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 6937 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT), 6938 Tmp4, Tmp6, ISD::SETLE); 6939 SDValue OutOps[] = { OutLo, OutHi }; 6940 return DAG.getMergeValues(OutOps, dl); 6941 } 6942 6943 //===----------------------------------------------------------------------===// 6944 // Vector related lowering. 
6945 // 6946 6947 /// BuildSplatI - Build a canonical splati of Val with an element size of 6948 /// SplatSize. Cast the result to VT. 6949 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 6950 SelectionDAG &DAG, const SDLoc &dl) { 6951 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 6952 6953 static const MVT VTys[] = { // canonical VT to use for each size. 6954 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 6955 }; 6956 6957 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 6958 6959 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 6960 if (Val == -1) 6961 SplatSize = 1; 6962 6963 EVT CanonicalVT = VTys[SplatSize-1]; 6964 6965 // Build a canonical splat for this value. 6966 return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT)); 6967 } 6968 6969 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 6970 /// specified intrinsic ID. 6971 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG, 6972 const SDLoc &dl, EVT DestVT = MVT::Other) { 6973 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 6974 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 6975 DAG.getConstant(IID, dl, MVT::i32), Op); 6976 } 6977 6978 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 6979 /// specified intrinsic ID. 6980 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 6981 SelectionDAG &DAG, const SDLoc &dl, 6982 EVT DestVT = MVT::Other) { 6983 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 6984 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 6985 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS); 6986 } 6987 6988 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 6989 /// specified intrinsic ID. 6990 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 6991 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl, 6992 EVT DestVT = MVT::Other) { 6993 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 6994 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 6995 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2); 6996 } 6997 6998 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 6999 /// amount. The result has the specified value type. 7000 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT, 7001 SelectionDAG &DAG, const SDLoc &dl) { 7002 // Force LHS/RHS to be the right type. 7003 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 7004 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 7005 7006 int Ops[16]; 7007 for (unsigned i = 0; i != 16; ++i) 7008 Ops[i] = i + Amt; 7009 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 7010 return DAG.getNode(ISD::BITCAST, dl, VT, T); 7011 } 7012 7013 // If this is a case we can't handle, return null and let the default 7014 // expansion code take care of it. If we CAN select this case, and if it 7015 // selects to a single instruction, return Op. Otherwise, if we can codegen 7016 // this case more efficiently than a constant pool load, lower it to the 7017 // sequence of ops that should be used. 
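// A few representative outcomes, as a rough guide (an illustrative sketch,
// assuming AltiVec v4i32 splats):
//   splat of 5           -> vspltisw 5                  (single instruction)
//   splat of 24          -> VADD_SPLAT pseudo (vspltisw 12 + vadduwm later)
//   splat of 0x8000_0000 -> vspltisw -1, then vslw by itself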
7018 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 7019 SelectionDAG &DAG) const { 7020 SDLoc dl(Op); 7021 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 7022 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 7023 7024 if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) { 7025 // We first build an i32 vector, load it into a QPX register, 7026 // then convert it to a floating-point vector and compare it 7027 // to a zero vector to get the boolean result. 7028 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 7029 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 7030 MachinePointerInfo PtrInfo = 7031 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7032 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7033 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7034 7035 assert(BVN->getNumOperands() == 4 && 7036 "BUILD_VECTOR for v4i1 does not have 4 operands"); 7037 7038 bool IsConst = true; 7039 for (unsigned i = 0; i < 4; ++i) { 7040 if (BVN->getOperand(i).isUndef()) continue; 7041 if (!isa<ConstantSDNode>(BVN->getOperand(i))) { 7042 IsConst = false; 7043 break; 7044 } 7045 } 7046 7047 if (IsConst) { 7048 Constant *One = 7049 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0); 7050 Constant *NegOne = 7051 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0); 7052 7053 SmallVector<Constant*, 4> CV(4, NegOne); 7054 for (unsigned i = 0; i < 4; ++i) { 7055 if (BVN->getOperand(i).isUndef()) 7056 CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext())); 7057 else if (isNullConstant(BVN->getOperand(i))) 7058 continue; 7059 else 7060 CV[i] = One; 7061 } 7062 7063 Constant *CP = ConstantVector::get(CV); 7064 SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()), 7065 16 /* alignment */); 7066 7067 SmallVector<SDValue, 2> Ops; 7068 Ops.push_back(DAG.getEntryNode()); 7069 Ops.push_back(CPIdx); 7070 7071 SmallVector<EVT, 2> ValueVTs; 7072 ValueVTs.push_back(MVT::v4i1); 7073 ValueVTs.push_back(MVT::Other); // chain 7074 SDVTList VTs = DAG.getVTList(ValueVTs); 7075 7076 return DAG.getMemIntrinsicNode( 7077 PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32, 7078 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 7079 } 7080 7081 SmallVector<SDValue, 4> Stores; 7082 for (unsigned i = 0; i < 4; ++i) { 7083 if (BVN->getOperand(i).isUndef()) continue; 7084 7085 unsigned Offset = 4*i; 7086 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 7087 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 7088 7089 unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize(); 7090 if (StoreSize > 4) { 7091 Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl, 7092 BVN->getOperand(i), Idx, 7093 PtrInfo.getWithOffset(Offset), 7094 MVT::i32, false, false, 0)); 7095 } else { 7096 SDValue StoreValue = BVN->getOperand(i); 7097 if (StoreSize < 4) 7098 StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue); 7099 7100 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, 7101 StoreValue, Idx, 7102 PtrInfo.getWithOffset(Offset), 7103 false, false, 0)); 7104 } 7105 } 7106 7107 SDValue StoreChain; 7108 if (!Stores.empty()) 7109 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 7110 else 7111 StoreChain = DAG.getEntryNode(); 7112 7113 // Now load from v4i32 into the QPX register; this will extend it to 7114 // v4i64 but not yet convert it to a floating point. 
Nevertheless, this 7115 // is typed as v4f64 because the QPX register integer states are not 7116 // explicitly represented. 7117 7118 SmallVector<SDValue, 2> Ops; 7119 Ops.push_back(StoreChain); 7120 Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32)); 7121 Ops.push_back(FIdx); 7122 7123 SmallVector<EVT, 2> ValueVTs; 7124 ValueVTs.push_back(MVT::v4f64); 7125 ValueVTs.push_back(MVT::Other); // chain 7126 SDVTList VTs = DAG.getVTList(ValueVTs); 7127 7128 SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, 7129 dl, VTs, Ops, MVT::v4i32, PtrInfo); 7130 LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 7131 DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32), 7132 LoadedVect); 7133 7134 SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64); 7135 7136 return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ); 7137 } 7138 7139 // All other QPX vectors are handled by generic code. 7140 if (Subtarget.hasQPX()) 7141 return SDValue(); 7142 7143 // Check if this is a splat of a constant value. 7144 APInt APSplatBits, APSplatUndef; 7145 unsigned SplatBitSize; 7146 bool HasAnyUndefs; 7147 if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize, 7148 HasAnyUndefs, 0, !Subtarget.isLittleEndian()) || 7149 SplatBitSize > 32) 7150 return SDValue(); 7151 7152 unsigned SplatBits = APSplatBits.getZExtValue(); 7153 unsigned SplatUndef = APSplatUndef.getZExtValue(); 7154 unsigned SplatSize = SplatBitSize / 8; 7155 7156 // First, handle single instruction cases. 7157 7158 // All zeros? 7159 if (SplatBits == 0) { 7160 // Canonicalize all zero vectors to be v4i32. 7161 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 7162 SDValue Z = DAG.getConstant(0, dl, MVT::v4i32); 7163 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z); 7164 } 7165 return Op; 7166 } 7167 7168 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 7169 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >> 7170 (32-SplatBitSize)); 7171 if (SextVal >= -16 && SextVal <= 15) 7172 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl); 7173 7174 // Two instruction sequences. 7175 7176 // If this value is in the range [-32,30] and is even, use: 7177 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2) 7178 // If this value is in the range [17,31] and is odd, use: 7179 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16) 7180 // If this value is in the range [-31,-17] and is odd, use: 7181 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16) 7182 // Note the last two are three-instruction sequences. 7183 if (SextVal >= -32 && SextVal <= 31) { 7184 // To avoid having these optimizations undone by constant folding, 7185 // we convert to a pseudo that will be expanded later into one of 7186 // the above forms. 7187 SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32); 7188 EVT VT = (SplatSize == 1 ? MVT::v16i8 : 7189 (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32)); 7190 SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32); 7191 SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize); 7192 if (VT == Op.getValueType()) 7193 return RetVal; 7194 else 7195 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal); 7196 } 7197 7198 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is 7199 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important 7200 // for fneg/fabs. 
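  // Concretely (an illustration of the code below): vspltisw -1 makes each
  // word 0xFFFF_FFFF; vslw with that same vector shifts each word left by
  // 31 (the low 5 bits of -1), giving 0x8000_0000; xoring with the
  // all-ones vector then yields 0x7FFF_FFFF.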
  if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
    // Make -1 and vspltisw -1:
    SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);

    // Make the VSLW intrinsic, computing 0x8000_0000.
    SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                   OnesV, DAG, dl);

    // xor by OnesV to invert it.
    Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
  }

  // Check to see if this is a wide variety of vsplti*, binop self cases.
  static const signed char SplatCsts[] = {
    -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
    -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
  };

  for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous (e.g. formation of 0x8000_0000).
    int i = SplatCsts[idx];

    // Figure out what shift amount will be used by altivec if shifted by i in
    // this splat size.
    unsigned TypeShiftAmt = i & (SplatBitSize-1);

    // vsplti + shl self.
    if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
        Intrinsic::ppc_altivec_vslw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + srl self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
        Intrinsic::ppc_altivec_vsrw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + sra self.  (Use an arithmetic shift here; with a logical
    // shift this condition would duplicate the srl case above and the sra
    // sequence could never be selected.)
    if (SextVal == (int)(i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
        Intrinsic::ppc_altivec_vsraw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + rol self.
    if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                         ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
        Intrinsic::ppc_altivec_vrlw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // t = vsplti c, result = vsldoi t, t, 1
    if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ?
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 2
    if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 3
    if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
  }

  return SDValue();
}

/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
                                      SDValue RHS, SelectionDAG &DAG,
                                      const SDLoc &dl) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);

  enum {
    OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
    OP_VMRGHW,
    OP_VMRGLW,
    OP_VSPLTISW0,
    OP_VSPLTISW1,
    OP_VSPLTISW2,
    OP_VSPLTISW3,
    OP_VSLDOI4,
    OP_VSLDOI8,
    OP_VSLDOI12
  };

  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }

  SDValue OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);

  int ShufIdxs[16];
  switch (OpNum) {
  default: llvm_unreachable("Unknown i32 permute!");
  case OP_VMRGHW:
    ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
    ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
    ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
    ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
    break;
  case OP_VMRGLW:
    ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
    ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
    ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
    ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
    break;
  case OP_VSPLTISW0:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+0;
    break;
  case OP_VSPLTISW1:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+4;
    break;
  case OP_VSPLTISW2:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+8;
    break;
  case OP_VSPLTISW3:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+12;
    break;
  case OP_VSLDOI4:
    return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI8:
    return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI12:
    return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
  }
  EVT VT = OpLHS.getValueType();
  OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
  OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
  return DAG.getNode(ISD::BITCAST, dl, VT, T);
}

/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
/// is a shuffle we can handle in a single instruction, return it.  Otherwise,
/// return the code it can be lowered into.  Worst case, it can always be
/// lowered into a vperm.
SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  EVT VT = Op.getValueType();
  bool isLittleEndian = Subtarget.isLittleEndian();

  if (Subtarget.hasVSX()) {
    if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
      int SplatIdx = PPC::getVSPLTImmediate(SVOp, 4, DAG);
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
      SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
                                  DAG.getConstant(SplatIdx, dl, MVT::i32));
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
    }
  }

  if (Subtarget.hasQPX()) {
    if (VT.getVectorNumElements() != 4)
      return SDValue();

    if (V2.isUndef()) V2 = V1;

    int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp);
    if (AlignIdx != -1) {
      return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2,
                         DAG.getConstant(AlignIdx, dl, MVT::i32));
    } else if (SVOp->isSplat()) {
      int SplatIdx = SVOp->getSplatIndex();
      if (SplatIdx >= 4) {
        std::swap(V1, V2);
        SplatIdx -= 4;
      }

      return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1,
                         DAG.getConstant(SplatIdx, dl, MVT::i32));
    }

    // Lower this into a qvgpci/qvfperm pair.

    // Compute the qvgpci literal
    unsigned idx = 0;
    for (unsigned i = 0; i < 4; ++i) {
      int m = SVOp->getMaskElt(i);
      unsigned mm = m >= 0 ? (unsigned) m : i;
      idx |= mm << (3-i)*3;
    }

    SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64,
                             DAG.getConstant(idx, dl, MVT::i32));
    return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3);
  }

  // Cases that are handled by instructions that take permute immediates
  // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
  // selected by the instruction selector.
  if (V2.isUndef()) {
    if (PPC::isSplatShuffleMask(SVOp, 1) ||
        PPC::isSplatShuffleMask(SVOp, 2) ||
        PPC::isSplatShuffleMask(SVOp, 4) ||
        PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
        PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
        PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
        PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
        PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
        PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
        PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
        PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
        PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
        (Subtarget.hasP8Altivec() && (
         PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
         PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
         PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
      return Op;
    }
  }

  // Altivec has a variety of "shuffle immediates" that take two vector inputs
  // and produce a fixed permutation.  If any of these match, do not lower to
  // VPERM.
  unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
  if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
      PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
      PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
      PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
      PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
      PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
      (Subtarget.hasP8Altivec() && (
       PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
       PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
       PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
    return Op;

  // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
  // perfect shuffle table to emit an optimal matching sequence.
  ArrayRef<int> PermMask = SVOp->getMask();

  unsigned PFIndexes[4];
  bool isFourElementShuffle = true;
  for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
    unsigned EltNo = 8;   // Start out undef.
    for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
      if (PermMask[i*4+j] < 0)
        continue;   // Undef, ignore it.

      unsigned ByteSource = PermMask[i*4+j];
      if ((ByteSource & 3) != j) {
        isFourElementShuffle = false;
        break;
      }

      if (EltNo == 8) {
        EltNo = ByteSource/4;
      } else if (EltNo != ByteSource/4) {
        isFourElementShuffle = false;
        break;
      }
    }
    PFIndexes[i] = EltNo;
  }

  // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
  // perfect shuffle vector to determine if it is cost effective to do this as
  // discrete instructions, or whether we should use a vperm.
  // For now, we skip this for little endian until such time as we have a
  // little-endian perfect shuffle table.
  if (isFourElementShuffle && !isLittleEndian) {
    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];

    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    // Determining when to avoid vperm is tricky.  Many things affect the cost
    // of vperm, particularly how many times the perm mask needs to be
    // computed.  For example, if the perm mask can be hoisted out of a loop or
    // is already used (perhaps because there are multiple permutes with the
    // same shuffle mask?) the vperm has a cost of 1.  OTOH, hoisting the
    // permute mask out of the loop requires an extra register.
    //
    // As a compromise, we only emit discrete instructions if the shuffle can
    // be generated in 3 or fewer operations.  When we have loop information
    // available, if this block is within a loop, we should avoid using vperm
    // for 3-operation perms and use a constant pool load instead.
    if (Cost < 3)
      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
  }

  // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
  // vector that will get spilled to the constant pool.
  if (V2.isUndef()) V2 = V1;

  // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
  // that it is in input element units, not in bytes.  Convert now.
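  // For example, shuffle element index 2 of a v4i32 shuffle becomes the four
  // control-vector bytes 8, 9, 10 and 11.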

  // For little endian, the order of the input vectors is reversed, and
  // the permutation mask is complemented with respect to 31.  This is
  // necessary to produce proper semantics with the big-endian-biased vperm
  // instruction.
  EVT EltVT = V1.getValueType().getVectorElementType();
  unsigned BytesPerElement = EltVT.getSizeInBits()/8;

  SmallVector<SDValue, 16> ResultMask;
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
    unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];

    for (unsigned j = 0; j != BytesPerElement; ++j)
      if (isLittleEndian)
        ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
                                             dl, MVT::i32));
      else
        ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
                                             MVT::i32));
  }

  SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
  if (isLittleEndian)
    return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
                       V2, V1, VPermMask);
  else
    return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
                       V1, V2, VPermMask);
}

/// getVectorCompareInfo - Given an intrinsic, return false if it is not a
/// vector comparison.  If it is, return true and fill in Opc/isDot with
/// information about the intrinsic.
static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
                                 bool &isDot, const PPCSubtarget &Subtarget) {
  unsigned IntrinsicID =
    cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
  CompareOpc = -1;
  isDot = false;
  switch (IntrinsicID) {
  default: return false;
  // Comparison predicates.
  case Intrinsic::ppc_altivec_vcmpbfp_p:  CompareOpc = 966; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc =   6; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc =  70; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpequd_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 199;
      isDot = 1;
    } else
      return false;

    break;
  case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtsd_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 967;
      isDot = 1;
    } else
      return false;

    break;
  case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtud_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 711;
      isDot = 1;
    } else
      return false;

    break;
  // VSX predicate comparisons use the same infrastructure
  case Intrinsic::ppc_vsx_xvcmpeqdp_p:
  case Intrinsic::ppc_vsx_xvcmpgedp_p:
  case Intrinsic::ppc_vsx_xvcmpgtdp_p:
  case Intrinsic::ppc_vsx_xvcmpeqsp_p:
  case Intrinsic::ppc_vsx_xvcmpgesp_p:
  case Intrinsic::ppc_vsx_xvcmpgtsp_p:
    if (Subtarget.hasVSX()) {
      switch (IntrinsicID) {
      case Intrinsic::ppc_vsx_xvcmpeqdp_p: CompareOpc =  99; break;
      case Intrinsic::ppc_vsx_xvcmpgedp_p: CompareOpc = 115; break;
      case Intrinsic::ppc_vsx_xvcmpgtdp_p: CompareOpc = 107; break;
      case Intrinsic::ppc_vsx_xvcmpeqsp_p: CompareOpc =  67; break;
      case Intrinsic::ppc_vsx_xvcmpgesp_p: CompareOpc =  83; break;
      case Intrinsic::ppc_vsx_xvcmpgtsp_p: CompareOpc =  75; break;
      }
      isDot = 1;
    }
    else
      return false;

    break;

  // Normal Comparisons.
  case Intrinsic::ppc_altivec_vcmpbfp:  CompareOpc = 966; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpequb: CompareOpc =   6; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpequh: CompareOpc =  70; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpequd:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 199;
      isDot = 0;
    } else
      return false;

    break;
  case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtsd:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 967;
      isDot = 0;
    } else
      return false;

    break;
  case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtud:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 711;
      isDot = 0;
    } else
      return false;

    break;
  }
  return true;
}

/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
/// lower, do it, otherwise return null.
SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                   SelectionDAG &DAG) const {
  unsigned IntrinsicID =
    cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  if (IntrinsicID == Intrinsic::thread_pointer) {
    // Reads the thread pointer register, used for __builtin_thread_pointer.
    bool is64bit = Subtarget.isPPC64();
    return DAG.getRegister(is64bit ? PPC::X13 : PPC::R2,
                           is64bit ? MVT::i64 : MVT::i32);
  }

  // If this is a lowered altivec predicate compare, CompareOpc is set to the
  // opcode number of the comparison.
  SDLoc dl(Op);
  int CompareOpc;
  bool isDot;
  if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
    return SDValue();    // Don't custom lower most intrinsics.

  // If this is a non-dot comparison, make the VCMP node and we are done.
  if (!isDot) {
    SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
                              Op.getOperand(1), Op.getOperand(2),
                              DAG.getConstant(CompareOpc, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
  }

  // Create the PPCISD altivec 'dot' comparison node.
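  // The dot form of the compare writes a summary of the per-lane results
  // into CR6 ("all lanes true" / "no lanes true"); the MFOCRF below copies
  // CR6 into a GPR, and the shift/and sequence that follows isolates the
  // bit that the predicate intrinsic asked for.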
  SDValue Ops[] = {
    Op.getOperand(2),  // LHS
    Op.getOperand(3),  // RHS
    DAG.getConstant(CompareOpc, dl, MVT::i32)
  };
  EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
  SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);

  // Now that we have the comparison, emit a copy from the CR to a GPR.
  // This is flagged to the above dot comparison.
  SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
                              DAG.getRegister(PPC::CR6, MVT::i32),
                              CompNode.getValue(1));

  // Unpack the result based on how the target uses it.
  unsigned BitNo;   // Bit # of CR6.
  bool InvertBit;   // Invert result?
  switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
  default:  // Can't happen, don't crash on invalid number though.
  case 0:   // Return the value of the EQ bit of CR6.
    BitNo = 0; InvertBit = false;
    break;
  case 1:   // Return the inverted value of the EQ bit of CR6.
    BitNo = 0; InvertBit = true;
    break;
  case 2:   // Return the value of the LT bit of CR6.
    BitNo = 2; InvertBit = false;
    break;
  case 3:   // Return the inverted value of the LT bit of CR6.
    BitNo = 2; InvertBit = true;
    break;
  }

  // Shift the bit into the low position.
  Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
                      DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
  // Isolate the bit.
  Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
                      DAG.getConstant(1, dl, MVT::i32));

  // If we are supposed to, toggle the bit.
  if (InvertBit)
    Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
                        DAG.getConstant(1, dl, MVT::i32));
  return Flags;
}

SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // For v2i64 (VSX), we can pattern patch the v2i32 case (using fp <-> int
  // instructions), but for smaller types, we need to first extend up to v2i32
  // before going any farther.
  if (Op.getValueType() == MVT::v2i64) {
    EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    if (ExtVT != MVT::v2i32) {
      Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0));
      Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op,
                       DAG.getValueType(EVT::getVectorVT(*DAG.getContext(),
                                        ExtVT.getVectorElementType(), 4)));
      Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op);
      Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op,
                       DAG.getValueType(MVT::v2i32));
    }

    return Op;
  }

  return SDValue();
}

SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // Create a stack slot that is 16-byte aligned.
  MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(16, 16, false);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  // Store the input value into Value#0 of the stack slot.
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
                               Op.getOperand(0), FIdx, MachinePointerInfo(),
                               false, false, 0);
  // Load it out.
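  // Reloading the full 16 bytes reinterprets the slot as a vector whose
  // element 0 is the scalar just stored; the remaining bytes are unspecified,
  // which is fine because SCALAR_TO_VECTOR leaves all elements other than
  // element 0 undefined.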
  return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo(),
                     false, false, false, 0);
}

SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDNode *N = Op.getNode();

  assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
         "Unknown extract_vector_elt type");

  SDValue Value = N->getOperand(0);

  // The first part of this is like the store lowering except that we don't
  // need to track the chain.

  // The values are now known to be -1 (false) or 1 (true). To convert this
  // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
  // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
  Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

  // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
  // understand how to form the extending load.
  SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);

  Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

  // Now convert to an integer and store.
  Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
                      DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl,
                                      MVT::i32),
                      Value);

  MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(16, 16, false);
  MachinePointerInfo PtrInfo =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  SDValue StoreChain = DAG.getEntryNode();
  SmallVector<SDValue, 2> Ops;
  Ops.push_back(StoreChain);
  Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32));
  Ops.push_back(Value);
  Ops.push_back(FIdx);

  SmallVector<EVT, 2> ValueVTs;
  ValueVTs.push_back(MVT::Other); // chain
  SDVTList VTs = DAG.getVTList(ValueVTs);

  StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
                                       dl, VTs, Ops, MVT::v4i32, PtrInfo);

  // Extract the value requested.
  unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
  SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
  Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

  SDValue IntVal = DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
                               PtrInfo.getWithOffset(Offset),
                               false, false, false, 0);

  if (!Subtarget.useCRBits())
    return IntVal;

  return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal);
}

/// Lowering for QPX v4i1 loads
SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc dl(Op);
  LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
  SDValue LoadChain = LN->getChain();
  SDValue BasePtr = LN->getBasePtr();

  if (Op.getValueType() == MVT::v4f64 ||
      Op.getValueType() == MVT::v4f32) {
    EVT MemVT = LN->getMemoryVT();
    unsigned Alignment = LN->getAlignment();

    // If this load is properly aligned, then it is legal.
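    // Otherwise, since QPX has no unaligned vector loads, the code below
    // splits the vector into one scalar load per element and re-joins the
    // chains with a TokenFactor.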
    if (Alignment >= MemVT.getStoreSize())
      return Op;

    EVT ScalarVT = Op.getValueType().getScalarType(),
        ScalarMemVT = MemVT.getScalarType();
    unsigned Stride = ScalarMemVT.getStoreSize();

    SmallVector<SDValue, 8> Vals, LoadChains;
    for (unsigned Idx = 0; Idx < 4; ++Idx) {
      SDValue Load;
      if (ScalarVT != ScalarMemVT)
        Load =
          DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain,
                         BasePtr,
                         LN->getPointerInfo().getWithOffset(Idx*Stride),
                         ScalarMemVT, LN->isVolatile(), LN->isNonTemporal(),
                         LN->isInvariant(), MinAlign(Alignment, Idx*Stride),
                         LN->getAAInfo());
      else
        Load =
          DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr,
                      LN->getPointerInfo().getWithOffset(Idx*Stride),
                      LN->isVolatile(), LN->isNonTemporal(),
                      LN->isInvariant(), MinAlign(Alignment, Idx*Stride),
                      LN->getAAInfo());

      if (Idx == 0 && LN->isIndexed()) {
        assert(LN->getAddressingMode() == ISD::PRE_INC &&
               "Unknown addressing mode on vector load");
        Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(),
                                  LN->getAddressingMode());
      }

      Vals.push_back(Load);
      LoadChains.push_back(Load.getValue(1));

      BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                            DAG.getConstant(Stride, dl,
                                            BasePtr.getValueType()));
    }

    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
    SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals);

    if (LN->isIndexed()) {
      SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
      return DAG.getMergeValues(RetOps, dl);
    }

    SDValue RetOps[] = { Value, TF };
    return DAG.getMergeValues(RetOps, dl);
  }

  assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower");
  assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported");

  // To lower v4i1 from a byte array, we load the byte elements of the
  // vector and then reuse the BUILD_VECTOR logic.

  SmallVector<SDValue, 4> VectElmts, VectElmtChains;
  for (unsigned i = 0; i < 4; ++i) {
    SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);

    VectElmts.push_back(DAG.getExtLoad(ISD::EXTLOAD,
                                       dl, MVT::i32, LoadChain, Idx,
                                       LN->getPointerInfo().getWithOffset(i),
                                       MVT::i8 /* memory type */,
                                       LN->isVolatile(), LN->isNonTemporal(),
                                       LN->isInvariant(),
                                       1 /* alignment */, LN->getAAInfo()));
    VectElmtChains.push_back(VectElmts[i].getValue(1));
  }

  LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains);
  SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts);

  SDValue RVals[] = { Value, LoadChain };
  return DAG.getMergeValues(RVals, dl);
}

/// Lowering for QPX v4i1 stores
SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
  SDValue StoreChain = SN->getChain();
  SDValue BasePtr = SN->getBasePtr();
  SDValue Value = SN->getValue();

  if (Value.getValueType() == MVT::v4f64 ||
      Value.getValueType() == MVT::v4f32) {
    EVT MemVT = SN->getMemoryVT();
    unsigned Alignment = SN->getAlignment();

    // If this store is properly aligned, then it is legal.
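    // Otherwise, as with loads, an underaligned vector store is expanded
    // below into one EXTRACT_VECTOR_ELT plus scalar store per element.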
    if (Alignment >= MemVT.getStoreSize())
      return Op;

    EVT ScalarVT = Value.getValueType().getScalarType(),
        ScalarMemVT = MemVT.getScalarType();
    unsigned Stride = ScalarMemVT.getStoreSize();

    SmallVector<SDValue, 8> Stores;
    for (unsigned Idx = 0; Idx < 4; ++Idx) {
      SDValue Ex = DAG.getNode(
          ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value,
          DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Store;
      if (ScalarVT != ScalarMemVT)
        Store =
          DAG.getTruncStore(StoreChain, dl, Ex, BasePtr,
                            SN->getPointerInfo().getWithOffset(Idx*Stride),
                            ScalarMemVT, SN->isVolatile(), SN->isNonTemporal(),
                            MinAlign(Alignment, Idx*Stride), SN->getAAInfo());
      else
        Store =
          DAG.getStore(StoreChain, dl, Ex, BasePtr,
                       SN->getPointerInfo().getWithOffset(Idx*Stride),
                       SN->isVolatile(), SN->isNonTemporal(),
                       MinAlign(Alignment, Idx*Stride), SN->getAAInfo());

      if (Idx == 0 && SN->isIndexed()) {
        assert(SN->getAddressingMode() == ISD::PRE_INC &&
               "Unknown addressing mode on vector store");
        Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(),
                                    SN->getAddressingMode());
      }

      BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                            DAG.getConstant(Stride, dl,
                                            BasePtr.getValueType()));
      Stores.push_back(Store);
    }

    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

    if (SN->isIndexed()) {
      SDValue RetOps[] = { TF, Stores[0].getValue(1) };
      return DAG.getMergeValues(RetOps, dl);
    }

    return TF;
  }

  assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported");
  assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower");

  // The values are now known to be -1 (false) or 1 (true). To convert this
  // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
  // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
  Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

  // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
  // understand how to form the extending load.
  SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);

  Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

  // Now convert to an integer and store.
  Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
                      DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl,
                                      MVT::i32),
                      Value);

  MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(16, 16, false);
  MachinePointerInfo PtrInfo =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  SmallVector<SDValue, 2> Ops;
  Ops.push_back(StoreChain);
  Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32));
  Ops.push_back(Value);
  Ops.push_back(FIdx);

  SmallVector<EVT, 2> ValueVTs;
  ValueVTs.push_back(MVT::Other); // chain
  SDVTList VTs = DAG.getVTList(ValueVTs);

  StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
                                       dl, VTs, Ops, MVT::v4i32, PtrInfo);

  // Move data into the byte array.
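  // The qvstfiw above wrote each element as a 32-bit word into the 16-byte
  // stack slot; reload those words and truncate each one to the single byte
  // that the in-memory v4i1 representation uses.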
  SmallVector<SDValue, 4> Loads, LoadChains;
  for (unsigned i = 0; i < 4; ++i) {
    unsigned Offset = 4*i;
    SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

    Loads.push_back(DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
                                PtrInfo.getWithOffset(Offset),
                                false, false, false, 0));
    LoadChains.push_back(Loads[i].getValue(1));
  }

  StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);

  SmallVector<SDValue, 4> Stores;
  for (unsigned i = 0; i < 4; ++i) {
    SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);

    Stores.push_back(DAG.getTruncStore(
        StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i),
        MVT::i8 /* memory type */, SN->isNonTemporal(), SN->isVolatile(),
        1 /* alignment */, SN->getAAInfo()));
  }

  StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

  return StoreChain;
}

SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  if (Op.getValueType() == MVT::v4i32) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDValue Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG, dl);
    SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl); // +16 as shift amt.

    SDValue RHSSwap =   // = vrlw RHS, 16
      BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);

    // Shrinkify inputs to v8i16.
    LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
    RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
    RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);

    // Low parts multiplied together, generating 32-bit results (we ignore the
    // top parts).
    SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
                                      LHS, RHS, DAG, dl, MVT::v4i32);

    SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
                                      LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
    // Shift the high parts up 16 bits.
    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
                              Neg16, DAG, dl);
    return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
  } else if (Op.getValueType() == MVT::v8i16) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);

    return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
                            LHS, RHS, Zero, DAG, dl);
  } else if (Op.getValueType() == MVT::v16i8) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
    bool isLittleEndian = Subtarget.isLittleEndian();

    // Multiply the even 8-bit parts, producing 16-bit sums.
    SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
                                         LHS, RHS, DAG, dl, MVT::v8i16);
    EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);

    // Multiply the odd 8-bit parts, producing 16-bit sums.
    SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
                                        LHS, RHS, DAG, dl, MVT::v8i16);
    OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);

    // Merge the results together.  Because vmuleub and vmuloub are
    // instructions with a big-endian bias, we must reverse the
    // element numbering and reverse the meaning of "odd" and "even"
    // when generating little endian code.
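    // Concretely, the big-endian mask selects bytes 1, 17, 3, 19, ... (the
    // low halves of the 16-bit products), while the little-endian mask
    // selects bytes 0, 16, 2, 18, ... with the odd/even operands exchanged.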
    int Ops[16];
    for (unsigned i = 0; i != 8; ++i) {
      if (isLittleEndian) {
        Ops[i*2  ] = 2*i;
        Ops[i*2+1] = 2*i+16;
      } else {
        Ops[i*2  ] = 2*i+1;
        Ops[i*2+1] = 2*i+1+16;
      }
    }
    if (isLittleEndian)
      return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
    else
      return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
  } else {
    llvm_unreachable("Unknown mul to lower!");
  }
}

/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Wasn't expecting to be able to lower this!");
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
  case ISD::SETCC:              return LowerSETCC(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG, Subtarget);

  case ISD::VAARG:
    return LowerVAARG(Op, DAG, Subtarget);

  case ISD::VACOPY:
    return LowerVACOPY(Op, DAG, Subtarget);

  case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG, Subtarget);
  case ISD::DYNAMIC_STACKALLOC:
    return LowerDYNAMIC_STACKALLOC(Op, DAG, Subtarget);
  case ISD::GET_DYNAMIC_AREA_OFFSET:
    return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG, Subtarget);

  case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
  case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);

  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
  case ISD::FP_TO_UINT:
  case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG,
                                                      SDLoc(Op));
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
  case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);

  // Lower 64-bit shifts.
  case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
  case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
  case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);

  // Vector-related lowering.
  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::SIGN_EXTEND_INREG:  return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::MUL:                return LowerMUL(Op, DAG);

  // For counter-based loop handling.
  case ISD::INTRINSIC_W_CHAIN:  return SDValue();

  // Frame & Return address.
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  }
}

void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");
  case ISD::READCYCLECOUNTER: {
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
    SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));

    Results.push_back(RTB);
    Results.push_back(RTB.getValue(1));
    Results.push_back(RTB.getValue(2));
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
        Intrinsic::ppc_is_decremented_ctr_nonzero)
      break;

    assert(N->getValueType(0) == MVT::i1 &&
           "Unexpected result type for CTR decrement intrinsic");
    EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                 N->getValueType(0));
    SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
    SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
                                 N->getOperand(1));

    Results.push_back(NewInt);
    Results.push_back(NewInt.getValue(1));
    break;
  }
  case ISD::VAARG: {
    if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
      return;

    EVT VT = N->getValueType(0);

    if (VT == MVT::i64) {
      SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG, Subtarget);

      Results.push_back(NewNode);
      Results.push_back(NewNode.getValue(1));
    }
    return;
  }
  case ISD::FP_ROUND_INREG: {
    assert(N->getValueType(0) == MVT::ppcf128);
    assert(N->getOperand(0).getValueType() == MVT::ppcf128);
    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                             MVT::f64, N->getOperand(0),
                             DAG.getIntPtrConstant(0, dl));
    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                             MVT::f64, N->getOperand(0),
                             DAG.getIntPtrConstant(1, dl));

    // Add the two halves of the long double in round-to-zero mode.
    SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);

    // We know the low half is about to be thrown away, so just use something
    // convenient.
    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128,
                                  FPreg, FPreg));
    return;
  }
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // LowerFP_TO_INT() can only handle f32 and f64.
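    // For ppcf128 we push no results, which leaves the node to the
    // legalizer's default expansion (typically a library call).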
    if (N->getOperand(0).getValueType() == MVT::ppcf128)
      return;
    Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
    return;
  }
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *Func = Intrinsic::getDeclaration(M, Id);
  return Builder.CreateCall(Func, {});
}

// The mappings for emitLeading/TrailingFence are taken from
// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                 AtomicOrdering Ord,
                                                 bool IsStore,
                                                 bool IsLoad) const {
  if (Ord == AtomicOrdering::SequentiallyConsistent)
    return callIntrinsic(Builder, Intrinsic::ppc_sync);
  if (isReleaseOrStronger(Ord))
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  return nullptr;
}

Instruction* PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                  AtomicOrdering Ord,
                                                  bool IsStore,
                                                  bool IsLoad) const {
  if (IsLoad && isAcquireOrStronger(Ord))
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  // FIXME: this is too conservative, a dependent branch + isync is enough.
  // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
  // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
  // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
  return nullptr;
}

MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
                                    unsigned AtomicSize,
                                    unsigned BinOpcode) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
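  // When BinOpcode is 0, no arithmetic is emitted between the load-reserve
  // and the store-conditional, so the incoming value is stored unchanged;
  // that is exactly the semantics of an atomic swap.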
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  auto LoadMnemonic = PPC::LDARX;
  auto StoreMnemonic = PPC::STDCX;
  switch (AtomicSize) {
  default:
    llvm_unreachable("Unexpected size of atomic entity");
  case 1:
    LoadMnemonic = PPC::LBARX;
    StoreMnemonic = PPC::STBCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "No support for partword atomics on this subtarget");
    break;
  case 2:
    LoadMnemonic = PPC::LHARX;
    StoreMnemonic = PPC::STHCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "No support for partword atomics on this subtarget");
    break;
  case 4:
    LoadMnemonic = PPC::LWARX;
    StoreMnemonic = PPC::STWCX;
    break;
  case 8:
    LoadMnemonic = PPC::LDARX;
    StoreMnemonic = PPC::STDCX;
    break;
  }

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = ++BB->getIterator();

  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptrA = MI->getOperand(1).getReg();
  unsigned ptrB = MI->getOperand(2).getReg();
  unsigned incr = MI->getOperand(3).getReg();
  DebugLoc dl = MI->getDebugLoc();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  unsigned TmpReg = (!BinOpcode) ? incr :
    RegInfo.createVirtualRegister(AtomicSize == 8 ? &PPC::G8RCRegClass
                                                  : &PPC::GPRCRegClass);

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  //  loopMBB:
  //   l[wd]arx dest, ptr
  //   add r0, dest, incr
  //   st[wd]cx. r0, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB
  BB = loopMBB;
  BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
    .addReg(ptrA).addReg(ptrB);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
  BuildMI(BB, dl, TII->get(StoreMnemonic))
    .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
  BuildMI(BB, dl, TII->get(PPC::BCC))
    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;
  return BB;
}

MachineBasicBlock *
PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI,
                                            MachineBasicBlock *BB,
                                            bool is8bit,    // operation
                                            unsigned BinOpcode) const {
  // If we support part-word atomic mnemonics, just use them
  if (Subtarget.hasPartwordAtomics())
    return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode);

  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  // In 64 bit mode we have to use 64 bits for addresses, even though the
  // lwarx/stwcx are 32 bits.  With the 32-bit atomics we can use address
  // registers without caring whether they're 32 or 64, but here we're
  // doing actual arithmetic on the addresses.
  bool is64bit = Subtarget.isPPC64();
  unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = ++BB->getIterator();

  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptrA = MI->getOperand(1).getReg();
  unsigned ptrB = MI->getOperand(2).getReg();
  unsigned incr = MI->getOperand(3).getReg();
  DebugLoc dl = MI->getDebugLoc();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass
                                          : &PPC::GPRCRegClass;
  unsigned PtrReg = RegInfo.createVirtualRegister(RC);
  unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
  unsigned ShiftReg = RegInfo.createVirtualRegister(RC);
  unsigned Incr2Reg = RegInfo.createVirtualRegister(RC);
  unsigned MaskReg = RegInfo.createVirtualRegister(RC);
  unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
  unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
  unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
  unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC);
  unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
  unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
  unsigned Ptr1Reg;
  unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC);

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  // The 4-byte load must be aligned, while a char or short may be
  // anywhere in the word.  Hence all this nasty bookkeeping code.
  //   add ptr1, ptrA, ptrB [copy if ptrA==0]
  //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
  //   xori shift, shift1, 24 [16]
  //   rlwinm ptr, ptr1, 0, 0, 29
  //   slw incr2, incr, shift
  //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
  //   slw mask, mask2, shift
  //  loopMBB:
  //   lwarx tmpDest, ptr
  //   add tmp, tmpDest, incr2
  //   andc tmp2, tmpDest, mask
  //   and tmp3, tmp, mask
  //   or tmp4, tmp3, tmp2
  //   stwcx. tmp4, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB
  //   srw dest, tmpDest, shift
  if (ptrA != ZeroReg) {
    Ptr1Reg = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
      .addReg(ptrA).addReg(ptrB);
  } else {
    Ptr1Reg = ptrB;
  }
  BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
    .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
  BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
    .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
  if (is64bit)
    BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
      .addReg(Ptr1Reg).addImm(0).addImm(61);
  else
    BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
      .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
  BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg)
    .addReg(incr).addReg(ShiftReg);
  if (is8bit)
    BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
  else {
    BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
    BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg).addReg(Mask3Reg).addImm(65535);
  }
  BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
    .addReg(Mask2Reg).addReg(ShiftReg);

  BB = loopMBB;
  BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
    .addReg(ZeroReg).addReg(PtrReg);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
      .addReg(Incr2Reg).addReg(TmpDestReg);
  BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg)
    .addReg(TmpDestReg).addReg(MaskReg);
  BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg)
    .addReg(TmpReg).addReg(MaskReg);
  BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg)
    .addReg(Tmp3Reg).addReg(Tmp2Reg);
  BuildMI(BB, dl, TII->get(PPC::STWCX))
    .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg);
  BuildMI(BB, dl, TII->get(PPC::BCC))
    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;
  BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg)
    .addReg(ShiftReg);
  return BB;
}

llvm::MachineBasicBlock*
PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
                                    MachineBasicBlock *MBB) const {
  DebugLoc DL = MI->getDebugLoc();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = ++MBB->getIterator();

  // Memory Reference
  MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();

  unsigned DstReg = MI->getOperand(0).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  assert(RC->hasType(MVT::i32) && "Invalid destination!");
  unsigned mainDstReg = MRI.createVirtualRegister(RC);
  unsigned restoreDstReg = MRI.createVirtualRegister(RC);

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");
  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  SjLjSetup mainMBB
  //  bl mainMBB
  //  v_restore = 1
  //  b sinkMBB
  //
  // mainMBB:
  //  buf[LabelOffset] = LR
  //  v_main = 0
  //
  // sinkMBB:
  //  v = phi(main, restore)
  //

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, sinkMBB);

  MachineInstrBuilder MIB;

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // Note that the structure of the jmp_buf used here is not compatible
  // with that used by libc, and is not designed to be. Specifically, it
  // stores only those 'reserved' registers that LLVM does not otherwise
  // understand how to spill. Also, by convention, by the time this
  // intrinsic is called, Clang has already stored the frame address in the
  // first slot of the buffer and stack address in the third. Following the
  // X86 target code, we'll store the jump address in the second slot. We also
  // need to save the TOC pointer (R2) to handle jumps between shared
  // libraries, and that will be stored in the fourth slot. The thread
  // identifier (R13) is not affected.

  // thisMBB:
  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t TOCOffset   = 3 * PVT.getStoreSize();
  const int64_t BPOffset    = 4 * PVT.getStoreSize();

  // Prepare the IP in a virtual register.
  const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
  unsigned LabelReg = MRI.createVirtualRegister(PtrRC);
  unsigned BufReg = MI->getOperand(1).getReg();

  if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) {
    setUsesTOCBasePtr(*MBB->getParent());
    MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
            .addReg(PPC::X2)
            .addImm(TOCOffset)
            .addReg(BufReg);
    MIB.setMemRefs(MMOBegin, MMOEnd);
  }

  // Naked functions never have a base pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned BaseReg;
  if (MF->getFunction()->hasFnAttribute(Attribute::Naked))
    BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
  else
    BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;

  MIB = BuildMI(*thisMBB, MI, DL,
                TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
          .addReg(BaseReg)
          .addImm(BPOffset)
          .addReg(BufReg);
  MIB.setMemRefs(MMOBegin, MMOEnd);

  // Setup
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
  MIB.addRegMask(TRI->getNoPreservedMask());

  BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);

  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
          .addMBB(mainMBB);
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);

  thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
  thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());

  // mainMBB:
  //  mainDstReg = 0
  MIB =
      BuildMI(mainMBB, DL,
              TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);

  // Store IP
  if (Subtarget.isPPC64()) {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
            .addReg(LabelReg)
            .addImm(LabelOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
            .addReg(LabelReg)
            .addImm(LabelOffset)
            .addReg(BufReg);
  }

  MIB.setMemRefs(MMOBegin, MMOEnd);

  BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
  mainMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(PPC::PHI), DstReg)
    .addReg(mainDstReg).addMBB(mainMBB)
    .addReg(restoreDstReg).addMBB(thisMBB);

  MI->eraseFromParent();
  return sinkMBB;
}

MachineBasicBlock *
PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
                                     MachineBasicBlock *MBB) const {
  DebugLoc DL = MI->getDebugLoc();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  // Memory Reference
  MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  const TargetRegisterClass *RC =
    (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
  unsigned Tmp = MRI.createVirtualRegister(RC);
  // Since FP is only updated here but NOT referenced, it's treated as GPR.
  unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
  unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
  unsigned BP =
      (PVT == MVT::i64)
          ? PPC::X30
          : (Subtarget.isSVR4ABI() &&
                     MF->getTarget().getRelocationModel() == Reloc::PIC_
                 ? PPC::R29
                 : PPC::R30);

  MachineInstrBuilder MIB;

  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t SPOffset    = 2 * PVT.getStoreSize();
  const int64_t TOCOffset   = 3 * PVT.getStoreSize();
  const int64_t BPOffset    = 4 * PVT.getStoreSize();

  unsigned BufReg = MI->getOperand(0).getReg();

  // Reload FP (the jumped-to function may not have had a
  // frame pointer, and if so, then its r31 will be restored
  // as necessary).
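  // The buffer is read back using the same slot layout emitEHSjLjSetJmp
  // wrote: word 0 holds the frame pointer, word 1 the IP, word 2 the SP,
  // word 3 the TOC pointer, and word 4 the base pointer.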
8747 if (PVT == MVT::i64) { 8748 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP) 8749 .addImm(0) 8750 .addReg(BufReg); 8751 } else { 8752 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP) 8753 .addImm(0) 8754 .addReg(BufReg); 8755 } 8756 MIB.setMemRefs(MMOBegin, MMOEnd); 8757 8758 // Reload IP 8759 if (PVT == MVT::i64) { 8760 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp) 8761 .addImm(LabelOffset) 8762 .addReg(BufReg); 8763 } else { 8764 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp) 8765 .addImm(LabelOffset) 8766 .addReg(BufReg); 8767 } 8768 MIB.setMemRefs(MMOBegin, MMOEnd); 8769 8770 // Reload SP 8771 if (PVT == MVT::i64) { 8772 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP) 8773 .addImm(SPOffset) 8774 .addReg(BufReg); 8775 } else { 8776 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP) 8777 .addImm(SPOffset) 8778 .addReg(BufReg); 8779 } 8780 MIB.setMemRefs(MMOBegin, MMOEnd); 8781 8782 // Reload BP 8783 if (PVT == MVT::i64) { 8784 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP) 8785 .addImm(BPOffset) 8786 .addReg(BufReg); 8787 } else { 8788 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP) 8789 .addImm(BPOffset) 8790 .addReg(BufReg); 8791 } 8792 MIB.setMemRefs(MMOBegin, MMOEnd); 8793 8794 // Reload TOC 8795 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) { 8796 setUsesTOCBasePtr(*MBB->getParent()); 8797 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) 8798 .addImm(TOCOffset) 8799 .addReg(BufReg); 8800 8801 MIB.setMemRefs(MMOBegin, MMOEnd); 8802 } 8803 8804 // Jump 8805 BuildMI(*MBB, MI, DL, 8806 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp); 8807 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR)); 8808 8809 MI->eraseFromParent(); 8810 return MBB; 8811 } 8812 8813 MachineBasicBlock * 8814 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 8815 MachineBasicBlock *BB) const { 8816 if (MI->getOpcode() == TargetOpcode::STACKMAP || 8817 MI->getOpcode() == TargetOpcode::PATCHPOINT) { 8818 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() && 8819 MI->getOpcode() == TargetOpcode::PATCHPOINT) { 8820 // Call lowering should have added an r2 operand to indicate a dependence 8821 // on the TOC base pointer value. It can't however, because there is no 8822 // way to mark the dependence as implicit there, and so the stackmap code 8823 // will confuse it with a regular operand. Instead, add the dependence 8824 // here. 8825 setUsesTOCBasePtr(*BB->getParent()); 8826 MI->addOperand(MachineOperand::CreateReg(PPC::X2, false, true)); 8827 } 8828 8829 return emitPatchPoint(MI, BB); 8830 } 8831 8832 if (MI->getOpcode() == PPC::EH_SjLj_SetJmp32 || 8833 MI->getOpcode() == PPC::EH_SjLj_SetJmp64) { 8834 return emitEHSjLjSetJmp(MI, BB); 8835 } else if (MI->getOpcode() == PPC::EH_SjLj_LongJmp32 || 8836 MI->getOpcode() == PPC::EH_SjLj_LongJmp64) { 8837 return emitEHSjLjLongJmp(MI, BB); 8838 } 8839 8840 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8841 8842 // To "insert" these instructions we actually have to insert their 8843 // control-flow patterns. 
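  // For the SELECT_* pseudos handled below, that control-flow pattern is the
  // usual diamond:
  //
  //            thisMBB
  //            /     \
  //       copy0MBB    |
  //            \     /
  //            sinkMBB
  //
  // where a PHI in sinkMBB merges the true and false values.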
8844 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8845 MachineFunction::iterator It = ++BB->getIterator(); 8846 8847 MachineFunction *F = BB->getParent(); 8848 8849 if (Subtarget.hasISEL() && (MI->getOpcode() == PPC::SELECT_CC_I4 || 8850 MI->getOpcode() == PPC::SELECT_CC_I8 || 8851 MI->getOpcode() == PPC::SELECT_I4 || 8852 MI->getOpcode() == PPC::SELECT_I8)) { 8853 SmallVector<MachineOperand, 2> Cond; 8854 if (MI->getOpcode() == PPC::SELECT_CC_I4 || 8855 MI->getOpcode() == PPC::SELECT_CC_I8) 8856 Cond.push_back(MI->getOperand(4)); 8857 else 8858 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET)); 8859 Cond.push_back(MI->getOperand(1)); 8860 8861 DebugLoc dl = MI->getDebugLoc(); 8862 TII->insertSelect(*BB, MI, dl, MI->getOperand(0).getReg(), 8863 Cond, MI->getOperand(2).getReg(), 8864 MI->getOperand(3).getReg()); 8865 } else if (MI->getOpcode() == PPC::SELECT_CC_I4 || 8866 MI->getOpcode() == PPC::SELECT_CC_I8 || 8867 MI->getOpcode() == PPC::SELECT_CC_F4 || 8868 MI->getOpcode() == PPC::SELECT_CC_F8 || 8869 MI->getOpcode() == PPC::SELECT_CC_QFRC || 8870 MI->getOpcode() == PPC::SELECT_CC_QSRC || 8871 MI->getOpcode() == PPC::SELECT_CC_QBRC || 8872 MI->getOpcode() == PPC::SELECT_CC_VRRC || 8873 MI->getOpcode() == PPC::SELECT_CC_VSFRC || 8874 MI->getOpcode() == PPC::SELECT_CC_VSSRC || 8875 MI->getOpcode() == PPC::SELECT_CC_VSRC || 8876 MI->getOpcode() == PPC::SELECT_I4 || 8877 MI->getOpcode() == PPC::SELECT_I8 || 8878 MI->getOpcode() == PPC::SELECT_F4 || 8879 MI->getOpcode() == PPC::SELECT_F8 || 8880 MI->getOpcode() == PPC::SELECT_QFRC || 8881 MI->getOpcode() == PPC::SELECT_QSRC || 8882 MI->getOpcode() == PPC::SELECT_QBRC || 8883 MI->getOpcode() == PPC::SELECT_VRRC || 8884 MI->getOpcode() == PPC::SELECT_VSFRC || 8885 MI->getOpcode() == PPC::SELECT_VSSRC || 8886 MI->getOpcode() == PPC::SELECT_VSRC) { 8887 // The incoming instruction knows the destination vreg to set, the 8888 // condition code register to branch on, the true/false values to 8889 // select between, and a branch opcode to use. 8890 8891 // thisMBB: 8892 // ... 8893 // TrueVal = ... 8894 // cmpTY ccX, r1, r2 8895 // bCC copy1MBB 8896 // fallthrough --> copy0MBB 8897 MachineBasicBlock *thisMBB = BB; 8898 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 8899 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 8900 DebugLoc dl = MI->getDebugLoc(); 8901 F->insert(It, copy0MBB); 8902 F->insert(It, sinkMBB); 8903 8904 // Transfer the remainder of BB and its successor edges to sinkMBB. 8905 sinkMBB->splice(sinkMBB->begin(), BB, 8906 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8907 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 8908 8909 // Next, add the true and fallthrough blocks as its successors. 
8910 BB->addSuccessor(copy0MBB); 8911 BB->addSuccessor(sinkMBB); 8912 8913 if (MI->getOpcode() == PPC::SELECT_I4 || 8914 MI->getOpcode() == PPC::SELECT_I8 || 8915 MI->getOpcode() == PPC::SELECT_F4 || 8916 MI->getOpcode() == PPC::SELECT_F8 || 8917 MI->getOpcode() == PPC::SELECT_QFRC || 8918 MI->getOpcode() == PPC::SELECT_QSRC || 8919 MI->getOpcode() == PPC::SELECT_QBRC || 8920 MI->getOpcode() == PPC::SELECT_VRRC || 8921 MI->getOpcode() == PPC::SELECT_VSFRC || 8922 MI->getOpcode() == PPC::SELECT_VSSRC || 8923 MI->getOpcode() == PPC::SELECT_VSRC) { 8924 BuildMI(BB, dl, TII->get(PPC::BC)) 8925 .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 8926 } else { 8927 unsigned SelectPred = MI->getOperand(4).getImm(); 8928 BuildMI(BB, dl, TII->get(PPC::BCC)) 8929 .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 8930 } 8931 8932 // copy0MBB: 8933 // %FalseValue = ... 8934 // # fallthrough to sinkMBB 8935 BB = copy0MBB; 8936 8937 // Update machine-CFG edges 8938 BB->addSuccessor(sinkMBB); 8939 8940 // sinkMBB: 8941 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 8942 // ... 8943 BB = sinkMBB; 8944 BuildMI(*BB, BB->begin(), dl, 8945 TII->get(PPC::PHI), MI->getOperand(0).getReg()) 8946 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB) 8947 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 8948 } else if (MI->getOpcode() == PPC::ReadTB) { 8949 // To read the 64-bit time-base register on a 32-bit target, we read the 8950 // two halves. Should the counter have wrapped while it was being read, we 8951 // need to try again. 8952 // ... 8953 // readLoop: 8954 // mfspr Rx,TBU # load from TBU 8955 // mfspr Ry,TB # load from TB 8956 // mfspr Rz,TBU # load from TBU 8957 // cmpw crX,Rx,Rz # check if 'old'='new' 8958 // bne readLoop # branch if they're not equal 8959 // ... 8960 8961 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB); 8962 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 8963 DebugLoc dl = MI->getDebugLoc(); 8964 F->insert(It, readMBB); 8965 F->insert(It, sinkMBB); 8966 8967 // Transfer the remainder of BB and its successor edges to sinkMBB. 
8968 sinkMBB->splice(sinkMBB->begin(), BB, 8969 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8970 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 8971 8972 BB->addSuccessor(readMBB); 8973 BB = readMBB; 8974 8975 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8976 unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 8977 unsigned LoReg = MI->getOperand(0).getReg(); 8978 unsigned HiReg = MI->getOperand(1).getReg(); 8979 8980 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269); 8981 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268); 8982 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269); 8983 8984 unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 8985 8986 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg) 8987 .addReg(HiReg).addReg(ReadAgainReg); 8988 BuildMI(BB, dl, TII->get(PPC::BCC)) 8989 .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB); 8990 8991 BB->addSuccessor(readMBB); 8992 BB->addSuccessor(sinkMBB); 8993 } 8994 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 8995 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 8996 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 8997 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 8998 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 8999 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4); 9000 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 9001 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8); 9002 9003 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 9004 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 9005 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 9006 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 9007 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 9008 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND); 9009 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 9010 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8); 9011 9012 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 9013 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 9014 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 9015 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 9016 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 9017 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR); 9018 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 9019 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8); 9020 9021 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 9022 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 9023 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 9024 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 9025 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 9026 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR); 9027 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 9028 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8); 9029 9030 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 9031 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND); 9032 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 9033 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND); 9034 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 9035 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND); 9036 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 9037 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8); 9038 9039 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 9040 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 9041 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 9042 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 9043 else 
if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32) 9044 BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF); 9045 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 9046 BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8); 9047 9048 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8) 9049 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 9050 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16) 9051 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 9052 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32) 9053 BB = EmitAtomicBinary(MI, BB, 4, 0); 9054 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64) 9055 BB = EmitAtomicBinary(MI, BB, 8, 0); 9056 9057 else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 9058 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 || 9059 (Subtarget.hasPartwordAtomics() && 9060 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) || 9061 (Subtarget.hasPartwordAtomics() && 9062 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) { 9063 bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64; 9064 9065 auto LoadMnemonic = PPC::LDARX; 9066 auto StoreMnemonic = PPC::STDCX; 9067 switch(MI->getOpcode()) { 9068 default: 9069 llvm_unreachable("Compare and swap of unknown size"); 9070 case PPC::ATOMIC_CMP_SWAP_I8: 9071 LoadMnemonic = PPC::LBARX; 9072 StoreMnemonic = PPC::STBCX; 9073 assert(Subtarget.hasPartwordAtomics() && "No support partword atomics."); 9074 break; 9075 case PPC::ATOMIC_CMP_SWAP_I16: 9076 LoadMnemonic = PPC::LHARX; 9077 StoreMnemonic = PPC::STHCX; 9078 assert(Subtarget.hasPartwordAtomics() && "No support partword atomics."); 9079 break; 9080 case PPC::ATOMIC_CMP_SWAP_I32: 9081 LoadMnemonic = PPC::LWARX; 9082 StoreMnemonic = PPC::STWCX; 9083 break; 9084 case PPC::ATOMIC_CMP_SWAP_I64: 9085 LoadMnemonic = PPC::LDARX; 9086 StoreMnemonic = PPC::STDCX; 9087 break; 9088 } 9089 unsigned dest = MI->getOperand(0).getReg(); 9090 unsigned ptrA = MI->getOperand(1).getReg(); 9091 unsigned ptrB = MI->getOperand(2).getReg(); 9092 unsigned oldval = MI->getOperand(3).getReg(); 9093 unsigned newval = MI->getOperand(4).getReg(); 9094 DebugLoc dl = MI->getDebugLoc(); 9095 9096 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 9097 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 9098 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 9099 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 9100 F->insert(It, loop1MBB); 9101 F->insert(It, loop2MBB); 9102 F->insert(It, midMBB); 9103 F->insert(It, exitMBB); 9104 exitMBB->splice(exitMBB->begin(), BB, 9105 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 9106 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 9107 9108 // thisMBB: 9109 // ... 9110 // fallthrough --> loopMBB 9111 BB->addSuccessor(loop1MBB); 9112 9113 // loop1MBB: 9114 // l[bhwd]arx dest, ptr 9115 // cmp[wd] dest, oldval 9116 // bne- midMBB 9117 // loop2MBB: 9118 // st[bhwd]cx. newval, ptr 9119 // bne- loopMBB 9120 // b exitBB 9121 // midMBB: 9122 // st[bhwd]cx. dest, ptr 9123 // exitBB: 9124 BB = loop1MBB; 9125 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 9126 .addReg(ptrA).addReg(ptrB); 9127 BuildMI(BB, dl, TII->get(is64bit ? 
PPC::CMPD : PPC::CMPW), PPC::CR0) 9128 .addReg(oldval).addReg(dest); 9129 BuildMI(BB, dl, TII->get(PPC::BCC)) 9130 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 9131 BB->addSuccessor(loop2MBB); 9132 BB->addSuccessor(midMBB); 9133 9134 BB = loop2MBB; 9135 BuildMI(BB, dl, TII->get(StoreMnemonic)) 9136 .addReg(newval).addReg(ptrA).addReg(ptrB); 9137 BuildMI(BB, dl, TII->get(PPC::BCC)) 9138 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 9139 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 9140 BB->addSuccessor(loop1MBB); 9141 BB->addSuccessor(exitMBB); 9142 9143 BB = midMBB; 9144 BuildMI(BB, dl, TII->get(StoreMnemonic)) 9145 .addReg(dest).addReg(ptrA).addReg(ptrB); 9146 BB->addSuccessor(exitMBB); 9147 9148 // exitMBB: 9149 // ... 9150 BB = exitMBB; 9151 } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 9152 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 9153 // We must use 64-bit registers for addresses when targeting 64-bit, 9154 // since we're actually doing arithmetic on them. Other registers 9155 // can be 32-bit. 9156 bool is64bit = Subtarget.isPPC64(); 9157 bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 9158 9159 unsigned dest = MI->getOperand(0).getReg(); 9160 unsigned ptrA = MI->getOperand(1).getReg(); 9161 unsigned ptrB = MI->getOperand(2).getReg(); 9162 unsigned oldval = MI->getOperand(3).getReg(); 9163 unsigned newval = MI->getOperand(4).getReg(); 9164 DebugLoc dl = MI->getDebugLoc(); 9165 9166 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 9167 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 9168 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 9169 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 9170 F->insert(It, loop1MBB); 9171 F->insert(It, loop2MBB); 9172 F->insert(It, midMBB); 9173 F->insert(It, exitMBB); 9174 exitMBB->splice(exitMBB->begin(), BB, 9175 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 9176 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 9177 9178 MachineRegisterInfo &RegInfo = F->getRegInfo(); 9179 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 9180 : &PPC::GPRCRegClass; 9181 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 9182 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 9183 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 9184 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC); 9185 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC); 9186 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC); 9187 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC); 9188 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 9189 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 9190 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 9191 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 9192 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 9193 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 9194 unsigned Ptr1Reg; 9195 unsigned TmpReg = RegInfo.createVirtualRegister(RC); 9196 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 9197 // thisMBB: 9198 // ... 9199 // fallthrough --> loopMBB 9200 BB->addSuccessor(loop1MBB); 9201 9202 // The 4-byte load must be aligned, while a char or short may be 9203 // anywhere in the word. Hence all this nasty bookkeeping code. 
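    // As a concrete example (8-bit case): if ptr1 = base+2, rlwinm gives
    // shift1 = (ptr1 << 3) & 0x18 = 16, and xori 24 yields shift = 8, so the
    // byte occupies bits 15..8 of the aligned word at ptr = ptr1 & ~3; the
    // slw/srw pairs below move the operands into, and the result out of,
    // that position.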
9204     //   add ptr1, ptrA, ptrB [copy if ptrA==0]
9205     //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
9206     //   xori shift, shift1, 24 [16]
9207     //   rlwinm ptr, ptr1, 0, 0, 29
9208     //   slw newval2, newval, shift
9209     //   slw oldval2, oldval, shift
9210     //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
9211     //   slw mask, mask2, shift
9212     //   and newval3, newval2, mask
9213     //   and oldval3, oldval2, mask
9214     // loop1MBB:
9215     //   lwarx tmpDest, ptr
9216     //   and tmp, tmpDest, mask
9217     //   cmpw tmp, oldval3
9218     //   bne- midMBB
9219     // loop2MBB:
9220     //   andc tmp2, tmpDest, mask
9221     //   or tmp4, tmp2, newval3
9222     //   stwcx. tmp4, ptr
9223     //   bne- loop1MBB
9224     //   b exitMBB
9225     // midMBB:
9226     //   stwcx. tmpDest, ptr
9227     // exitMBB:
9228     //   srw dest, tmp, shift
9229     if (ptrA != ZeroReg) {
9230       Ptr1Reg = RegInfo.createVirtualRegister(RC);
9231       BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
9232           .addReg(ptrA).addReg(ptrB);
9233     } else {
9234       Ptr1Reg = ptrB;
9235     }
9236     BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
9237         .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
9238     BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
9239         .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
9240     if (is64bit)
9241       BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
9242           .addReg(Ptr1Reg).addImm(0).addImm(61);
9243     else
9244       BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
9245           .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
9246     BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
9247         .addReg(newval).addReg(ShiftReg);
9248     BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
9249         .addReg(oldval).addReg(ShiftReg);
9250     if (is8bit)
9251       BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
9252     else {
9253       BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
9254       BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
9255           .addReg(Mask3Reg).addImm(65535);
9256     }
9257     BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
9258         .addReg(Mask2Reg).addReg(ShiftReg);
9259     BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
9260         .addReg(NewVal2Reg).addReg(MaskReg);
9261     BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
9262         .addReg(OldVal2Reg).addReg(MaskReg);
9263
9264     BB = loop1MBB;
9265     BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
9266         .addReg(ZeroReg).addReg(PtrReg);
9267     BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
9268         .addReg(TmpDestReg).addReg(MaskReg);
9269     BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
9270         .addReg(TmpReg).addReg(OldVal3Reg);
9271     BuildMI(BB, dl, TII->get(PPC::BCC))
9272         .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
9273     BB->addSuccessor(loop2MBB);
9274     BB->addSuccessor(midMBB);
9275
9276     BB = loop2MBB;
9277     BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
9278         .addReg(TmpDestReg).addReg(MaskReg);
9279     BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
9280         .addReg(Tmp2Reg).addReg(NewVal3Reg);
9281     BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg)
9282         .addReg(ZeroReg).addReg(PtrReg);
9283     BuildMI(BB, dl, TII->get(PPC::BCC))
9284         .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
9285     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
9286     BB->addSuccessor(loop1MBB);
9287     BB->addSuccessor(exitMBB);
9288
9289     BB = midMBB;
9290     BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg)
9291         .addReg(ZeroReg).addReg(PtrReg);
9292     BB->addSuccessor(exitMBB);
9293
9294     // exitMBB:
9295     //   ...
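    // TmpReg still holds the masked copy of the last value loaded by lwarx,
    // so shifting it right by ShiftReg below leaves dest zero-extended.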
9296     BB = exitMBB;
9297     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpReg)
9298         .addReg(ShiftReg);
9299   } else if (MI->getOpcode() == PPC::FADDrtz) {
9300     // This pseudo performs an FADD with rounding mode temporarily forced
9301     // to round-to-zero. We emit this via custom inserter since the FPSCR
9302     // is not modeled at the SelectionDAG level.
9303     unsigned Dest = MI->getOperand(0).getReg();
9304     unsigned Src1 = MI->getOperand(1).getReg();
9305     unsigned Src2 = MI->getOperand(2).getReg();
9306     DebugLoc dl = MI->getDebugLoc();
9307
9308     MachineRegisterInfo &RegInfo = F->getRegInfo();
9309     unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
9310
9311     // Save FPSCR value.
9312     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
9313
9314     // Set rounding mode to round-to-zero.
9315     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
9316     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);
9317
9318     // Perform addition.
9319     BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);
9320
9321     // Restore FPSCR value.
9322     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
9323   } else if (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT ||
9324              MI->getOpcode() == PPC::ANDIo_1_GT_BIT ||
9325              MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
9326              MI->getOpcode() == PPC::ANDIo_1_GT_BIT8) {
9327     unsigned Opcode = (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
9328                        MI->getOpcode() == PPC::ANDIo_1_GT_BIT8) ?
9329                       PPC::ANDIo8 : PPC::ANDIo;
9330     bool isEQ = (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT ||
9331                  MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8);
9332
9333     MachineRegisterInfo &RegInfo = F->getRegInfo();
9334     unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ?
9335                                                   &PPC::GPRCRegClass :
9336                                                   &PPC::G8RCRegClass);
9337
9338     DebugLoc dl = MI->getDebugLoc();
9339     BuildMI(*BB, MI, dl, TII->get(Opcode), Dest)
9340         .addReg(MI->getOperand(1).getReg()).addImm(1);
9341     BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY),
9342             MI->getOperand(0).getReg())
9343         .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT);
9344   } else if (MI->getOpcode() == PPC::TCHECK_RET) {
9345     DebugLoc Dl = MI->getDebugLoc();
9346     MachineRegisterInfo &RegInfo = F->getRegInfo();
9347     unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
9348     BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
9349     return BB;
9350   } else {
9351     llvm_unreachable("Unexpected instr type to insert");
9352   }
9353
9354   MI->eraseFromParent(); // The pseudo instruction is gone now.
9355   return BB;
9356 }
9357
9358 //===----------------------------------------------------------------------===//
9359 // Target Optimization Hooks
9360 //===----------------------------------------------------------------------===//
9361
9362 static std::string getRecipOp(const char *Base, EVT VT) {
9363   std::string RecipOp(Base);
9364   if (VT.getScalarType() == MVT::f64)
9365     RecipOp += "d";
9366   else
9367     RecipOp += "f";
9368
9369   if (VT.isVector())
9370     RecipOp = "vec-" + RecipOp;
9371
9372   return RecipOp;
9373 }
9374
9375 SDValue PPCTargetLowering::getRsqrtEstimate(SDValue Operand,
9376                                             DAGCombinerInfo &DCI,
9377                                             unsigned &RefinementSteps,
9378                                             bool &UseOneConstNR) const {
9379   EVT VT = Operand.getValueType();
9380   if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
9381       (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
9382       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
9383       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
9384       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
9385       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
9386     TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals;
9387     std::string RecipOp = getRecipOp("sqrt", VT);
9388     if (!Recips.isEnabled(RecipOp))
9389       return SDValue();
9390
9391     RefinementSteps = Recips.getRefinementSteps(RecipOp);
9392     UseOneConstNR = true;
9393     return DCI.DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
9394   }
9395   return SDValue();
9396 }
9397
9398 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand,
9399                                             DAGCombinerInfo &DCI,
9400                                             unsigned &RefinementSteps) const {
9401   EVT VT = Operand.getValueType();
9402   if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
9403       (VT == MVT::f64 && Subtarget.hasFRE()) ||
9404       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
9405       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
9406       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
9407       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
9408     TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals;
9409     std::string RecipOp = getRecipOp("div", VT);
9410     if (!Recips.isEnabled(RecipOp))
9411       return SDValue();
9412
9413     RefinementSteps = Recips.getRefinementSteps(RecipOp);
9414     return DCI.DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
9415   }
9416   return SDValue();
9417 }
9418
9419 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
9420   // Note: This functionality is used only when unsafe-fp-math is enabled, and
9421   // on cores with reciprocal estimates (which are used when unsafe-fp-math is
9422   // enabled for division), this functionality is redundant with the default
9423   // combiner logic (once the division -> reciprocal/multiply transformation
9424   // has taken place). As a result, this matters more for older cores than for
9425   // newer ones.
9426
9427   // Combine multiple FDIVs with the same divisor into multiple FMULs by the
9428   // reciprocal if there are two or more FDIVs (for embedded cores with only
9429   // one FP pipeline) or three or more FDIVs (for generic OOO cores).
9430   switch (Subtarget.getDarwinDirective()) {
9431   default:
9432     return 3;
9433   case PPC::DIR_440:
9434   case PPC::DIR_A2:
9435   case PPC::DIR_E500mc:
9436   case PPC::DIR_E5500:
9437     return 2;
9438   }
9439 }
9440
9441 // isConsecutiveLSLoc needs to work even if all adds have not yet been
9442 // collapsed, and so we need to look through chains of them.
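// For example, getBaseWithConstantOffset below turns (add (add X, 16), 8)
// into Base = X, Offset = 24 even before the two adds have been
// reassociated into one.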
9443 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base, 9444 int64_t& Offset, SelectionDAG &DAG) { 9445 if (DAG.isBaseWithConstantOffset(Loc)) { 9446 Base = Loc.getOperand(0); 9447 Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue(); 9448 9449 // The base might itself be a base plus an offset, and if so, accumulate 9450 // that as well. 9451 getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG); 9452 } 9453 } 9454 9455 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, 9456 unsigned Bytes, int Dist, 9457 SelectionDAG &DAG) { 9458 if (VT.getSizeInBits() / 8 != Bytes) 9459 return false; 9460 9461 SDValue BaseLoc = Base->getBasePtr(); 9462 if (Loc.getOpcode() == ISD::FrameIndex) { 9463 if (BaseLoc.getOpcode() != ISD::FrameIndex) 9464 return false; 9465 const MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 9466 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 9467 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 9468 int FS = MFI->getObjectSize(FI); 9469 int BFS = MFI->getObjectSize(BFI); 9470 if (FS != BFS || FS != (int)Bytes) return false; 9471 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes); 9472 } 9473 9474 SDValue Base1 = Loc, Base2 = BaseLoc; 9475 int64_t Offset1 = 0, Offset2 = 0; 9476 getBaseWithConstantOffset(Loc, Base1, Offset1, DAG); 9477 getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG); 9478 if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes)) 9479 return true; 9480 9481 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9482 const GlobalValue *GV1 = nullptr; 9483 const GlobalValue *GV2 = nullptr; 9484 Offset1 = 0; 9485 Offset2 = 0; 9486 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1); 9487 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2); 9488 if (isGA1 && isGA2 && GV1 == GV2) 9489 return Offset1 == (Offset2 + Dist*Bytes); 9490 return false; 9491 } 9492 9493 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does 9494 // not enforce equality of the chain operands. 
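// Dist is measured in units of Bytes; for instance, isConsecutiveLS(N, Base,
// 16, 1, DAG) asks whether N accesses the 16 bytes immediately following
// Base (a negative Dist checks the preceding direction).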
9495 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
9496                             unsigned Bytes, int Dist,
9497                             SelectionDAG &DAG) {
9498   if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
9499     EVT VT = LS->getMemoryVT();
9500     SDValue Loc = LS->getBasePtr();
9501     return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
9502   }
9503
9504   if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
9505     EVT VT;
9506     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
9507     default: return false;
9508     case Intrinsic::ppc_qpx_qvlfd:
9509     case Intrinsic::ppc_qpx_qvlfda:
9510       VT = MVT::v4f64;
9511       break;
9512     case Intrinsic::ppc_qpx_qvlfs:
9513     case Intrinsic::ppc_qpx_qvlfsa:
9514       VT = MVT::v4f32;
9515       break;
9516     case Intrinsic::ppc_qpx_qvlfcd:
9517     case Intrinsic::ppc_qpx_qvlfcda:
9518       VT = MVT::v2f64;
9519       break;
9520     case Intrinsic::ppc_qpx_qvlfcs:
9521     case Intrinsic::ppc_qpx_qvlfcsa:
9522       VT = MVT::v2f32;
9523       break;
9524     case Intrinsic::ppc_qpx_qvlfiwa:
9525     case Intrinsic::ppc_qpx_qvlfiwz:
9526     case Intrinsic::ppc_altivec_lvx:
9527     case Intrinsic::ppc_altivec_lvxl:
9528     case Intrinsic::ppc_vsx_lxvw4x:
9529       VT = MVT::v4i32;
9530       break;
9531     case Intrinsic::ppc_vsx_lxvd2x:
9532       VT = MVT::v2f64;
9533       break;
9534     case Intrinsic::ppc_altivec_lvebx:
9535       VT = MVT::i8;
9536       break;
9537     case Intrinsic::ppc_altivec_lvehx:
9538       VT = MVT::i16;
9539       break;
9540     case Intrinsic::ppc_altivec_lvewx:
9541       VT = MVT::i32;
9542       break;
9543     }
9544
9545     return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
9546   }
9547
9548   if (N->getOpcode() == ISD::INTRINSIC_VOID) {
9549     EVT VT;
9550     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
9551     default: return false;
9552     case Intrinsic::ppc_qpx_qvstfd:
9553     case Intrinsic::ppc_qpx_qvstfda:
9554       VT = MVT::v4f64;
9555       break;
9556     case Intrinsic::ppc_qpx_qvstfs:
9557     case Intrinsic::ppc_qpx_qvstfsa:
9558       VT = MVT::v4f32;
9559       break;
9560     case Intrinsic::ppc_qpx_qvstfcd:
9561     case Intrinsic::ppc_qpx_qvstfcda:
9562       VT = MVT::v2f64;
9563       break;
9564     case Intrinsic::ppc_qpx_qvstfcs:
9565     case Intrinsic::ppc_qpx_qvstfcsa:
9566       VT = MVT::v2f32;
9567       break;
9568     case Intrinsic::ppc_qpx_qvstfiw:
9569     case Intrinsic::ppc_qpx_qvstfiwa:
9570     case Intrinsic::ppc_altivec_stvx:
9571     case Intrinsic::ppc_altivec_stvxl:
9572     case Intrinsic::ppc_vsx_stxvw4x:
9573       VT = MVT::v4i32;
9574       break;
9575     case Intrinsic::ppc_vsx_stxvd2x:
9576       VT = MVT::v2f64;
9577       break;
9578     case Intrinsic::ppc_altivec_stvebx:
9579       VT = MVT::i8;
9580       break;
9581     case Intrinsic::ppc_altivec_stvehx:
9582       VT = MVT::i16;
9583       break;
9584     case Intrinsic::ppc_altivec_stvewx:
9585       VT = MVT::i32;
9586       break;
9587     }
9588
9589     return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
9590   }
9591
9592   return false;
9593 }
9594
9595 // Return true if there is a nearby consecutive load to the one provided
9596 // (regardless of alignment). We search up and down the chain, looking through
9597 // token factors and other loads (but nothing else). As a result, a true result
9598 // indicates that it is safe to create a new consecutive load adjacent to the
9599 // load provided.
9600 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
9601   SDValue Chain = LD->getChain();
9602   EVT VT = LD->getMemoryVT();
9603
9604   SmallSet<SDNode *, 16> LoadRoots;
9605   SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
9606   SmallSet<SDNode *, 16> Visited;
9607
9608   // First, search up the chain, branching to follow all token-factor operands.
9609   // If we find a consecutive load, then we're done; otherwise, record all
9610   // nodes just above the top-level loads and token factors.
9611   while (!Queue.empty()) {
9612     SDNode *ChainNext = Queue.pop_back_val();
9613     if (!Visited.insert(ChainNext).second)
9614       continue;
9615
9616     if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
9617       if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
9618         return true;
9619
9620       if (!Visited.count(ChainLD->getChain().getNode()))
9621         Queue.push_back(ChainLD->getChain().getNode());
9622     } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
9623       for (const SDUse &O : ChainNext->ops())
9624         if (!Visited.count(O.getNode()))
9625           Queue.push_back(O.getNode());
9626     } else
9627       LoadRoots.insert(ChainNext);
9628   }
9629
9630   // Second, search down the chain, starting from the top-level nodes recorded
9631   // in the first phase. These top-level nodes are the nodes just above all
9632   // loads and token factors. Starting with their uses, recursively look through
9633   // all loads (just the chain uses) and token factors to find a consecutive
9634   // load.
9635   Visited.clear();
9636   Queue.clear();
9637
9638   for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
9639        IE = LoadRoots.end(); I != IE; ++I) {
9640     Queue.push_back(*I);
9641
9642     while (!Queue.empty()) {
9643       SDNode *LoadRoot = Queue.pop_back_val();
9644       if (!Visited.insert(LoadRoot).second)
9645         continue;
9646
9647       if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
9648         if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
9649           return true;
9650
9651       for (SDNode::use_iterator UI = LoadRoot->use_begin(),
9652            UE = LoadRoot->use_end(); UI != UE; ++UI)
9653         if (((isa<MemSDNode>(*UI) &&
9654               cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
9655              UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
9656           Queue.push_back(*UI);
9657     }
9658   }
9659
9660   return false;
9661 }
9662
9663 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
9664                                                   DAGCombinerInfo &DCI) const {
9665   SelectionDAG &DAG = DCI.DAG;
9666   SDLoc dl(N);
9667
9668   assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
9669   // If we're tracking CR bits, we need to be careful that we don't have:
9670   //   trunc(binary-ops(zext(x), zext(y)))
9671   // or
9672   //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...))
9673   // such that we're unnecessarily moving things into GPRs when it would be
9674   // better to keep them in CR bits.
9675
9676   // Note that trunc here can be an actual i1 trunc, or can be the effective
9677   // truncation that comes from a setcc or select_cc.
9678   if (N->getOpcode() == ISD::TRUNCATE &&
9679       N->getValueType(0) != MVT::i1)
9680     return SDValue();
9681
9682   if (N->getOperand(0).getValueType() != MVT::i32 &&
9683       N->getOperand(0).getValueType() != MVT::i64)
9684     return SDValue();
9685
9686   if (N->getOpcode() == ISD::SETCC ||
9687       N->getOpcode() == ISD::SELECT_CC) {
9688     // If we're looking at a comparison, then we need to make sure that the
9689     // high bits (all except for the first) don't affect the result.
9690     ISD::CondCode CC =
9691       cast<CondCodeSDNode>(N->getOperand(
9692         N->getOpcode() == ISD::SETCC ?
2 : 4))->get(); 9693 unsigned OpBits = N->getOperand(0).getValueSizeInBits(); 9694 9695 if (ISD::isSignedIntSetCC(CC)) { 9696 if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits || 9697 DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits) 9698 return SDValue(); 9699 } else if (ISD::isUnsignedIntSetCC(CC)) { 9700 if (!DAG.MaskedValueIsZero(N->getOperand(0), 9701 APInt::getHighBitsSet(OpBits, OpBits-1)) || 9702 !DAG.MaskedValueIsZero(N->getOperand(1), 9703 APInt::getHighBitsSet(OpBits, OpBits-1))) 9704 return SDValue(); 9705 } else { 9706 // This is neither a signed nor an unsigned comparison, just make sure 9707 // that the high bits are equal. 9708 APInt Op1Zero, Op1One; 9709 APInt Op2Zero, Op2One; 9710 DAG.computeKnownBits(N->getOperand(0), Op1Zero, Op1One); 9711 DAG.computeKnownBits(N->getOperand(1), Op2Zero, Op2One); 9712 9713 // We don't really care about what is known about the first bit (if 9714 // anything), so clear it in all masks prior to comparing them. 9715 Op1Zero.clearBit(0); Op1One.clearBit(0); 9716 Op2Zero.clearBit(0); Op2One.clearBit(0); 9717 9718 if (Op1Zero != Op2Zero || Op1One != Op2One) 9719 return SDValue(); 9720 } 9721 } 9722 9723 // We now know that the higher-order bits are irrelevant, we just need to 9724 // make sure that all of the intermediate operations are bit operations, and 9725 // all inputs are extensions. 9726 if (N->getOperand(0).getOpcode() != ISD::AND && 9727 N->getOperand(0).getOpcode() != ISD::OR && 9728 N->getOperand(0).getOpcode() != ISD::XOR && 9729 N->getOperand(0).getOpcode() != ISD::SELECT && 9730 N->getOperand(0).getOpcode() != ISD::SELECT_CC && 9731 N->getOperand(0).getOpcode() != ISD::TRUNCATE && 9732 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND && 9733 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND && 9734 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND) 9735 return SDValue(); 9736 9737 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) && 9738 N->getOperand(1).getOpcode() != ISD::AND && 9739 N->getOperand(1).getOpcode() != ISD::OR && 9740 N->getOperand(1).getOpcode() != ISD::XOR && 9741 N->getOperand(1).getOpcode() != ISD::SELECT && 9742 N->getOperand(1).getOpcode() != ISD::SELECT_CC && 9743 N->getOperand(1).getOpcode() != ISD::TRUNCATE && 9744 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND && 9745 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND && 9746 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND) 9747 return SDValue(); 9748 9749 SmallVector<SDValue, 4> Inputs; 9750 SmallVector<SDValue, 8> BinOps, PromOps; 9751 SmallPtrSet<SDNode *, 16> Visited; 9752 9753 for (unsigned i = 0; i < 2; ++i) { 9754 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 9755 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 9756 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 9757 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) || 9758 isa<ConstantSDNode>(N->getOperand(i))) 9759 Inputs.push_back(N->getOperand(i)); 9760 else 9761 BinOps.push_back(N->getOperand(i)); 9762 9763 if (N->getOpcode() == ISD::TRUNCATE) 9764 break; 9765 } 9766 9767 // Visit all inputs, collect all binary operations (and, or, xor and 9768 // select) that are all fed by extensions. 9769 while (!BinOps.empty()) { 9770 SDValue BinOp = BinOps.back(); 9771 BinOps.pop_back(); 9772 9773 if (!Visited.insert(BinOp.getNode()).second) 9774 continue; 9775 9776 PromOps.push_back(BinOp); 9777 9778 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 9779 // The condition of the select is not promoted. 
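      // (For SELECT that condition is operand 0; for SELECT_CC, operands 0
      // and 1 are the values being compared and likewise keep their type.)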
9780 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 9781 continue; 9782 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 9783 continue; 9784 9785 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 9786 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 9787 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 9788 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) || 9789 isa<ConstantSDNode>(BinOp.getOperand(i))) { 9790 Inputs.push_back(BinOp.getOperand(i)); 9791 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 9792 BinOp.getOperand(i).getOpcode() == ISD::OR || 9793 BinOp.getOperand(i).getOpcode() == ISD::XOR || 9794 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 9795 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC || 9796 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 9797 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 9798 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 9799 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) { 9800 BinOps.push_back(BinOp.getOperand(i)); 9801 } else { 9802 // We have an input that is not an extension or another binary 9803 // operation; we'll abort this transformation. 9804 return SDValue(); 9805 } 9806 } 9807 } 9808 9809 // Make sure that this is a self-contained cluster of operations (which 9810 // is not quite the same thing as saying that everything has only one 9811 // use). 9812 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 9813 if (isa<ConstantSDNode>(Inputs[i])) 9814 continue; 9815 9816 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 9817 UE = Inputs[i].getNode()->use_end(); 9818 UI != UE; ++UI) { 9819 SDNode *User = *UI; 9820 if (User != N && !Visited.count(User)) 9821 return SDValue(); 9822 9823 // Make sure that we're not going to promote the non-output-value 9824 // operand(s) or SELECT or SELECT_CC. 9825 // FIXME: Although we could sometimes handle this, and it does occur in 9826 // practice that one of the condition inputs to the select is also one of 9827 // the outputs, we currently can't deal with this. 9828 if (User->getOpcode() == ISD::SELECT) { 9829 if (User->getOperand(0) == Inputs[i]) 9830 return SDValue(); 9831 } else if (User->getOpcode() == ISD::SELECT_CC) { 9832 if (User->getOperand(0) == Inputs[i] || 9833 User->getOperand(1) == Inputs[i]) 9834 return SDValue(); 9835 } 9836 } 9837 } 9838 9839 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 9840 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 9841 UE = PromOps[i].getNode()->use_end(); 9842 UI != UE; ++UI) { 9843 SDNode *User = *UI; 9844 if (User != N && !Visited.count(User)) 9845 return SDValue(); 9846 9847 // Make sure that we're not going to promote the non-output-value 9848 // operand(s) or SELECT or SELECT_CC. 9849 // FIXME: Although we could sometimes handle this, and it does occur in 9850 // practice that one of the condition inputs to the select is also one of 9851 // the outputs, we currently can't deal with this. 9852 if (User->getOpcode() == ISD::SELECT) { 9853 if (User->getOperand(0) == PromOps[i]) 9854 return SDValue(); 9855 } else if (User->getOpcode() == ISD::SELECT_CC) { 9856 if (User->getOperand(0) == PromOps[i] || 9857 User->getOperand(1) == PromOps[i]) 9858 return SDValue(); 9859 } 9860 } 9861 } 9862 9863 // Replace all inputs with the extension operand. 
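  // Each non-constant input is known to be an extension from i1 (that is how
  // it was collected above), so stripping the extension recovers the
  // underlying i1 value directly.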
9864 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 9865 // Constants may have users outside the cluster of to-be-promoted nodes, 9866 // and so we need to replace those as we do the promotions. 9867 if (isa<ConstantSDNode>(Inputs[i])) 9868 continue; 9869 else 9870 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0)); 9871 } 9872 9873 std::list<HandleSDNode> PromOpHandles; 9874 for (auto &PromOp : PromOps) 9875 PromOpHandles.emplace_back(PromOp); 9876 9877 // Replace all operations (these are all the same, but have a different 9878 // (i1) return type). DAG.getNode will validate that the types of 9879 // a binary operator match, so go through the list in reverse so that 9880 // we've likely promoted both operands first. Any intermediate truncations or 9881 // extensions disappear. 9882 while (!PromOpHandles.empty()) { 9883 SDValue PromOp = PromOpHandles.back().getValue(); 9884 PromOpHandles.pop_back(); 9885 9886 if (PromOp.getOpcode() == ISD::TRUNCATE || 9887 PromOp.getOpcode() == ISD::SIGN_EXTEND || 9888 PromOp.getOpcode() == ISD::ZERO_EXTEND || 9889 PromOp.getOpcode() == ISD::ANY_EXTEND) { 9890 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) && 9891 PromOp.getOperand(0).getValueType() != MVT::i1) { 9892 // The operand is not yet ready (see comment below). 9893 PromOpHandles.emplace_front(PromOp); 9894 continue; 9895 } 9896 9897 SDValue RepValue = PromOp.getOperand(0); 9898 if (isa<ConstantSDNode>(RepValue)) 9899 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue); 9900 9901 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue); 9902 continue; 9903 } 9904 9905 unsigned C; 9906 switch (PromOp.getOpcode()) { 9907 default: C = 0; break; 9908 case ISD::SELECT: C = 1; break; 9909 case ISD::SELECT_CC: C = 2; break; 9910 } 9911 9912 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 9913 PromOp.getOperand(C).getValueType() != MVT::i1) || 9914 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 9915 PromOp.getOperand(C+1).getValueType() != MVT::i1)) { 9916 // The to-be-promoted operands of this node have not yet been 9917 // promoted (this should be rare because we're going through the 9918 // list backward, but if one of the operands has several users in 9919 // this cluster of to-be-promoted nodes, it is possible). 9920 PromOpHandles.emplace_front(PromOp); 9921 continue; 9922 } 9923 9924 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 9925 PromOp.getNode()->op_end()); 9926 9927 // If there are any constant inputs, make sure they're replaced now. 9928 for (unsigned i = 0; i < 2; ++i) 9929 if (isa<ConstantSDNode>(Ops[C+i])) 9930 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]); 9931 9932 DAG.ReplaceAllUsesOfValueWith(PromOp, 9933 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops)); 9934 } 9935 9936 // Now we're left with the initial truncation itself. 9937 if (N->getOpcode() == ISD::TRUNCATE) 9938 return N->getOperand(0); 9939 9940 // Otherwise, this is a comparison. The operands to be compared have just 9941 // changed type (to i1), but everything else is the same. 9942 return SDValue(N, 0); 9943 } 9944 9945 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N, 9946 DAGCombinerInfo &DCI) const { 9947 SelectionDAG &DAG = DCI.DAG; 9948 SDLoc dl(N); 9949 9950 // If we're tracking CR bits, we need to be careful that we don't have: 9951 // zext(binary-ops(trunc(x), trunc(y))) 9952 // or 9953 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...) 
9954 // such that we're unnecessarily moving things into CR bits that can more 9955 // efficiently stay in GPRs. Note that if we're not certain that the high 9956 // bits are set as required by the final extension, we still may need to do 9957 // some masking to get the proper behavior. 9958 9959 // This same functionality is important on PPC64 when dealing with 9960 // 32-to-64-bit extensions; these occur often when 32-bit values are used as 9961 // the return values of functions. Because it is so similar, it is handled 9962 // here as well. 9963 9964 if (N->getValueType(0) != MVT::i32 && 9965 N->getValueType(0) != MVT::i64) 9966 return SDValue(); 9967 9968 if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) || 9969 (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64()))) 9970 return SDValue(); 9971 9972 if (N->getOperand(0).getOpcode() != ISD::AND && 9973 N->getOperand(0).getOpcode() != ISD::OR && 9974 N->getOperand(0).getOpcode() != ISD::XOR && 9975 N->getOperand(0).getOpcode() != ISD::SELECT && 9976 N->getOperand(0).getOpcode() != ISD::SELECT_CC) 9977 return SDValue(); 9978 9979 SmallVector<SDValue, 4> Inputs; 9980 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps; 9981 SmallPtrSet<SDNode *, 16> Visited; 9982 9983 // Visit all inputs, collect all binary operations (and, or, xor and 9984 // select) that are all fed by truncations. 9985 while (!BinOps.empty()) { 9986 SDValue BinOp = BinOps.back(); 9987 BinOps.pop_back(); 9988 9989 if (!Visited.insert(BinOp.getNode()).second) 9990 continue; 9991 9992 PromOps.push_back(BinOp); 9993 9994 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 9995 // The condition of the select is not promoted. 9996 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 9997 continue; 9998 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 9999 continue; 10000 10001 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 10002 isa<ConstantSDNode>(BinOp.getOperand(i))) { 10003 Inputs.push_back(BinOp.getOperand(i)); 10004 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 10005 BinOp.getOperand(i).getOpcode() == ISD::OR || 10006 BinOp.getOperand(i).getOpcode() == ISD::XOR || 10007 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 10008 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) { 10009 BinOps.push_back(BinOp.getOperand(i)); 10010 } else { 10011 // We have an input that is not a truncation or another binary 10012 // operation; we'll abort this transformation. 10013 return SDValue(); 10014 } 10015 } 10016 } 10017 10018 // The operands of a select that must be truncated when the select is 10019 // promoted because the operand is actually part of the to-be-promoted set. 10020 DenseMap<SDNode *, EVT> SelectTruncOp[2]; 10021 10022 // Make sure that this is a self-contained cluster of operations (which 10023 // is not quite the same thing as saying that everything has only one 10024 // use). 10025 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 10026 if (isa<ConstantSDNode>(Inputs[i])) 10027 continue; 10028 10029 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(), 10030 UE = Inputs[i].getNode()->use_end(); 10031 UI != UE; ++UI) { 10032 SDNode *User = *UI; 10033 if (User != N && !Visited.count(User)) 10034 return SDValue(); 10035 10036 // If we're going to promote the non-output-value operand(s) or SELECT or 10037 // SELECT_CC, record them for truncation. 
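      // (Unlike the rebuilt result operands, condition operands keep their
      // original type; if one of them is also in the to-be-promoted set, it
      // must be truncated back when its user is rebuilt below.)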
10038 if (User->getOpcode() == ISD::SELECT) { 10039 if (User->getOperand(0) == Inputs[i]) 10040 SelectTruncOp[0].insert(std::make_pair(User, 10041 User->getOperand(0).getValueType())); 10042 } else if (User->getOpcode() == ISD::SELECT_CC) { 10043 if (User->getOperand(0) == Inputs[i]) 10044 SelectTruncOp[0].insert(std::make_pair(User, 10045 User->getOperand(0).getValueType())); 10046 if (User->getOperand(1) == Inputs[i]) 10047 SelectTruncOp[1].insert(std::make_pair(User, 10048 User->getOperand(1).getValueType())); 10049 } 10050 } 10051 } 10052 10053 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 10054 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(), 10055 UE = PromOps[i].getNode()->use_end(); 10056 UI != UE; ++UI) { 10057 SDNode *User = *UI; 10058 if (User != N && !Visited.count(User)) 10059 return SDValue(); 10060 10061 // If we're going to promote the non-output-value operand(s) or SELECT or 10062 // SELECT_CC, record them for truncation. 10063 if (User->getOpcode() == ISD::SELECT) { 10064 if (User->getOperand(0) == PromOps[i]) 10065 SelectTruncOp[0].insert(std::make_pair(User, 10066 User->getOperand(0).getValueType())); 10067 } else if (User->getOpcode() == ISD::SELECT_CC) { 10068 if (User->getOperand(0) == PromOps[i]) 10069 SelectTruncOp[0].insert(std::make_pair(User, 10070 User->getOperand(0).getValueType())); 10071 if (User->getOperand(1) == PromOps[i]) 10072 SelectTruncOp[1].insert(std::make_pair(User, 10073 User->getOperand(1).getValueType())); 10074 } 10075 } 10076 } 10077 10078 unsigned PromBits = N->getOperand(0).getValueSizeInBits(); 10079 bool ReallyNeedsExt = false; 10080 if (N->getOpcode() != ISD::ANY_EXTEND) { 10081 // If all of the inputs are not already sign/zero extended, then 10082 // we'll still need to do that at the end. 10083 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 10084 if (isa<ConstantSDNode>(Inputs[i])) 10085 continue; 10086 10087 unsigned OpBits = 10088 Inputs[i].getOperand(0).getValueSizeInBits(); 10089 assert(PromBits < OpBits && "Truncation not to a smaller bit count?"); 10090 10091 if ((N->getOpcode() == ISD::ZERO_EXTEND && 10092 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0), 10093 APInt::getHighBitsSet(OpBits, 10094 OpBits-PromBits))) || 10095 (N->getOpcode() == ISD::SIGN_EXTEND && 10096 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) < 10097 (OpBits-(PromBits-1)))) { 10098 ReallyNeedsExt = true; 10099 break; 10100 } 10101 } 10102 } 10103 10104 // Replace all inputs, either with the truncation operand, or a 10105 // truncation or extension to the final output type. 10106 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 10107 // Constant inputs need to be replaced with the to-be-promoted nodes that 10108 // use them because they might have users outside of the cluster of 10109 // promoted nodes. 
10110 if (isa<ConstantSDNode>(Inputs[i])) 10111 continue; 10112 10113 SDValue InSrc = Inputs[i].getOperand(0); 10114 if (Inputs[i].getValueType() == N->getValueType(0)) 10115 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc); 10116 else if (N->getOpcode() == ISD::SIGN_EXTEND) 10117 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 10118 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0))); 10119 else if (N->getOpcode() == ISD::ZERO_EXTEND) 10120 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 10121 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0))); 10122 else 10123 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 10124 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0))); 10125 } 10126 10127 std::list<HandleSDNode> PromOpHandles; 10128 for (auto &PromOp : PromOps) 10129 PromOpHandles.emplace_back(PromOp); 10130 10131 // Replace all operations (these are all the same, but have a different 10132 // (promoted) return type). DAG.getNode will validate that the types of 10133 // a binary operator match, so go through the list in reverse so that 10134 // we've likely promoted both operands first. 10135 while (!PromOpHandles.empty()) { 10136 SDValue PromOp = PromOpHandles.back().getValue(); 10137 PromOpHandles.pop_back(); 10138 10139 unsigned C; 10140 switch (PromOp.getOpcode()) { 10141 default: C = 0; break; 10142 case ISD::SELECT: C = 1; break; 10143 case ISD::SELECT_CC: C = 2; break; 10144 } 10145 10146 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 10147 PromOp.getOperand(C).getValueType() != N->getValueType(0)) || 10148 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 10149 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) { 10150 // The to-be-promoted operands of this node have not yet been 10151 // promoted (this should be rare because we're going through the 10152 // list backward, but if one of the operands has several users in 10153 // this cluster of to-be-promoted nodes, it is possible). 10154 PromOpHandles.emplace_front(PromOp); 10155 continue; 10156 } 10157 10158 // For SELECT and SELECT_CC nodes, we do a similar check for any 10159 // to-be-promoted comparison inputs. 10160 if (PromOp.getOpcode() == ISD::SELECT || 10161 PromOp.getOpcode() == ISD::SELECT_CC) { 10162 if ((SelectTruncOp[0].count(PromOp.getNode()) && 10163 PromOp.getOperand(0).getValueType() != N->getValueType(0)) || 10164 (SelectTruncOp[1].count(PromOp.getNode()) && 10165 PromOp.getOperand(1).getValueType() != N->getValueType(0))) { 10166 PromOpHandles.emplace_front(PromOp); 10167 continue; 10168 } 10169 } 10170 10171 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 10172 PromOp.getNode()->op_end()); 10173 10174 // If this node has constant inputs, then they'll need to be promoted here. 10175 for (unsigned i = 0; i < 2; ++i) { 10176 if (!isa<ConstantSDNode>(Ops[C+i])) 10177 continue; 10178 if (Ops[C+i].getValueType() == N->getValueType(0)) 10179 continue; 10180 10181 if (N->getOpcode() == ISD::SIGN_EXTEND) 10182 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 10183 else if (N->getOpcode() == ISD::ZERO_EXTEND) 10184 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 10185 else 10186 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 10187 } 10188 10189 // If we've promoted the comparison inputs of a SELECT or SELECT_CC, 10190 // truncate them again to the original value type. 
10191     if (PromOp.getOpcode() == ISD::SELECT ||
10192         PromOp.getOpcode() == ISD::SELECT_CC) {
10193       auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
10194       if (SI0 != SelectTruncOp[0].end())
10195         Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
10196       auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
10197       if (SI1 != SelectTruncOp[1].end())
10198         Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
10199     }
10200
10201     DAG.ReplaceAllUsesOfValueWith(PromOp,
10202       DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
10203   }
10204
10205   // Now we're left with the initial extension itself.
10206   if (!ReallyNeedsExt)
10207     return N->getOperand(0);
10208
10209   // To zero extend, just mask off everything except for the first bit (in the
10210   // i1 case).
10211   if (N->getOpcode() == ISD::ZERO_EXTEND)
10212     return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
10213                        DAG.getConstant(APInt::getLowBitsSet(
10214                                          N->getValueSizeInBits(0), PromBits),
10215                                        dl, N->getValueType(0)));
10216
10217   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
10218          "Invalid extension type");
10219   EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
10220   SDValue ShiftCst =
10221     DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
10222   return DAG.getNode(
10223       ISD::SRA, dl, N->getValueType(0),
10224       DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
10225       ShiftCst);
10226 }
10227
10228 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
10229                                               DAGCombinerInfo &DCI) const {
10230   assert((N->getOpcode() == ISD::SINT_TO_FP ||
10231           N->getOpcode() == ISD::UINT_TO_FP) &&
10232          "Need an int -> FP conversion node here");
10233
10234   if (!Subtarget.has64BitSupport())
10235     return SDValue();
10236
10237   SelectionDAG &DAG = DCI.DAG;
10238   SDLoc dl(N);
10239   SDValue Op(N, 0);
10240
10241   // Don't handle ppc_fp128 here or i1 conversions.
10242   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
10243     return SDValue();
10244   if (Op.getOperand(0).getValueType() == MVT::i1)
10245     return SDValue();
10246
10247   // For i32 intermediate values, unfortunately, the conversion functions
10248   // leave the upper 32 bits of the value undefined. Within the set of
10249   // scalar instructions, we have no method for zero- or sign-extending the
10250   // value. Thus, we cannot handle i32 intermediate values here.
10251   if (Op.getOperand(0).getValueType() == MVT::i32)
10252     return SDValue();
10253
10254   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
10255          "UINT_TO_FP is supported only with FPCVT");
10256
10257   // If we have FCFIDS, then use it when converting to single-precision.
10258   // Otherwise, convert to double-precision and then round.
10259   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
10260                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
10261                                                             : PPCISD::FCFIDS)
10262                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
10263                                                             : PPCISD::FCFID);
10264   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
10265                   ? MVT::f32
10266                   : MVT::f64;
10267
10268   // If we're converting from a float to an int and back to a float again,
10269   // then we don't need the store/load pair at all.
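  // For example, (f64 (sint_to_fp (i64 (fp_to_sint X)))) can be emitted as
  // fctidz followed by fcfid, staying entirely within the FP register file.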
10270 if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT && 10271 Subtarget.hasFPCVT()) || 10272 (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) { 10273 SDValue Src = Op.getOperand(0).getOperand(0); 10274 if (Src.getValueType() == MVT::f32) { 10275 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 10276 DCI.AddToWorklist(Src.getNode()); 10277 } else if (Src.getValueType() != MVT::f64) { 10278 // Make sure that we don't pick up a ppc_fp128 source value. 10279 return SDValue(); 10280 } 10281 10282 unsigned FCTOp = 10283 Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 10284 PPCISD::FCTIDUZ; 10285 10286 SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src); 10287 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp); 10288 10289 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) { 10290 FP = DAG.getNode(ISD::FP_ROUND, dl, 10291 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 10292 DCI.AddToWorklist(FP.getNode()); 10293 } 10294 10295 return FP; 10296 } 10297 10298 return SDValue(); 10299 } 10300 10301 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for 10302 // builtins) into loads with swaps. 10303 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N, 10304 DAGCombinerInfo &DCI) const { 10305 SelectionDAG &DAG = DCI.DAG; 10306 SDLoc dl(N); 10307 SDValue Chain; 10308 SDValue Base; 10309 MachineMemOperand *MMO; 10310 10311 switch (N->getOpcode()) { 10312 default: 10313 llvm_unreachable("Unexpected opcode for little endian VSX load"); 10314 case ISD::LOAD: { 10315 LoadSDNode *LD = cast<LoadSDNode>(N); 10316 Chain = LD->getChain(); 10317 Base = LD->getBasePtr(); 10318 MMO = LD->getMemOperand(); 10319 // If the MMO suggests this isn't a load of a full vector, leave 10320 // things alone. For a built-in, we have to make the change for 10321 // correctness, so if there is a size problem that will be a bug. 10322 if (MMO->getSize() < 16) 10323 return SDValue(); 10324 break; 10325 } 10326 case ISD::INTRINSIC_W_CHAIN: { 10327 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 10328 Chain = Intrin->getChain(); 10329 // Similarly to the store case below, Intrin->getBasePtr() doesn't get 10330 // us what we want. Get operand 2 instead. 10331 Base = Intrin->getOperand(2); 10332 MMO = Intrin->getMemOperand(); 10333 break; 10334 } 10335 } 10336 10337 MVT VecTy = N->getValueType(0).getSimpleVT(); 10338 SDValue LoadOps[] = { Chain, Base }; 10339 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl, 10340 DAG.getVTList(MVT::v2f64, MVT::Other), 10341 LoadOps, MVT::v2f64, MMO); 10342 10343 DCI.AddToWorklist(Load.getNode()); 10344 Chain = Load.getValue(1); 10345 SDValue Swap = DAG.getNode( 10346 PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load); 10347 DCI.AddToWorklist(Swap.getNode()); 10348 10349 // Add a bitcast if the resulting load type doesn't match v2f64. 10350 if (VecTy != MVT::v2f64) { 10351 SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap); 10352 DCI.AddToWorklist(N.getNode()); 10353 // Package {bitcast value, swap's chain} to match Load's shape. 10354 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other), 10355 N, Swap.getValue(1)); 10356 } 10357 10358 return Swap; 10359 } 10360 10361 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for 10362 // builtins) into stores with swaps. 
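// On little-endian subtargets, stxvd2x stores the two doublewords of a
// VSX register in big-endian element order, so an xxswapd is emitted
// first to produce the expected little-endian memory image.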
10363 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N, 10364 DAGCombinerInfo &DCI) const { 10365 SelectionDAG &DAG = DCI.DAG; 10366 SDLoc dl(N); 10367 SDValue Chain; 10368 SDValue Base; 10369 unsigned SrcOpnd; 10370 MachineMemOperand *MMO; 10371 10372 switch (N->getOpcode()) { 10373 default: 10374 llvm_unreachable("Unexpected opcode for little endian VSX store"); 10375 case ISD::STORE: { 10376 StoreSDNode *ST = cast<StoreSDNode>(N); 10377 Chain = ST->getChain(); 10378 Base = ST->getBasePtr(); 10379 MMO = ST->getMemOperand(); 10380 SrcOpnd = 1; 10381 // If the MMO suggests this isn't a store of a full vector, leave 10382 // things alone. For a built-in, we have to make the change for 10383 // correctness, so if there is a size problem that will be a bug. 10384 if (MMO->getSize() < 16) 10385 return SDValue(); 10386 break; 10387 } 10388 case ISD::INTRINSIC_VOID: { 10389 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N); 10390 Chain = Intrin->getChain(); 10391 // Intrin->getBasePtr() oddly does not get what we want. 10392 Base = Intrin->getOperand(3); 10393 MMO = Intrin->getMemOperand(); 10394 SrcOpnd = 2; 10395 break; 10396 } 10397 } 10398 10399 SDValue Src = N->getOperand(SrcOpnd); 10400 MVT VecTy = Src.getValueType().getSimpleVT(); 10401 10402 // All stores are done as v2f64 and possible bit cast. 10403 if (VecTy != MVT::v2f64) { 10404 Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src); 10405 DCI.AddToWorklist(Src.getNode()); 10406 } 10407 10408 SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl, 10409 DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src); 10410 DCI.AddToWorklist(Swap.getNode()); 10411 Chain = Swap.getValue(1); 10412 SDValue StoreOps[] = { Chain, Swap, Base }; 10413 SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl, 10414 DAG.getVTList(MVT::Other), 10415 StoreOps, VecTy, MMO); 10416 DCI.AddToWorklist(Store.getNode()); 10417 return Store; 10418 } 10419 10420 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, 10421 DAGCombinerInfo &DCI) const { 10422 SelectionDAG &DAG = DCI.DAG; 10423 SDLoc dl(N); 10424 switch (N->getOpcode()) { 10425 default: break; 10426 case PPCISD::SHL: 10427 if (isNullConstant(N->getOperand(0))) // 0 << V -> 0. 10428 return N->getOperand(0); 10429 break; 10430 case PPCISD::SRL: 10431 if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0. 10432 return N->getOperand(0); 10433 break; 10434 case PPCISD::SRA: 10435 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 10436 if (C->isNullValue() || // 0 >>s V -> 0. 10437 C->isAllOnesValue()) // -1 >>s V -> -1. 10438 return N->getOperand(0); 10439 } 10440 break; 10441 case ISD::SIGN_EXTEND: 10442 case ISD::ZERO_EXTEND: 10443 case ISD::ANY_EXTEND: 10444 return DAGCombineExtBoolTrunc(N, DCI); 10445 case ISD::TRUNCATE: 10446 case ISD::SETCC: 10447 case ISD::SELECT_CC: 10448 return DAGCombineTruncBoolExt(N, DCI); 10449 case ISD::SINT_TO_FP: 10450 case ISD::UINT_TO_FP: 10451 return combineFPToIntToFP(N, DCI); 10452 case ISD::STORE: { 10453 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)). 
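// For example (a sketch of the rewrite): (store (i32 (fp_to_sint f64:x)))
// becomes an FCTIWZ that converts in a floating-point register, followed
// by an STFIWX that stores the low word of that register directly,
// avoiding a round trip through the GPRs.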
10454 if (Subtarget.hasSTFIWX() && !cast<StoreSDNode>(N)->isTruncatingStore() &&
10455 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
10456 N->getOperand(1).getValueType() == MVT::i32 &&
10457 N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
10458 SDValue Val = N->getOperand(1).getOperand(0);
10459 if (Val.getValueType() == MVT::f32) {
10460 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
10461 DCI.AddToWorklist(Val.getNode());
10462 }
10463 Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val);
10464 DCI.AddToWorklist(Val.getNode());
10465
10466 SDValue Ops[] = {
10467 N->getOperand(0), Val, N->getOperand(2),
10468 DAG.getValueType(N->getOperand(1).getValueType())
10469 };
10470
10471 Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
10472 DAG.getVTList(MVT::Other), Ops,
10473 cast<StoreSDNode>(N)->getMemoryVT(),
10474 cast<StoreSDNode>(N)->getMemOperand());
10475 DCI.AddToWorklist(Val.getNode());
10476 return Val;
10477 }
10478
10479 // Turn STORE (BSWAP) -> sthbrx/stwbrx.
10480 if (cast<StoreSDNode>(N)->isUnindexed() &&
10481 N->getOperand(1).getOpcode() == ISD::BSWAP &&
10482 N->getOperand(1).getNode()->hasOneUse() &&
10483 (N->getOperand(1).getValueType() == MVT::i32 ||
10484 N->getOperand(1).getValueType() == MVT::i16 ||
10485 (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
10486 N->getOperand(1).getValueType() == MVT::i64))) {
10487 SDValue BSwapOp = N->getOperand(1).getOperand(0);
10488 // Do an any-extend to 32-bits if this is a half-word input.
10489 if (BSwapOp.getValueType() == MVT::i16)
10490 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
10491
10492 SDValue Ops[] = {
10493 N->getOperand(0), BSwapOp, N->getOperand(2),
10494 DAG.getValueType(N->getOperand(1).getValueType())
10495 };
10496 return
10497 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
10498 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
10499 cast<StoreSDNode>(N)->getMemOperand());
10500 }
10501
10502 // For little endian, VSX stores require generating xxswapd/stxvd2x.
10503 EVT VT = N->getOperand(1).getValueType();
10504 if (VT.isSimple()) {
10505 MVT StoreVT = VT.getSimpleVT();
10506 if (Subtarget.hasVSX() && Subtarget.isLittleEndian() &&
10507 (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
10508 StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
10509 return expandVSXStoreForLE(N, DCI);
10510 }
10511 break;
10512 }
10513 case ISD::LOAD: {
10514 LoadSDNode *LD = cast<LoadSDNode>(N);
10515 EVT VT = LD->getValueType(0);
10516
10517 // For little endian, VSX loads require generating lxvd2x/xxswapd.
10518 if (VT.isSimple()) {
10519 MVT LoadVT = VT.getSimpleVT();
10520 if (Subtarget.hasVSX() && Subtarget.isLittleEndian() &&
10521 (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
10522 LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
10523 return expandVSXLoadForLE(N, DCI);
10524 }
10525
10526 // We sometimes end up with a 64-bit integer load, from which we extract
10527 // two single-precision floating-point numbers. This happens with
10528 // std::complex<float>, and other similar structures, because of the way we
10529 // canonicalize structure copies. However, if we lack direct moves,
10530 // then the final bitcasts from the extracted integer values to the
10531 // floating-point numbers turn into store/load pairs. Even with direct moves,
10532 // just loading the two floating-point numbers is likely better.
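// For example (a sketch; the exact IR depends on how the front end lowers
// the copy), something like:
//   std::complex<float> c = *p;
// can reach us as a single i64 load feeding srl/truncate/bitcast chains;
// the lambda below rewrites that into two adjacent f32 loads.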
10533 auto ReplaceTwoFloatLoad = [&]() { 10534 if (VT != MVT::i64) 10535 return false; 10536 10537 if (LD->getExtensionType() != ISD::NON_EXTLOAD || 10538 LD->isVolatile()) 10539 return false; 10540 10541 // We're looking for a sequence like this: 10542 // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64 10543 // t16: i64 = srl t13, Constant:i32<32> 10544 // t17: i32 = truncate t16 10545 // t18: f32 = bitcast t17 10546 // t19: i32 = truncate t13 10547 // t20: f32 = bitcast t19 10548 10549 if (!LD->hasNUsesOfValue(2, 0)) 10550 return false; 10551 10552 auto UI = LD->use_begin(); 10553 while (UI.getUse().getResNo() != 0) ++UI; 10554 SDNode *Trunc = *UI++; 10555 while (UI.getUse().getResNo() != 0) ++UI; 10556 SDNode *RightShift = *UI; 10557 if (Trunc->getOpcode() != ISD::TRUNCATE) 10558 std::swap(Trunc, RightShift); 10559 10560 if (Trunc->getOpcode() != ISD::TRUNCATE || 10561 Trunc->getValueType(0) != MVT::i32 || 10562 !Trunc->hasOneUse()) 10563 return false; 10564 if (RightShift->getOpcode() != ISD::SRL || 10565 !isa<ConstantSDNode>(RightShift->getOperand(1)) || 10566 RightShift->getConstantOperandVal(1) != 32 || 10567 !RightShift->hasOneUse()) 10568 return false; 10569 10570 SDNode *Trunc2 = *RightShift->use_begin(); 10571 if (Trunc2->getOpcode() != ISD::TRUNCATE || 10572 Trunc2->getValueType(0) != MVT::i32 || 10573 !Trunc2->hasOneUse()) 10574 return false; 10575 10576 SDNode *Bitcast = *Trunc->use_begin(); 10577 SDNode *Bitcast2 = *Trunc2->use_begin(); 10578 10579 if (Bitcast->getOpcode() != ISD::BITCAST || 10580 Bitcast->getValueType(0) != MVT::f32) 10581 return false; 10582 if (Bitcast2->getOpcode() != ISD::BITCAST || 10583 Bitcast2->getValueType(0) != MVT::f32) 10584 return false; 10585 10586 if (Subtarget.isLittleEndian()) 10587 std::swap(Bitcast, Bitcast2); 10588 10589 // Bitcast has the second float (in memory-layout order) and Bitcast2 10590 // has the first one. 10591 10592 SDValue BasePtr = LD->getBasePtr(); 10593 if (LD->isIndexed()) { 10594 assert(LD->getAddressingMode() == ISD::PRE_INC && 10595 "Non-pre-inc AM on PPC?"); 10596 BasePtr = 10597 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 10598 LD->getOffset()); 10599 } 10600 10601 SDValue FloatLoad = 10602 DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr, 10603 LD->getPointerInfo(), false, LD->isNonTemporal(), 10604 LD->isInvariant(), LD->getAlignment(), LD->getAAInfo()); 10605 SDValue AddPtr = 10606 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), 10607 BasePtr, DAG.getIntPtrConstant(4, dl)); 10608 SDValue FloatLoad2 = 10609 DAG.getLoad(MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr, 10610 LD->getPointerInfo().getWithOffset(4), false, 10611 LD->isNonTemporal(), LD->isInvariant(), 10612 MinAlign(LD->getAlignment(), 4), LD->getAAInfo()); 10613 10614 if (LD->isIndexed()) { 10615 // Note that DAGCombine should re-form any pre-increment load(s) from 10616 // what is produced here if that makes sense. 10617 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr); 10618 } 10619 10620 DCI.CombineTo(Bitcast2, FloatLoad); 10621 DCI.CombineTo(Bitcast, FloatLoad2); 10622 10623 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 
2 : 1), 10624 SDValue(FloatLoad2.getNode(), 1)); 10625 return true; 10626 }; 10627 10628 if (ReplaceTwoFloatLoad()) 10629 return SDValue(N, 0); 10630 10631 EVT MemVT = LD->getMemoryVT(); 10632 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext()); 10633 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty); 10634 Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext()); 10635 unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy); 10636 if (LD->isUnindexed() && VT.isVector() && 10637 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) && 10638 // P8 and later hardware should just use LOAD. 10639 !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 || 10640 VT == MVT::v4i32 || VT == MVT::v4f32)) || 10641 (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) && 10642 LD->getAlignment() >= ScalarABIAlignment)) && 10643 LD->getAlignment() < ABIAlignment) { 10644 // This is a type-legal unaligned Altivec or QPX load. 10645 SDValue Chain = LD->getChain(); 10646 SDValue Ptr = LD->getBasePtr(); 10647 bool isLittleEndian = Subtarget.isLittleEndian(); 10648 10649 // This implements the loading of unaligned vectors as described in 10650 // the venerable Apple Velocity Engine overview. Specifically: 10651 // https://developer.apple.com/hardwaredrivers/ve/alignment.html 10652 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html 10653 // 10654 // The general idea is to expand a sequence of one or more unaligned 10655 // loads into an alignment-based permutation-control instruction (lvsl 10656 // or lvsr), a series of regular vector loads (which always truncate 10657 // their input address to an aligned address), and a series of 10658 // permutations. The results of these permutations are the requested 10659 // loaded values. The trick is that the last "extra" load is not taken 10660 // from the address you might suspect (sizeof(vector) bytes after the 10661 // last requested load), but rather sizeof(vector) - 1 bytes after the 10662 // last requested vector. The point of this is to avoid a page fault if 10663 // the base address happened to be aligned. This works because if the 10664 // base address is aligned, then adding less than a full vector length 10665 // will cause the last vector in the sequence to be (re)loaded. 10666 // Otherwise, the next vector will be fetched as you might suspect was 10667 // necessary. 10668 10669 // We might be able to reuse the permutation generation from 10670 // a different base address offset from this one by an aligned amount. 10671 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this 10672 // optimization later. 10673 Intrinsic::ID Intr, IntrLD, IntrPerm; 10674 MVT PermCntlTy, PermTy, LDTy; 10675 if (Subtarget.hasAltivec()) { 10676 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr : 10677 Intrinsic::ppc_altivec_lvsl; 10678 IntrLD = Intrinsic::ppc_altivec_lvx; 10679 IntrPerm = Intrinsic::ppc_altivec_vperm; 10680 PermCntlTy = MVT::v16i8; 10681 PermTy = MVT::v4i32; 10682 LDTy = MVT::v4i32; 10683 } else { 10684 Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld : 10685 Intrinsic::ppc_qpx_qvlpcls; 10686 IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd : 10687 Intrinsic::ppc_qpx_qvlfs; 10688 IntrPerm = Intrinsic::ppc_qpx_qvfperm; 10689 PermCntlTy = MVT::v4f64; 10690 PermTy = MVT::v4f64; 10691 LDTy = MemVT.getSimpleVT(); 10692 } 10693 10694 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy); 10695 10696 // Create the new MMO for the new base load. 
It is like the original MMO, 10697 // but represents an area in memory almost twice the vector size centered 10698 // on the original address. If the address is unaligned, we might start 10699 // reading up to (sizeof(vector)-1) bytes below the address of the 10700 // original unaligned load. 10701 MachineFunction &MF = DAG.getMachineFunction(); 10702 MachineMemOperand *BaseMMO = 10703 MF.getMachineMemOperand(LD->getMemOperand(), 10704 -(long)MemVT.getStoreSize()+1, 10705 2*MemVT.getStoreSize()-1); 10706 10707 // Create the new base load. 10708 SDValue LDXIntID = 10709 DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout())); 10710 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr }; 10711 SDValue BaseLoad = 10712 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 10713 DAG.getVTList(PermTy, MVT::Other), 10714 BaseLoadOps, LDTy, BaseMMO); 10715 10716 // Note that the value of IncOffset (which is provided to the next 10717 // load's pointer info offset value, and thus used to calculate the 10718 // alignment), and the value of IncValue (which is actually used to 10719 // increment the pointer value) are different! This is because we 10720 // require the next load to appear to be aligned, even though it 10721 // is actually offset from the base pointer by a lesser amount. 10722 int IncOffset = VT.getSizeInBits() / 8; 10723 int IncValue = IncOffset; 10724 10725 // Walk (both up and down) the chain looking for another load at the real 10726 // (aligned) offset (the alignment of the other load does not matter in 10727 // this case). If found, then do not use the offset reduction trick, as 10728 // that will prevent the loads from being later combined (as they would 10729 // otherwise be duplicates). 10730 if (!findConsecutiveLoad(LD, DAG)) 10731 --IncValue; 10732 10733 SDValue Increment = 10734 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout())); 10735 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 10736 10737 MachineMemOperand *ExtraMMO = 10738 MF.getMachineMemOperand(LD->getMemOperand(), 10739 1, 2*MemVT.getStoreSize()-1); 10740 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr }; 10741 SDValue ExtraLoad = 10742 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 10743 DAG.getVTList(PermTy, MVT::Other), 10744 ExtraLoadOps, LDTy, ExtraMMO); 10745 10746 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 10747 BaseLoad.getValue(1), ExtraLoad.getValue(1)); 10748 10749 // Because vperm has a big-endian bias, we must reverse the order 10750 // of the input vectors and complement the permute control vector 10751 // when generating little endian code. We have already handled the 10752 // latter by using lvsr instead of lvsl, so just reverse BaseLoad 10753 // and ExtraLoad here. 10754 SDValue Perm; 10755 if (isLittleEndian) 10756 Perm = BuildIntrinsicOp(IntrPerm, 10757 ExtraLoad, BaseLoad, PermCntl, DAG, dl); 10758 else 10759 Perm = BuildIntrinsicOp(IntrPerm, 10760 BaseLoad, ExtraLoad, PermCntl, DAG, dl); 10761 10762 if (VT != PermTy) 10763 Perm = Subtarget.hasAltivec() ? 10764 DAG.getNode(ISD::BITCAST, dl, VT, Perm) : 10765 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX 10766 DAG.getTargetConstant(1, dl, MVT::i64)); 10767 // second argument is 1 because this rounding 10768 // is always exact. 10769 10770 // The output of the permutation is our loaded result, the TokenFactor is 10771 // our new chain. 
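// Roughly, the complete expansion for the Altivec case is:
//   PermCntl  = lvsl/lvsr(Ptr)     ; lvsr when little endian
//   BaseLoad  = lvx(Ptr)           ; lvx ignores the low address bits
//   ExtraLoad = lvx(Ptr + 15)      ; or +16 when a consecutive load exists
//   Result    = vperm(BaseLoad, ExtraLoad, PermCntl) ; inputs swapped on LE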
10772 DCI.CombineTo(N, Perm, TF); 10773 return SDValue(N, 0); 10774 } 10775 } 10776 break; 10777 case ISD::INTRINSIC_WO_CHAIN: { 10778 bool isLittleEndian = Subtarget.isLittleEndian(); 10779 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 10780 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr 10781 : Intrinsic::ppc_altivec_lvsl); 10782 if ((IID == Intr || 10783 IID == Intrinsic::ppc_qpx_qvlpcld || 10784 IID == Intrinsic::ppc_qpx_qvlpcls) && 10785 N->getOperand(1)->getOpcode() == ISD::ADD) { 10786 SDValue Add = N->getOperand(1); 10787 10788 int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ? 10789 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */; 10790 10791 if (DAG.MaskedValueIsZero( 10792 Add->getOperand(1), 10793 APInt::getAllOnesValue(Bits /* alignment */) 10794 .zext( 10795 Add.getValueType().getScalarType().getSizeInBits()))) { 10796 SDNode *BasePtr = Add->getOperand(0).getNode(); 10797 for (SDNode::use_iterator UI = BasePtr->use_begin(), 10798 UE = BasePtr->use_end(); 10799 UI != UE; ++UI) { 10800 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 10801 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) { 10802 // We've found another LVSL/LVSR, and this address is an aligned 10803 // multiple of that one. The results will be the same, so use the 10804 // one we've just found instead. 10805 10806 return SDValue(*UI, 0); 10807 } 10808 } 10809 } 10810 10811 if (isa<ConstantSDNode>(Add->getOperand(1))) { 10812 SDNode *BasePtr = Add->getOperand(0).getNode(); 10813 for (SDNode::use_iterator UI = BasePtr->use_begin(), 10814 UE = BasePtr->use_end(); UI != UE; ++UI) { 10815 if (UI->getOpcode() == ISD::ADD && 10816 isa<ConstantSDNode>(UI->getOperand(1)) && 10817 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() - 10818 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) % 10819 (1ULL << Bits) == 0) { 10820 SDNode *OtherAdd = *UI; 10821 for (SDNode::use_iterator VI = OtherAdd->use_begin(), 10822 VE = OtherAdd->use_end(); VI != VE; ++VI) { 10823 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 10824 cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) { 10825 return SDValue(*VI, 0); 10826 } 10827 } 10828 } 10829 } 10830 } 10831 } 10832 } 10833 10834 break; 10835 case ISD::INTRINSIC_W_CHAIN: { 10836 // For little endian, VSX loads require generating lxvd2x/xxswapd. 10837 if (Subtarget.hasVSX() && Subtarget.isLittleEndian()) { 10838 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10839 default: 10840 break; 10841 case Intrinsic::ppc_vsx_lxvw4x: 10842 case Intrinsic::ppc_vsx_lxvd2x: 10843 return expandVSXLoadForLE(N, DCI); 10844 } 10845 } 10846 break; 10847 } 10848 case ISD::INTRINSIC_VOID: { 10849 // For little endian, VSX stores require generating xxswapd/stxvd2x. 10850 if (Subtarget.hasVSX() && Subtarget.isLittleEndian()) { 10851 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10852 default: 10853 break; 10854 case Intrinsic::ppc_vsx_stxvw4x: 10855 case Intrinsic::ppc_vsx_stxvd2x: 10856 return expandVSXStoreForLE(N, DCI); 10857 } 10858 } 10859 break; 10860 } 10861 case ISD::BSWAP: 10862 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 
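// For example (sketch): (i32 (bswap (load ptr))) becomes a single
// byte-reversed load, lwbrx(ptr), reusing the original load's chain.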
10863 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
10864 N->getOperand(0).hasOneUse() &&
10865 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
10866 (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
10867 N->getValueType(0) == MVT::i64))) {
10868 SDValue Load = N->getOperand(0);
10869 LoadSDNode *LD = cast<LoadSDNode>(Load);
10870 // Create the byte-swapping load.
10871 SDValue Ops[] = {
10872 LD->getChain(), // Chain
10873 LD->getBasePtr(), // Ptr
10874 DAG.getValueType(N->getValueType(0)) // VT
10875 };
10876 SDValue BSLoad =
10877 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
10878 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
10879 MVT::i64 : MVT::i32, MVT::Other),
10880 Ops, LD->getMemoryVT(), LD->getMemOperand());
10881
10882 // If this is an i16 load, insert the truncate.
10883 SDValue ResVal = BSLoad;
10884 if (N->getValueType(0) == MVT::i16)
10885 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
10886
10887 // First, combine the bswap away. This makes the value produced by the
10888 // load dead.
10889 DCI.CombineTo(N, ResVal);
10890
10891 // Next, combine the load away; we give it a bogus result value but a real
10892 // chain result. The result value is dead because the bswap is dead.
10893 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
10894
10895 // Return N so it doesn't get rechecked!
10896 return SDValue(N, 0);
10897 }
10898
10899 break;
10900 case PPCISD::VCMP: {
10901 // If a VCMPo node already exists with exactly the same operands as this
10902 // node, use its result instead of this node (VCMPo computes both a CR6 and
10903 // a normal output).
10904 //
10905 if (!N->getOperand(0).hasOneUse() &&
10906 !N->getOperand(1).hasOneUse() &&
10907 !N->getOperand(2).hasOneUse()) {
10908
10909 // Scan all of the users of the LHS, looking for VCMPo's that match.
10910 SDNode *VCMPoNode = nullptr;
10911
10912 SDNode *LHSN = N->getOperand(0).getNode();
10913 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
10914 UI != E; ++UI)
10915 if (UI->getOpcode() == PPCISD::VCMPo &&
10916 UI->getOperand(1) == N->getOperand(1) &&
10917 UI->getOperand(2) == N->getOperand(2) &&
10918 UI->getOperand(0) == N->getOperand(0)) {
10919 VCMPoNode = *UI;
10920 break;
10921 }
10922
10923 // If there is no VCMPo node, or if its flag result is unused, don't
10924 // transform this.
10925 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
10926 break;
10927
10928 // Look at the (necessarily single) use of the flag value. If it has a
10929 // chain, this transformation is more complex. Note that multiple things
10930 // could use the value result, which we should ignore.
10931 SDNode *FlagUser = nullptr;
10932 for (SDNode::use_iterator UI = VCMPoNode->use_begin();
10933 FlagUser == nullptr; ++UI) {
10934 assert(UI != VCMPoNode->use_end() && "Didn't find user!");
10935 SDNode *User = *UI;
10936 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
10937 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
10938 FlagUser = User;
10939 break;
10940 }
10941 }
10942 }
10943
10944 // If the user is a MFOCRF instruction, we know this is safe.
10945 // Otherwise we give up for right now.
10946 if (FlagUser->getOpcode() == PPCISD::MFOCRF) 10947 return SDValue(VCMPoNode, 0); 10948 } 10949 break; 10950 } 10951 case ISD::BRCOND: { 10952 SDValue Cond = N->getOperand(1); 10953 SDValue Target = N->getOperand(2); 10954 10955 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN && 10956 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() == 10957 Intrinsic::ppc_is_decremented_ctr_nonzero) { 10958 10959 // We now need to make the intrinsic dead (it cannot be instruction 10960 // selected). 10961 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0)); 10962 assert(Cond.getNode()->hasOneUse() && 10963 "Counter decrement has more than one use"); 10964 10965 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other, 10966 N->getOperand(0), Target); 10967 } 10968 } 10969 break; 10970 case ISD::BR_CC: { 10971 // If this is a branch on an altivec predicate comparison, lower this so 10972 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This 10973 // lowering is done pre-legalize, because the legalizer lowers the predicate 10974 // compare down to code that is difficult to reassemble. 10975 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 10976 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 10977 10978 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero 10979 // value. If so, pass-through the AND to get to the intrinsic. 10980 if (LHS.getOpcode() == ISD::AND && 10981 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN && 10982 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() == 10983 Intrinsic::ppc_is_decremented_ctr_nonzero && 10984 isa<ConstantSDNode>(LHS.getOperand(1)) && 10985 !isNullConstant(LHS.getOperand(1))) 10986 LHS = LHS.getOperand(0); 10987 10988 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN && 10989 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 10990 Intrinsic::ppc_is_decremented_ctr_nonzero && 10991 isa<ConstantSDNode>(RHS)) { 10992 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && 10993 "Counter decrement comparison is not EQ or NE"); 10994 10995 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 10996 bool isBDNZ = (CC == ISD::SETEQ && Val) || 10997 (CC == ISD::SETNE && !Val); 10998 10999 // We now need to make the intrinsic dead (it cannot be instruction 11000 // selected). 11001 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0)); 11002 assert(LHS.getNode()->hasOneUse() && 11003 "Counter decrement has more than one use"); 11004 11005 return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other, 11006 N->getOperand(0), N->getOperand(4)); 11007 } 11008 11009 int CompareOpc; 11010 bool isDot; 11011 11012 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 11013 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 11014 getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) { 11015 assert(isDot && "Can't compare against a vector result!"); 11016 11017 // If this is a comparison against something other than 0/1, then we know 11018 // that the condition is never/always true. 11019 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 11020 if (Val != 0 && Val != 1) { 11021 if (CC == ISD::SETEQ) // Cond never true, remove branch. 11022 return N->getOperand(0); 11023 // Always !=, turn it into an unconditional branch. 
11024 return DAG.getNode(ISD::BR, dl, MVT::Other, 11025 N->getOperand(0), N->getOperand(4)); 11026 } 11027 11028 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 11029 11030 // Create the PPCISD altivec 'dot' comparison node. 11031 SDValue Ops[] = { 11032 LHS.getOperand(2), // LHS of compare 11033 LHS.getOperand(3), // RHS of compare 11034 DAG.getConstant(CompareOpc, dl, MVT::i32) 11035 }; 11036 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue }; 11037 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 11038 11039 // Unpack the result based on how the target uses it. 11040 PPC::Predicate CompOpc; 11041 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 11042 default: // Can't happen, don't crash on invalid number though. 11043 case 0: // Branch on the value of the EQ bit of CR6. 11044 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 11045 break; 11046 case 1: // Branch on the inverted value of the EQ bit of CR6. 11047 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 11048 break; 11049 case 2: // Branch on the value of the LT bit of CR6. 11050 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 11051 break; 11052 case 3: // Branch on the inverted value of the LT bit of CR6. 11053 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT; 11054 break; 11055 } 11056 11057 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 11058 DAG.getConstant(CompOpc, dl, MVT::i32), 11059 DAG.getRegister(PPC::CR6, MVT::i32), 11060 N->getOperand(4), CompNode.getValue(1)); 11061 } 11062 break; 11063 } 11064 } 11065 11066 return SDValue(); 11067 } 11068 11069 SDValue 11070 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 11071 SelectionDAG &DAG, 11072 std::vector<SDNode *> *Created) const { 11073 // fold (sdiv X, pow2) 11074 EVT VT = N->getValueType(0); 11075 if (VT == MVT::i64 && !Subtarget.isPPC64()) 11076 return SDValue(); 11077 if ((VT != MVT::i32 && VT != MVT::i64) || 11078 !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2())) 11079 return SDValue(); 11080 11081 SDLoc DL(N); 11082 SDValue N0 = N->getOperand(0); 11083 11084 bool IsNegPow2 = (-Divisor).isPowerOf2(); 11085 unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros(); 11086 SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT); 11087 11088 SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt); 11089 if (Created) 11090 Created->push_back(Op.getNode()); 11091 11092 if (IsNegPow2) { 11093 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op); 11094 if (Created) 11095 Created->push_back(Op.getNode()); 11096 } 11097 11098 return Op; 11099 } 11100 11101 //===----------------------------------------------------------------------===// 11102 // Inline Assembly Support 11103 //===----------------------------------------------------------------------===// 11104 11105 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 11106 APInt &KnownZero, 11107 APInt &KnownOne, 11108 const SelectionDAG &DAG, 11109 unsigned Depth) const { 11110 KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0); 11111 switch (Op.getOpcode()) { 11112 default: break; 11113 case PPCISD::LBRX: { 11114 // lhbrx is known to have the top bits cleared out. 
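// For example, a 16-bit byte-reversed load produces an i32 whose bits
// 16-31 are zero, hence the 0xFFFF0000 known-zero mask below.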
11115 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16) 11116 KnownZero = 0xFFFF0000; 11117 break; 11118 } 11119 case ISD::INTRINSIC_WO_CHAIN: { 11120 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) { 11121 default: break; 11122 case Intrinsic::ppc_altivec_vcmpbfp_p: 11123 case Intrinsic::ppc_altivec_vcmpeqfp_p: 11124 case Intrinsic::ppc_altivec_vcmpequb_p: 11125 case Intrinsic::ppc_altivec_vcmpequh_p: 11126 case Intrinsic::ppc_altivec_vcmpequw_p: 11127 case Intrinsic::ppc_altivec_vcmpequd_p: 11128 case Intrinsic::ppc_altivec_vcmpgefp_p: 11129 case Intrinsic::ppc_altivec_vcmpgtfp_p: 11130 case Intrinsic::ppc_altivec_vcmpgtsb_p: 11131 case Intrinsic::ppc_altivec_vcmpgtsh_p: 11132 case Intrinsic::ppc_altivec_vcmpgtsw_p: 11133 case Intrinsic::ppc_altivec_vcmpgtsd_p: 11134 case Intrinsic::ppc_altivec_vcmpgtub_p: 11135 case Intrinsic::ppc_altivec_vcmpgtuh_p: 11136 case Intrinsic::ppc_altivec_vcmpgtuw_p: 11137 case Intrinsic::ppc_altivec_vcmpgtud_p: 11138 KnownZero = ~1U; // All bits but the low one are known to be zero. 11139 break; 11140 } 11141 } 11142 } 11143 } 11144 11145 unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { 11146 switch (Subtarget.getDarwinDirective()) { 11147 default: break; 11148 case PPC::DIR_970: 11149 case PPC::DIR_PWR4: 11150 case PPC::DIR_PWR5: 11151 case PPC::DIR_PWR5X: 11152 case PPC::DIR_PWR6: 11153 case PPC::DIR_PWR6X: 11154 case PPC::DIR_PWR7: 11155 case PPC::DIR_PWR8: 11156 case PPC::DIR_PWR9: { 11157 if (!ML) 11158 break; 11159 11160 const PPCInstrInfo *TII = Subtarget.getInstrInfo(); 11161 11162 // For small loops (between 5 and 8 instructions), align to a 32-byte 11163 // boundary so that the entire loop fits in one instruction-cache line. 11164 uint64_t LoopSize = 0; 11165 for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I) 11166 for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) { 11167 LoopSize += TII->GetInstSizeInBytes(J); 11168 if (LoopSize > 32) 11169 break; 11170 } 11171 11172 if (LoopSize > 16 && LoopSize <= 32) 11173 return 5; 11174 11175 break; 11176 } 11177 } 11178 11179 return TargetLowering::getPrefLoopAlignment(ML); 11180 } 11181 11182 /// getConstraintType - Given a constraint, return the type of 11183 /// constraint it is for this target. 11184 PPCTargetLowering::ConstraintType 11185 PPCTargetLowering::getConstraintType(StringRef Constraint) const { 11186 if (Constraint.size() == 1) { 11187 switch (Constraint[0]) { 11188 default: break; 11189 case 'b': 11190 case 'r': 11191 case 'f': 11192 case 'd': 11193 case 'v': 11194 case 'y': 11195 return C_RegisterClass; 11196 case 'Z': 11197 // FIXME: While Z does indicate a memory constraint, it specifically 11198 // indicates an r+r address (used in conjunction with the 'y' modifier 11199 // in the replacement string). Currently, we're forcing the base 11200 // register to be r0 in the asm printer (which is interpreted as zero) 11201 // and forming the complete address in the second register. This is 11202 // suboptimal. 11203 return C_Memory; 11204 } 11205 } else if (Constraint == "wc") { // individual CR bits. 11206 return C_RegisterClass; 11207 } else if (Constraint == "wa" || Constraint == "wd" || 11208 Constraint == "wf" || Constraint == "ws") { 11209 return C_RegisterClass; // VSX registers. 11210 } 11211 return TargetLowering::getConstraintType(Constraint); 11212 } 11213 11214 /// Examine constraint type and operand type and determine a weight value. 
11215 /// This object must already have been set up with the operand type
11216 /// and the current alternative constraint selected.
11217 TargetLowering::ConstraintWeight
11218 PPCTargetLowering::getSingleConstraintMatchWeight(
11219 AsmOperandInfo &info, const char *constraint) const {
11220 ConstraintWeight weight = CW_Invalid;
11221 Value *CallOperandVal = info.CallOperandVal;
11222 // If we don't have a value, we can't do a match,
11223 // but allow it at the lowest weight.
11224 if (!CallOperandVal)
11225 return CW_Default;
11226 Type *type = CallOperandVal->getType();
11227
11228 // Look at the constraint type.
11229 if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
11230 return CW_Register; // an individual CR bit.
11231 else if ((StringRef(constraint) == "wa" ||
11232 StringRef(constraint) == "wd" ||
11233 StringRef(constraint) == "wf") &&
11234 type->isVectorTy())
11235 return CW_Register;
11236 else if (StringRef(constraint) == "ws" && type->isDoubleTy())
11237 return CW_Register;
11238
11239 switch (*constraint) {
11240 default:
11241 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
11242 break;
11243 case 'b':
11244 if (type->isIntegerTy())
11245 weight = CW_Register;
11246 break;
11247 case 'f':
11248 if (type->isFloatTy())
11249 weight = CW_Register;
11250 break;
11251 case 'd':
11252 if (type->isDoubleTy())
11253 weight = CW_Register;
11254 break;
11255 case 'v':
11256 if (type->isVectorTy())
11257 weight = CW_Register;
11258 break;
11259 case 'y':
11260 weight = CW_Register;
11261 break;
11262 case 'Z':
11263 weight = CW_Memory;
11264 break;
11265 }
11266 return weight;
11267 }
11268
11269 std::pair<unsigned, const TargetRegisterClass *>
11270 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
11271 StringRef Constraint,
11272 MVT VT) const {
11273 if (Constraint.size() == 1) {
11274 // GCC RS6000 Constraint Letters
11275 switch (Constraint[0]) {
11276 case 'b': // R1-R31
11277 if (VT == MVT::i64 && Subtarget.isPPC64())
11278 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
11279 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
11280 case 'r': // R0-R31
11281 if (VT == MVT::i64 && Subtarget.isPPC64())
11282 return std::make_pair(0U, &PPC::G8RCRegClass);
11283 return std::make_pair(0U, &PPC::GPRCRegClass);
11284 // 'd' and 'f' constraints are both defined to be "the floating point
11285 // registers", where one is for 32-bit and the other for 64-bit. We don't
11286 // really care overly much here so just give them all the same reg classes.
11287 case 'd':
11288 case 'f':
11289 if (VT == MVT::f32 || VT == MVT::i32)
11290 return std::make_pair(0U, &PPC::F4RCRegClass);
11291 if (VT == MVT::f64 || VT == MVT::i64)
11292 return std::make_pair(0U, &PPC::F8RCRegClass);
11293 if (VT == MVT::v4f64 && Subtarget.hasQPX())
11294 return std::make_pair(0U, &PPC::QFRCRegClass);
11295 if (VT == MVT::v4f32 && Subtarget.hasQPX())
11296 return std::make_pair(0U, &PPC::QSRCRegClass);
11297 break;
11298 case 'v':
11299 if (VT == MVT::v4f64 && Subtarget.hasQPX())
11300 return std::make_pair(0U, &PPC::QFRCRegClass);
11301 if (VT == MVT::v4f32 && Subtarget.hasQPX())
11302 return std::make_pair(0U, &PPC::QSRCRegClass);
11303 if (Subtarget.hasAltivec())
11304 return std::make_pair(0U, &PPC::VRRCRegClass);
break; // Without Altivec, fall back to the generic handling below rather
// than falling through into the 'y' case.
11305 case 'y': // crrc
11306 return std::make_pair(0U, &PPC::CRRCRegClass);
11307 }
11308 } else if (Constraint == "wc" && Subtarget.useCRBits()) {
11309 // An individual CR bit.
11310 return std::make_pair(0U, &PPC::CRBITRCRegClass); 11311 } else if ((Constraint == "wa" || Constraint == "wd" || 11312 Constraint == "wf") && Subtarget.hasVSX()) { 11313 return std::make_pair(0U, &PPC::VSRCRegClass); 11314 } else if (Constraint == "ws" && Subtarget.hasVSX()) { 11315 if (VT == MVT::f32 && Subtarget.hasP8Vector()) 11316 return std::make_pair(0U, &PPC::VSSRCRegClass); 11317 else 11318 return std::make_pair(0U, &PPC::VSFRCRegClass); 11319 } 11320 11321 std::pair<unsigned, const TargetRegisterClass *> R = 11322 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 11323 11324 // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers 11325 // (which we call X[0-9]+). If a 64-bit value has been requested, and a 11326 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent 11327 // register. 11328 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use 11329 // the AsmName field from *RegisterInfo.td, then this would not be necessary. 11330 if (R.first && VT == MVT::i64 && Subtarget.isPPC64() && 11331 PPC::GPRCRegClass.contains(R.first)) 11332 return std::make_pair(TRI->getMatchingSuperReg(R.first, 11333 PPC::sub_32, &PPC::G8RCRegClass), 11334 &PPC::G8RCRegClass); 11335 11336 // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same. 11337 if (!R.second && StringRef("{cc}").equals_lower(Constraint)) { 11338 R.first = PPC::CR0; 11339 R.second = &PPC::CRRCRegClass; 11340 } 11341 11342 return R; 11343 } 11344 11345 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 11346 /// vector. If it is invalid, don't add anything to Ops. 11347 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 11348 std::string &Constraint, 11349 std::vector<SDValue>&Ops, 11350 SelectionDAG &DAG) const { 11351 SDValue Result; 11352 11353 // Only support length 1 constraints. 11354 if (Constraint.length() > 1) return; 11355 11356 char Letter = Constraint[0]; 11357 switch (Letter) { 11358 default: break; 11359 case 'I': 11360 case 'J': 11361 case 'K': 11362 case 'L': 11363 case 'M': 11364 case 'N': 11365 case 'O': 11366 case 'P': { 11367 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op); 11368 if (!CST) return; // Must be an immediate to match. 11369 SDLoc dl(Op); 11370 int64_t Value = CST->getSExtValue(); 11371 EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative 11372 // numbers are printed as such. 11373 switch (Letter) { 11374 default: llvm_unreachable("Unknown constraint letter!"); 11375 case 'I': // "I" is a signed 16-bit constant. 11376 if (isInt<16>(Value)) 11377 Result = DAG.getTargetConstant(Value, dl, TCVT); 11378 break; 11379 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 11380 if (isShiftedUInt<16, 16>(Value)) 11381 Result = DAG.getTargetConstant(Value, dl, TCVT); 11382 break; 11383 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. 11384 if (isShiftedInt<16, 16>(Value)) 11385 Result = DAG.getTargetConstant(Value, dl, TCVT); 11386 break; 11387 case 'K': // "K" is a constant with only the low-order 16 bits nonzero. 11388 if (isUInt<16>(Value)) 11389 Result = DAG.getTargetConstant(Value, dl, TCVT); 11390 break; 11391 case 'M': // "M" is a constant that is greater than 31. 11392 if (Value > 31) 11393 Result = DAG.getTargetConstant(Value, dl, TCVT); 11394 break; 11395 case 'N': // "N" is a positive constant that is an exact power of two. 
11396 if (Value > 0 && isPowerOf2_64(Value))
11397 Result = DAG.getTargetConstant(Value, dl, TCVT);
11398 break;
11399 case 'O': // "O" is the constant zero.
11400 if (Value == 0)
11401 Result = DAG.getTargetConstant(Value, dl, TCVT);
11402 break;
11403 case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
11404 if (isInt<16>(-Value))
11405 Result = DAG.getTargetConstant(Value, dl, TCVT);
11406 break;
11407 }
11408 break;
11409 }
11410 }
11411
11412 if (Result.getNode()) {
11413 Ops.push_back(Result);
11414 return;
11415 }
11416
11417 // Handle standard constraint letters.
11418 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
11419 }
11420
11421 // isLegalAddressingMode - Return true if the addressing mode represented
11422 // by AM is legal for this target, for a load/store of the specified type.
11423 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
11424 const AddrMode &AM, Type *Ty,
11425 unsigned AS) const {
11426 // PPC does not allow r+i addressing modes for vectors!
11427 if (Ty->isVectorTy() && AM.BaseOffs != 0)
11428 return false;
11429
11430 // PPC allows a sign-extended 16-bit immediate field.
11431 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
11432 return false;
11433
11434 // No global is ever allowed as a base.
11435 if (AM.BaseGV)
11436 return false;
11437
11438 // PPC only supports r+r:
11439 switch (AM.Scale) {
11440 case 0: // "r+i" or just "i", depending on HasBaseReg.
11441 break;
11442 case 1:
11443 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
11444 return false;
11445 // Otherwise we have r+r or r+i.
11446 break;
11447 case 2:
11448 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
11449 return false;
11450 // Allow 2*r as r+r.
11451 break;
11452 default:
11453 // No other scales are supported.
11454 return false;
11455 }
11456
11457 return true;
11458 }
11459
11460 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
11461 SelectionDAG &DAG) const {
11462 MachineFunction &MF = DAG.getMachineFunction();
11463 MachineFrameInfo *MFI = MF.getFrameInfo();
11464 MFI->setReturnAddressIsTaken(true);
11465
11466 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
11467 return SDValue();
11468
11469 SDLoc dl(Op);
11470 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
11471
11472 // Make sure the function does not optimize away the store of the RA to
11473 // the stack.
11474 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
11475 FuncInfo->setLRStoreRequired();
11476 bool isPPC64 = Subtarget.isPPC64();
11477 auto PtrVT = getPointerTy(MF.getDataLayout());
11478
11479 if (Depth > 0) {
11480 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
11481 SDValue Offset =
11482 DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
11483 isPPC64 ? MVT::i64 : MVT::i32);
11484 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
11485 DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
11486 MachinePointerInfo(), false, false, false, 0);
11487 }
11488
11489 // Just load the return address off the stack.
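// (Depth == 0 case: read the saved LR from this function's own
// link-register save slot; the Depth > 0 case above walked the frame
// chain and loaded from the caller's save slot instead.)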
11490 SDValue RetAddrFI = getReturnAddrFrameIndex(DAG); 11491 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI, 11492 MachinePointerInfo(), false, false, false, 0); 11493 } 11494 11495 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, 11496 SelectionDAG &DAG) const { 11497 SDLoc dl(Op); 11498 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 11499 11500 MachineFunction &MF = DAG.getMachineFunction(); 11501 MachineFrameInfo *MFI = MF.getFrameInfo(); 11502 MFI->setFrameAddressIsTaken(true); 11503 11504 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 11505 bool isPPC64 = PtrVT == MVT::i64; 11506 11507 // Naked functions never have a frame pointer, and so we use r1. For all 11508 // other functions, this decision must be delayed until during PEI. 11509 unsigned FrameReg; 11510 if (MF.getFunction()->hasFnAttribute(Attribute::Naked)) 11511 FrameReg = isPPC64 ? PPC::X1 : PPC::R1; 11512 else 11513 FrameReg = isPPC64 ? PPC::FP8 : PPC::FP; 11514 11515 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, 11516 PtrVT); 11517 while (Depth--) 11518 FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(), 11519 FrameAddr, MachinePointerInfo(), false, false, 11520 false, 0); 11521 return FrameAddr; 11522 } 11523 11524 // FIXME? Maybe this could be a TableGen attribute on some registers and 11525 // this table could be generated automatically from RegInfo. 11526 unsigned PPCTargetLowering::getRegisterByName(const char* RegName, EVT VT, 11527 SelectionDAG &DAG) const { 11528 bool isPPC64 = Subtarget.isPPC64(); 11529 bool isDarwinABI = Subtarget.isDarwinABI(); 11530 11531 if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) || 11532 (!isPPC64 && VT != MVT::i32)) 11533 report_fatal_error("Invalid register global variable type"); 11534 11535 bool is64Bit = isPPC64 && VT == MVT::i64; 11536 unsigned Reg = StringSwitch<unsigned>(RegName) 11537 .Case("r1", is64Bit ? PPC::X1 : PPC::R1) 11538 .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2) 11539 .Case("r13", (!isPPC64 && isDarwinABI) ? 0 : 11540 (is64Bit ? PPC::X13 : PPC::R13)) 11541 .Default(0); 11542 11543 if (Reg) 11544 return Reg; 11545 report_fatal_error("Invalid register name global variable"); 11546 } 11547 11548 bool 11549 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 11550 // The PowerPC target isn't yet aware of offsets. 
11551 return false; 11552 } 11553 11554 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 11555 const CallInst &I, 11556 unsigned Intrinsic) const { 11557 11558 switch (Intrinsic) { 11559 case Intrinsic::ppc_qpx_qvlfd: 11560 case Intrinsic::ppc_qpx_qvlfs: 11561 case Intrinsic::ppc_qpx_qvlfcd: 11562 case Intrinsic::ppc_qpx_qvlfcs: 11563 case Intrinsic::ppc_qpx_qvlfiwa: 11564 case Intrinsic::ppc_qpx_qvlfiwz: 11565 case Intrinsic::ppc_altivec_lvx: 11566 case Intrinsic::ppc_altivec_lvxl: 11567 case Intrinsic::ppc_altivec_lvebx: 11568 case Intrinsic::ppc_altivec_lvehx: 11569 case Intrinsic::ppc_altivec_lvewx: 11570 case Intrinsic::ppc_vsx_lxvd2x: 11571 case Intrinsic::ppc_vsx_lxvw4x: { 11572 EVT VT; 11573 switch (Intrinsic) { 11574 case Intrinsic::ppc_altivec_lvebx: 11575 VT = MVT::i8; 11576 break; 11577 case Intrinsic::ppc_altivec_lvehx: 11578 VT = MVT::i16; 11579 break; 11580 case Intrinsic::ppc_altivec_lvewx: 11581 VT = MVT::i32; 11582 break; 11583 case Intrinsic::ppc_vsx_lxvd2x: 11584 VT = MVT::v2f64; 11585 break; 11586 case Intrinsic::ppc_qpx_qvlfd: 11587 VT = MVT::v4f64; 11588 break; 11589 case Intrinsic::ppc_qpx_qvlfs: 11590 VT = MVT::v4f32; 11591 break; 11592 case Intrinsic::ppc_qpx_qvlfcd: 11593 VT = MVT::v2f64; 11594 break; 11595 case Intrinsic::ppc_qpx_qvlfcs: 11596 VT = MVT::v2f32; 11597 break; 11598 default: 11599 VT = MVT::v4i32; 11600 break; 11601 } 11602 11603 Info.opc = ISD::INTRINSIC_W_CHAIN; 11604 Info.memVT = VT; 11605 Info.ptrVal = I.getArgOperand(0); 11606 Info.offset = -VT.getStoreSize()+1; 11607 Info.size = 2*VT.getStoreSize()-1; 11608 Info.align = 1; 11609 Info.vol = false; 11610 Info.readMem = true; 11611 Info.writeMem = false; 11612 return true; 11613 } 11614 case Intrinsic::ppc_qpx_qvlfda: 11615 case Intrinsic::ppc_qpx_qvlfsa: 11616 case Intrinsic::ppc_qpx_qvlfcda: 11617 case Intrinsic::ppc_qpx_qvlfcsa: 11618 case Intrinsic::ppc_qpx_qvlfiwaa: 11619 case Intrinsic::ppc_qpx_qvlfiwza: { 11620 EVT VT; 11621 switch (Intrinsic) { 11622 case Intrinsic::ppc_qpx_qvlfda: 11623 VT = MVT::v4f64; 11624 break; 11625 case Intrinsic::ppc_qpx_qvlfsa: 11626 VT = MVT::v4f32; 11627 break; 11628 case Intrinsic::ppc_qpx_qvlfcda: 11629 VT = MVT::v2f64; 11630 break; 11631 case Intrinsic::ppc_qpx_qvlfcsa: 11632 VT = MVT::v2f32; 11633 break; 11634 default: 11635 VT = MVT::v4i32; 11636 break; 11637 } 11638 11639 Info.opc = ISD::INTRINSIC_W_CHAIN; 11640 Info.memVT = VT; 11641 Info.ptrVal = I.getArgOperand(0); 11642 Info.offset = 0; 11643 Info.size = VT.getStoreSize(); 11644 Info.align = 1; 11645 Info.vol = false; 11646 Info.readMem = true; 11647 Info.writeMem = false; 11648 return true; 11649 } 11650 case Intrinsic::ppc_qpx_qvstfd: 11651 case Intrinsic::ppc_qpx_qvstfs: 11652 case Intrinsic::ppc_qpx_qvstfcd: 11653 case Intrinsic::ppc_qpx_qvstfcs: 11654 case Intrinsic::ppc_qpx_qvstfiw: 11655 case Intrinsic::ppc_altivec_stvx: 11656 case Intrinsic::ppc_altivec_stvxl: 11657 case Intrinsic::ppc_altivec_stvebx: 11658 case Intrinsic::ppc_altivec_stvehx: 11659 case Intrinsic::ppc_altivec_stvewx: 11660 case Intrinsic::ppc_vsx_stxvd2x: 11661 case Intrinsic::ppc_vsx_stxvw4x: { 11662 EVT VT; 11663 switch (Intrinsic) { 11664 case Intrinsic::ppc_altivec_stvebx: 11665 VT = MVT::i8; 11666 break; 11667 case Intrinsic::ppc_altivec_stvehx: 11668 VT = MVT::i16; 11669 break; 11670 case Intrinsic::ppc_altivec_stvewx: 11671 VT = MVT::i32; 11672 break; 11673 case Intrinsic::ppc_vsx_stxvd2x: 11674 VT = MVT::v2f64; 11675 break; 11676 case Intrinsic::ppc_qpx_qvstfd: 11677 VT = MVT::v4f64; 11678 break; 11679 
case Intrinsic::ppc_qpx_qvstfs:
11680 VT = MVT::v4f32;
11681 break;
11682 case Intrinsic::ppc_qpx_qvstfcd:
11683 VT = MVT::v2f64;
11684 break;
11685 case Intrinsic::ppc_qpx_qvstfcs:
11686 VT = MVT::v2f32;
11687 break;
11688 default:
11689 VT = MVT::v4i32;
11690 break;
11691 }
11692
11693 Info.opc = ISD::INTRINSIC_VOID;
11694 Info.memVT = VT;
11695 Info.ptrVal = I.getArgOperand(1);
11696 Info.offset = -VT.getStoreSize()+1;
11697 Info.size = 2*VT.getStoreSize()-1;
11698 Info.align = 1;
11699 Info.vol = false;
11700 Info.readMem = false;
11701 Info.writeMem = true;
11702 return true;
11703 }
11704 case Intrinsic::ppc_qpx_qvstfda:
11705 case Intrinsic::ppc_qpx_qvstfsa:
11706 case Intrinsic::ppc_qpx_qvstfcda:
11707 case Intrinsic::ppc_qpx_qvstfcsa:
11708 case Intrinsic::ppc_qpx_qvstfiwa: {
11709 EVT VT;
11710 switch (Intrinsic) {
11711 case Intrinsic::ppc_qpx_qvstfda:
11712 VT = MVT::v4f64;
11713 break;
11714 case Intrinsic::ppc_qpx_qvstfsa:
11715 VT = MVT::v4f32;
11716 break;
11717 case Intrinsic::ppc_qpx_qvstfcda:
11718 VT = MVT::v2f64;
11719 break;
11720 case Intrinsic::ppc_qpx_qvstfcsa:
11721 VT = MVT::v2f32;
11722 break;
11723 default:
11724 VT = MVT::v4i32;
11725 break;
11726 }
11727
11728 Info.opc = ISD::INTRINSIC_VOID;
11729 Info.memVT = VT;
11730 Info.ptrVal = I.getArgOperand(1);
11731 Info.offset = 0;
11732 Info.size = VT.getStoreSize();
11733 Info.align = 1;
11734 Info.vol = false;
11735 Info.readMem = false;
11736 Info.writeMem = true;
11737 return true;
11738 }
11739 default:
11740 break;
11741 }
11742
11743 return false;
11744 }
11745
11746 /// getOptimalMemOpType - Returns the target specific optimal type for load
11747 /// and store operations as a result of memset, memcpy, and memmove
11748 /// lowering. If DstAlign is zero, it is safe to assume that the destination
11749 /// alignment can satisfy any constraint. Similarly, if SrcAlign is zero it
11750 /// means there isn't a need to check it against the alignment requirement,
11751 /// probably because the source does not need to be loaded. If 'IsMemset' is
11752 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
11753 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
11754 /// source is constant so it does not need to be loaded.
11755 /// It returns EVT::Other if the type should be determined using generic
11756 /// target-independent logic.
11757 EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
11758 unsigned DstAlign, unsigned SrcAlign,
11759 bool IsMemset, bool ZeroMemset,
11760 bool MemcpyStrSrc,
11761 MachineFunction &MF) const {
11762 if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
11763 const Function *F = MF.getFunction();
11764 // When expanding a memset, require at least two QPX instructions to cover
11765 // the cost of loading the value to be stored from the constant pool.
11766 if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
11767 (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
11768 !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
11769 return MVT::v4f64;
11770 }
11771
11772 // We should use Altivec/VSX loads and stores when available. For unaligned
11773 // addresses, unaligned VSX loads are only fast starting with the P8.
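// For example (a sketch): with VSX, a 32-byte memcpy whose operands are
// suitably aligned can be expanded as two v4i32 load/store pairs instead
// of four i64 pairs.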
11774 if (Subtarget.hasAltivec() && Size >= 16 && 11775 (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) || 11776 ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector()))) 11777 return MVT::v4i32; 11778 } 11779 11780 if (Subtarget.isPPC64()) { 11781 return MVT::i64; 11782 } 11783 11784 return MVT::i32; 11785 } 11786 11787 /// \brief Returns true if it is beneficial to convert a load of a constant 11788 /// to just the constant itself. 11789 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 11790 Type *Ty) const { 11791 assert(Ty->isIntegerTy()); 11792 11793 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 11794 return !(BitSize == 0 || BitSize > 64); 11795 } 11796 11797 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { 11798 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 11799 return false; 11800 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 11801 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 11802 return NumBits1 == 64 && NumBits2 == 32; 11803 } 11804 11805 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 11806 if (!VT1.isInteger() || !VT2.isInteger()) 11807 return false; 11808 unsigned NumBits1 = VT1.getSizeInBits(); 11809 unsigned NumBits2 = VT2.getSizeInBits(); 11810 return NumBits1 == 64 && NumBits2 == 32; 11811 } 11812 11813 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { 11814 // Generally speaking, zexts are not free, but they are free when they can be 11815 // folded with other operations. 11816 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) { 11817 EVT MemVT = LD->getMemoryVT(); 11818 if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 || 11819 (Subtarget.isPPC64() && MemVT == MVT::i32)) && 11820 (LD->getExtensionType() == ISD::NON_EXTLOAD || 11821 LD->getExtensionType() == ISD::ZEXTLOAD)) 11822 return true; 11823 } 11824 11825 // FIXME: Add other cases... 11826 // - 32-bit shifts with a zext to i64 11827 // - zext after ctlz, bswap, etc. 11828 // - zext after and by a constant mask 11829 11830 return TargetLowering::isZExtFree(Val, VT2); 11831 } 11832 11833 bool PPCTargetLowering::isFPExtFree(EVT VT) const { 11834 assert(VT.isFloatingPoint()); 11835 return true; 11836 } 11837 11838 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 11839 return isInt<16>(Imm) || isUInt<16>(Imm); 11840 } 11841 11842 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const { 11843 return isInt<16>(Imm) || isUInt<16>(Imm); 11844 } 11845 11846 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, 11847 unsigned, 11848 unsigned, 11849 bool *Fast) const { 11850 if (DisablePPCUnaligned) 11851 return false; 11852 11853 // PowerPC supports unaligned memory access for simple non-vector types. 11854 // Although accessing unaligned addresses is not as efficient as accessing 11855 // aligned addresses, it is generally more efficient than manual expansion, 11856 // and generally only traps for software emulation when crossing page 11857 // boundaries. 
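// In short, the checks below accept simple scalar types, accept vector
// types only when VSX provides the matching 16-byte accesses
// (v2f64/v2i64/v4f32/v4i32), and always reject ppcf128.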
  if (!VT.isSimple())
    return false;

  if (VT.getSimpleVT().isVector()) {
    if (Subtarget.hasVSX()) {
      if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
          VT != MVT::v4f32 && VT != MVT::v4i32)
        return false;
    } else {
      return false;
    }
  }

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  default:
    break;
  }

  return false;
}

const MCPhysReg *
PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
  // LR is a callee-save register, but we must treat it as clobbered by any
  // call site. Hence we include LR in the scratch registers, which are in
  // turn added as implicit-defs for stackmaps and patchpoints. The same
  // reasoning applies to CTR, which is used by any indirect call.
  static const MCPhysReg ScratchRegs[] = {
    PPC::X12, PPC::LR8, PPC::CTR8, 0
  };

  return ScratchRegs;
}

unsigned PPCTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
}

unsigned PPCTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
}

bool
PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
  EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves

  if (Subtarget.hasVSX() || Subtarget.hasQPX())
    return true;

  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref || Subtarget.enableMachineScheduler())
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}

// Create a fast isel object.
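// SelectionDAGISel calls this hook to construct the PPC FastISel instance
// when fast instruction selection is enabled (typically at -O0).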
FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) const {
  return PPC::createFastISel(FuncInfo, LibInfo);
}

void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  if (Subtarget.isDarwinABI()) return;
  if (!Subtarget.isPPC64()) return;

  // Update IsSplitCSR in PPCFunctionInfo.
  PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>();
  PFI->setIsSplitCSR(true);
}

void PPCTargetLowering::insertCopiesSplitCSR(
  MachineBasicBlock *Entry,
  const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (PPC::G8RCRegClass.contains(*I))
      RC = &PPC::G8RCRegClass;
    else if (PPC::F8RCRegClass.contains(*I))
      RC = &PPC::F8RCRegClass;
    else if (PPC::CRRCRegClass.contains(*I))
      RC = &PPC::CRRCRegClass;
    else if (PPC::VRRCRegClass.contains(*I))
      RC = &PPC::VRRCRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    unsigned NewVR = MRI->createVirtualRegister(RC);
    // Create a copy from the CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions; it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should
    // be nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction()->hasFnAttribute(
               Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
      .addReg(*I);

    // Insert the copy-back instructions right before each exit block's
    // terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
        .addReg(NewVR);
  }
}

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool PPCTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable the default global-variable-based stack guard on Linux.
void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}
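// Note on the two overrides above: together they mean that on Linux the
// stack-protector guard is never read from a __stack_chk_guard global.
// useLoadStackGuardNode() makes instruction selection emit the
// LOAD_STACK_GUARD pseudo (which this backend expands to a load relative to
// the thread pointer), so insertSSPDeclarations() deliberately skips the
// guard-variable declaration that the default implementation would add.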