//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPCCallingConv.h"
#include "PPCCCState.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCTargetMachine.h"
#include "PPCTargetObjectFile.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include <list>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
  cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
  cl::desc("disable setting the node scheduling preference to ILP on PPC"),
  cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
  cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
  cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!useSoftFloat()) {
    addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
    addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
  }

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // This is used in the ppcf128->int sequence. Note it has different semantics
  // from FP_ROUND: that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
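  // (Illustrative note, not from the original comment: if these nodes were
  // marked legal or custom, the generic expander would prefer them when
  // lowering SREM/UREM; keeping them Expand forces the plain
  // divide-multiply-subtract sequence instead.)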
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Legal);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, CTPOP or CTTZ.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have Select.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
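  // (fsel computes FRT = (FRA >= 0.0) ? FRC : FRB, so an FP select_cc whose
  // predicate can be recast as a comparison against zero maps onto a single
  // instruction.)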
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires a SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP as supported here is NOT intended to support
  // SjLj exception handling; it is a lightweight setjmp/longjmp replacement
  // used to support continuations, user-level threading, and so on. As a
  // result, no other SjLj exception interfaces are implemented; please don't
  // build your own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
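      // (For example, an i8 va_arg still consumes a full 8-byte slot: the
      // promoted form loads an i64 and truncates the result.)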
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  if (Subtarget.isSVR4ABI() && !isPPC64)
    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  // Comparisons that require checking two conditions.
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
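  // (The FPCVT feature provides the unsigned and single-precision convert
  // instructions (fcfids, fcfidus, fctiwuz, fctiduz), which is what makes
  // the remaining conversion directions available below.)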
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // Vector instructions introduced in P8.
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType(ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FPOWI, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
      setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Vector operation legalization checks the result type of
      // SIGN_EXTEND_INREG, overall legalization checks the inner type.
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }
    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Legal);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Legal);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f64, Custom);
    setOperationAction(ISD::STORE, MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_ROUND_INREG, MVT::v4f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG, MVT::v4f64, Legal);
    setOperationAction(ISD::FABS, MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f32, Custom);
    setOperationAction(ISD::STORE, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOWI, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND, MVT::v4i1, Legal);
    setOperationAction(ISD::OR, MVT::v4i1, Legal);
    setOperationAction(ISD::XOR, MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
    setOperationAction(ISD::STORE, MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
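  // (Sinking compares next to their users is how CodeGenPrepare copes with
  // targets that have a single set of condition flags; with eight 4-bit CR
  // fields available, PPC comparisons can simply stay put.)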
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(2);
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(4);

  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
    setPrefFunctionAlignment(4);
    setPrefLoopAlignment(4);
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary. Everything
  // else is aligned to 8 bytes on PPC64 and 4 bytes on PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
  return Align;
}

bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER:    break;
  case PPCISD::FSEL:            return "PPCISD::FSEL";
  case PPCISD::FCFID:           return "PPCISD::FCFID";
  case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
  case PPCISD::FRE:             return "PPCISD::FRE";
  case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX:          return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:         return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:        return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:           return "PPCISD::VPERM";
  case PPCISD::XXSPLT:          return "PPCISD::XXSPLT";
  case PPCISD::XXINSERT:        return "PPCISD::XXINSERT";
  case PPCISD::VECSHL:          return "PPCISD::VECSHL";
  case PPCISD::CMPB:            return "PPCISD::CMPB";
  case PPCISD::Hi:              return "PPCISD::Hi";
  case PPCISD::Lo:              return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
  case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
  case PPCISD::DYNAREAOFFSET:   return "PPCISD::DYNAREAOFFSET";
  case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:             return "PPCISD::SRL";
  case PPCISD::SRA:             return "PPCISD::SRA";
  case PPCISD::SHL:             return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL:            return "PPCISD::CALL";
  case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR:           return "PPCISD::MTCTR";
  case PPCISD::BCTRL:           return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
  case PPCISD::MFVSR:           return "PPCISD::MFVSR";
  case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
  case PPCISD::SINT_VEC_TO_FP:  return "PPCISD::SINT_VEC_TO_FP";
  case PPCISD::UINT_VEC_TO_FP:  return "PPCISD::UINT_VEC_TO_FP";
  case PPCISD::ANDIo_1_EQ_BIT:  return "PPCISD::ANDIo_1_EQ_BIT";
  case PPCISD::ANDIo_1_GT_BIT:  return "PPCISD::ANDIo_1_GT_BIT";
  case PPCISD::VCMP:            return "PPCISD::VCMP";
  case PPCISD::VCMPo:           return "PPCISD::VCMPo";
  case PPCISD::LBRX:            return "PPCISD::LBRX";
  case PPCISD::STBRX:           return "PPCISD::STBRX";
  case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
  case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
  case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ:            return "PPCISD::BDNZ";
  case PPCISD::BDZ:             return "PPCISD::BDZ";
  case PPCISD::MFFS:            return "PPCISD::MFFS";
  case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET:          return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
  case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
  case PPCISD::SC:              return "PPCISD::SC";
  case PPCISD::CLRBHRB:         return "PPCISD::CLRBHRB";
  case PPCISD::MFBHRBE:         return "PPCISD::MFBHRBE";
  case PPCISD::RFEBB:           return "PPCISD::RFEBB";
  case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
  case PPCISD::SWAP_NO_CHAIN:   return "PPCISD::SWAP_NO_CHAIN";
  case PPCISD::QVFPERM:         return "PPCISD::QVFPERM";
  case PPCISD::QVGPCI:          return "PPCISD::QVGPCI";
  case PPCISD::QVALIGNI:        return "PPCISD::QVALIGNI";
  case PPCISD::QVESPLATI:       return "PPCISD::QVESPLATI";
  case PPCISD::QBFLT:           return "PPCISD::QBFLT";
  case PPCISD::QVLFSb:          return "PPCISD::QVLFSb";
  }
  return nullptr;
}

EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
                                          EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

  if (Subtarget.hasQPX())
    return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());

  return VT.changeVectorElementTypeToInteger();
}

bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP =
            dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
        return false;
  }
  return true;
}

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
/// current subtarget.
///
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
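/// (Illustrative example, derived from the checks below: for a big-endian
/// merge of two different inputs (ShuffleKind 0), the expected v16i8 mask is
/// <4,5,6,7, 12,13,14,15, 20,21,22,23, 28,29,30,31>, i.e. the low-order word
/// of each input doubleword, in order.)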
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  const PPCSubtarget& Subtarget =
      static_cast<const PPCSubtarget&>(DAG.getSubtarget());
  if (!Subtarget.hasP8Vector())
    return false;

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units.
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit.
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
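/// (Illustrative example, derived from isVMerge: a big-endian vmrglw merge
/// of two different inputs (UnitSize 4, ShuffleKind 0) expects the mask
/// <8,9,10,11, 24,25,26,27, 12,13,14,15, 28,29,30,31>, interleaving the
/// low-half words of the two inputs.)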
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}

/**
 * \brief Common function used to match vmrgew and vmrgow shuffles
 *
 * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target machine.
 *   - Little Endian:
 *     - Use offset of 0 to check for odd elements
 *     - Use offset of 4 to check for even elements
 *   - Big Endian:
 *     - Use offset of 0 to check for even elements
 *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 * ("Targeting your applications - what little endian and big endian IBM XL
 * C/C++ compiler differences mean to you").
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements
 * are numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, thus each vector will contain 16 elements
 * of 8 bits each. More info on the shuffle vector can be found in the
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
 * Language Reference.
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 *   - If the instruction uses the same vector for both inputs, the range of
 *     the indices will be 0 to 15. In this case, the RHSStart value passed
 *     should be 0.
 *   - If the instruction has two different vectors then the range of the
 *     indices will be 0 to 31.
/**
 * \brief Common function used to match vmrgew and vmrgow shuffles
 *
 * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target
 * machine.
 *   - Little Endian:
 *     - Use offset of 0 to check for odd elements
 *     - Use offset of 4 to check for even elements
 *   - Big Endian:
 *     - Use offset of 0 to check for even elements
 *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 * Targeting your applications - what little endian and big endian IBM XL C/C++
 * compiler differences mean to you
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements
 * are numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, thus each vector will contain 16 byte
 * elements. More info on the shuffle vector can be found in the
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
 * Language Reference.
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 *   - If the instruction uses the same vector for both inputs, the range of
 *     the indices will be 0 to 15. In this case, the RHSStart value passed
 *     should be 0.
 *   - If the instruction has two different vectors then the range of the
 *     indices will be 0 to 31. In this case, the RHSStart value passed
 *     should be 16 (indices 0-15 specify elements in the first vector while
 *     indices 16 to 31 specify elements in the second vector).
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] IndexOffset Specifies whether to look for even or odd elements
 * \param[in] RHSStartValue Specifies the starting index for the righthand
 *            input vector to the shuffle_vector instruction
 * \return true iff this shuffle vector represents an even or odd word merge
 */
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
      if (!isConstantOrUndef(N->getMaskElt(i*4+j),
                             i*RHSStartValue+j+IndexOffset) ||
          !isConstantOrUndef(N->getMaskElt(i*4+j+8),
                             i*RHSStartValue+j+IndexOffset+8))
        return false;
  return true;
}

/**
 * \brief Determine if the specified shuffle mask is suitable for the vmrgew
 * or vmrgow instructions.
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
 * \param[in] ShuffleKind Identify the type of merge:
 *   - 0 = big-endian merge with two different inputs;
 *   - 1 = either-endian merge with two identical inputs;
 *   - 2 = little-endian merge with two different inputs (inputs are swapped
 *     for little-endian merges).
 * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask is suitable for a vmrgew or vmrgow merge
 */
bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                              unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    unsigned indexOffset = CheckEven ? 4 : 0;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  } else {
    unsigned indexOffset = CheckEven ? 0 : 4;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 0) // Normal
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  }
}
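// Worked example (for illustration): with CheckEven == true on a big-endian
// target (indexOffset == 0) and two distinct inputs (ShuffleKind == 0),
// isVMRGEOShuffleMask accepts the vmrgew mask
//   <0,1,2,3, 16,17,18,19, 8,9,10,11, 24,25,26,27>.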
/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
/// The ShuffleKind distinguishes between big-endian operations with two
/// different inputs (0), either-endian operations with two identical inputs
/// (1), and little-endian operations with two different inputs (2). For the
/// latter, the input operands are swapped (see PPCInstrAltivec.td).
int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                             SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::v16i8)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;

  ShiftAmt -= i;
  bool isLE = DAG.getDataLayout().isLittleEndian();

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else if (ShuffleKind == 1) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  } else
    return -1;

  if (isLE)
    ShiftAmt = 16 - ShiftAmt;

  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // The consecutive indices need to specify an element, not part of two
  // different elements.  So abandon ship early if this isn't the case.
  if (N->getMaskElt(0) % EltSize != 0)
    return false;

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte
  // element splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}
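// Worked example (for illustration): with EltSize == 4, the mask
//   <4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7>
// passes isSplatShuffleMask above, since it splats word element 1 using only
// the first input vector.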
bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                          unsigned &InsertAtByte, bool &Swap, bool IsLE) {

  // Check that the mask is shuffling words
  for (unsigned i = 0; i < 4; ++i) {
    unsigned B0 = N->getMaskElt(i*4);
    unsigned B1 = N->getMaskElt(i*4+1);
    unsigned B2 = N->getMaskElt(i*4+2);
    unsigned B3 = N->getMaskElt(i*4+3);
    if (B0 % 4)
      return false;
    if (B1 != B0+1 || B2 != B1+1 || B3 != B2+1)
      return false;
  }

  // Now we look at mask elements 0,4,8,12
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;
  unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
  unsigned BigEndianShifts[] = { 3, 0, 1, 2 };

  // Below, let H and L be arbitrary elements of the shuffle mask
  // where H is in the range [4,7] and L is in the range [0,3].
  // H, 1, 2, 3 or L, 5, 6, 7
  if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
      (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
    InsertAtByte = IsLE ? 12 : 0;
    Swap = M0 < 4;
    return true;
  }
  // 0, H, 2, 3 or 4, L, 6, 7
  if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
      (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
    InsertAtByte = IsLE ? 8 : 4;
    Swap = M1 < 4;
    return true;
  }
  // 0, 1, H, 3 or 4, 5, L, 7
  if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
      (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
    InsertAtByte = IsLE ? 4 : 8;
    Swap = M2 < 4;
    return true;
  }
  // 0, 1, 2, H or 4, 5, 6, L
  if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
      (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
    ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
    InsertAtByte = IsLE ? 0 : 12;
    Swap = M3 < 4;
    return true;
  }

  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  if (N->getOperand(1).isUndef()) {
    ShiftElts = 0;
    Swap = true;
    unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
    if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 12 : 0;
      return true;
    }
    if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 8 : 4;
      return true;
    }
    if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
      InsertAtByte = IsLE ? 4 : 8;
      return true;
    }
    if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
      InsertAtByte = IsLE ? 0 : 12;
      return true;
    }
  }

  return false;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
                                SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  if (DAG.getDataLayout().isLittleEndian())
    return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  else
    return SVOp->getMaskElt(0) / EltSize;
}
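// Worked example (for illustration): for a word splat (EltSize == 4) whose
// mask starts at byte 4 (word element 1), getVSPLTImmediate returns 1 on
// big-endian targets and (16/4) - 1 - 1 == 2 on little-endian targets,
// reflecting the reversed element numbering.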
/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(nullptr, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).isUndef()) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.

      LeadingZero &= isNullConstant(UniquedVals[i]);
      LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16)                                   // 0,0,0,4 -> vspltisw(4)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }
    if (LeadingOnes) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its
  // elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).isUndef()) continue;
    if (!OpVal.getNode())
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, check if it consists
  // of a repeated bit pattern of size ByteSize.
  if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
    return SDValue();

  // Properly sign extend the value.
  int MaskVal = SignExtend32(Value, ByteSize * 8);
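  // Worked example (for illustration): with ByteSize == 2 and a build_vector
  // splatting the halfword 0xFFFE, MaskVal sign-extends to -2; -2 fits in the
  // 5-bit signed immediate field checked below, so vspltish(-2) can be used.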
  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5 bit sext field, return it.
  if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
  return SDValue();
}

/// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the
/// shift amount, otherwise return -1.
int PPC::isQVALIGNIShuffleMask(SDNode *N) {
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 4) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  // Check the rest of the elements to see if they are consecutive.
  for (++i; i != 4; ++i)
    if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
      return -1;

  return ShiftAmt;
}

//===----------------------------------------------------------------------===//
//  Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a
/// 32-bit or 64-bit immediate, and if the value can be accurately represented
/// as a sign extension from a 16-bit value.  If so, this returns true and the
/// immediate.
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
static bool isIntS16Immediate(SDValue Op, short &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}
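// Worked example (for illustration): isIntS16Immediate accepts 32767 and
// -32768 but rejects 32768 and -32769, since only the former pair survive a
// round-trip through a sign-extended 16-bit value.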
/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index,
                                            SelectionDAG &DAG) const {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i can fold it if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
    APInt LHSKnownZero, LHSKnownOne;
    APInt RHSKnownZero, RHSKnownOne;
    DAG.computeKnownBits(N.getOperand(0),
                         LHSKnownZero, LHSKnownOne);

    if (LHSKnownZero.getBoolValue()) {
      DAG.computeKnownBits(N.getOperand(1),
                           RHSKnownZero, RHSKnownOne);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnownZero | RHSKnownZero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

// If we happen to be doing an i64 load or store into a stack slot that has
// less than a 4-byte alignment, then the frame-index elimination may need to
// use an indexed load or store instruction (because the offset may not be a
// multiple of 4). The extra register needed to hold the offset comes from the
// register scavenger, and it is possible that the scavenger will need to use
// an emergency spill slot. As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
  // FIXME: This does not handle the LWA case.
  if (VT != MVT::i64)
    return;

  // NOTE: We'll exclude negative FIs here, which come from argument
  // lowering, because there are no known test cases triggering this problem
  // using packed structures (or similar). We can remove this exclusion if
  // we find such a test case. The reason why this is so test-case driven is
  // because this entire 'fixup' is only to prevent crashes (from the
  // register scavenger) on not-really-valid inputs. For example, if we have:
  //   %a = alloca i1
  //   %b = bitcast i1* %a to i64*
  //   store i64 %v, i64* %b
  // then the store should really be marked as 'align 1', but is not. If it
  // were marked as 'align 1' then the indexed form would have been
  // instruction-selected initially, and the problem this 'fixup' is preventing
  // won't happen regardless.
  if (FrameIdx < 0)
    return;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  unsigned Align = MFI.getObjectAlignment(FrameIdx);
  if (Align >= 4)
    return;

  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setHasNonRISpills();
}
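// Worked example (for illustration): an address of the form (add %base, 20)
// can be selected as the [r+imm] form below with displacement 20, while an
// offset such as 70000 does not fit a signed 16-bit displacement and is
// instead materialized separately or selected as [r+r].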
/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg.  If Aligned is true, only accept displacements
/// suitable for STD and friends, i.e. multiples of 4.
bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
                                            SDValue &Base,
                                            SelectionDAG &DAG,
                                            bool Aligned) const {
  // FIXME dl should come from parent load or store, not from address
  SDLoc dl(N);
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!Aligned || (imm & 3) == 0)) {
      Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
        fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!Aligned || (imm & 3) == 0)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.computeKnownBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);

      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        if (FrameIndexSDNode *FI =
              dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
          Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
          fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
        } else {
          Base = N.getOperand(0);
        }
        Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0".
    short Imm;
    if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) {
      Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
      Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                             CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if ((CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
        (!Aligned || (CN->getZExtValue() & 3) == 0)) {
      int Addr = (int)CN->getZExtValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
                                   MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
    fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
  } else
    Base = N;
  return true;      // [r+0]
}
/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address.  This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the operand is an addition, always emit this as [r+r], since this is
  // better (for code size, and execution, as the memop does the add for free)
  // than emitting an explicit add.
  if (N.getOpcode() == ISD::ADD) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Index = N;
  return true;
}
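// Note (for illustration): the pre-increment forms matched below correspond
// to update-form instructions such as lwzu/stwu (and ldu/stdu for i64), which
// write the effective address back into the base register, saving a separate
// add in pointer-walking loops.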
/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  unsigned Alignment;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlignment();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlignment();
    isLoad = false;
  } else
    return false;

  // PowerPC doesn't have preinc load/store instructions for vectors (except
  // for QPX, which does have preinc r+r forms).
  if (VT.isVector()) {
    if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
      return false;
    } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
      AM = ISD::PRE_INC;
      return true;
    }
  }

  if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
    // Common code will reject creating a pre-inc form if the base pointer
    // is a frame index, or if N is a store and the base pointer is either
    // the same as or a predecessor of the value being stored.  Check for
    // those situations here, and try with swapped Base/Offset instead.
    bool Swap = false;

    if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
      Swap = true;
    else if (!isLoad) {
      SDValue Val = cast<StoreSDNode>(N)->getValue();
      if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
        Swap = true;
    }

    if (Swap)
      std::swap(Base, Offset);

    AM = ISD::PRE_INC;
    return true;
  }

  // LDU/STU can only handle immediates that are a multiple of 4.
  if (VT != MVT::i64) {
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, false))
      return false;
  } else {
    // LDU/STU need an address with at least 4-byte alignment.
    if (Alignment < 4)
      return false;

    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, true))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

/// Return true if we should reference labels using a PICBase, set the
/// HiOpFlags and LoOpFlags to the target MO flags.
static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
                               unsigned &HiOpFlags, unsigned &LoOpFlags,
                               const GlobalValue *GV = nullptr) {
  HiOpFlags = PPCII::MO_HA;
  LoOpFlags = PPCII::MO_LO;

  // Don't use the pic base if not in PIC relocation model.
  if (IsPIC) {
    HiOpFlags |= PPCII::MO_PIC_FLAG;
    LoOpFlags |= PPCII::MO_PIC_FLAG;
  }

  // If this is a reference to a global value that requires a non-lazy-ptr,
  // make sure that instruction lowering adds it.
  if (GV && Subtarget.hasLazyResolverStub(GV)) {
    HiOpFlags |= PPCII::MO_NLP_FLAG;
    LoOpFlags |= PPCII::MO_NLP_FLAG;

    if (GV->hasHiddenVisibility()) {
      HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
      LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
    }
  }
}

static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
                             SelectionDAG &DAG) {
  SDLoc DL(HiPart);
  EVT PtrVT = HiPart.getValueType();
  SDValue Zero = DAG.getConstant(0, DL, PtrVT);

  SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);

  // With PIC, the first instruction is actually "GR+hi(&G)".
  if (isPIC)
    Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);

  // Generate non-pic code that has direct accesses to the constant pool.
  // The address of the global is just (hi(&g)+lo(&g)).
  return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
}

static void setUsesTOCBasePtr(MachineFunction &MF) {
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setUsesTOCBasePtr();
}

static void setUsesTOCBasePtr(SelectionDAG &DAG) {
  setUsesTOCBasePtr(DAG.getMachineFunction());
}
static SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, bool Is64Bit,
                           SDValue GA) {
  EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
  SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) :
                          DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);

  SDValue Ops[] = { GA, Reg };
  return DAG.getMemIntrinsicNode(
      PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
      MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0, false, true,
      false, 0);
}

SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  const Constant *C = CP->getConstVal();

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
    return getTOCEntry(DAG, SDLoc(CP), true, GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);

  if (IsPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(),
                                           PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, SDLoc(CP), false, GA);
  }

  SDValue CPIHi =
    DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
  SDValue CPILo =
    DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
  return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
}

SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
    return getTOCEntry(DAG, SDLoc(JT), true, GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);

  if (IsPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
                                        PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, SDLoc(GA), false, GA);
  }

  SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
  SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
  return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
}
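// Note (a sketch, not taken verbatim from this file): a TOC_ENTRY node built
// by getTOCEntry above is eventually materialized as a TOC-relative load,
// e.g. "ld 3, .LC0@toc(2)" for the small code model on 64-bit SVR4, with X2
// serving as the TOC base pointer.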
SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
  const BlockAddress *BA = BASDN->getBlockAddress();

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual BlockAddress is stored in the TOC.
  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
    return getTOCEntry(DAG, SDLoc(BASDN), true, GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
  SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
  SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
  return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
}

SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {
  // FIXME: TLS addresses currently use medium model code sequences,
  // which is the most useful form.  Eventually support for small and
  // large models could be added if users need it, at the cost of
  // additional complexity.
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().Options.EmulatedTLS)
    return LowerToTLSEmulatedModel(GA, DAG);

  SDLoc dl(GA);
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  bool is64bit = Subtarget.isPPC64();
  const Module *M = DAG.getMachineFunction().getFunction()->getParent();
  PICLevel::Level picLevel = M->getPICLevel();

  TLSModel::Model Model = getTargetMachine().getTLSModel(GV);

  if (Model == TLSModel::LocalExec) {
    SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_TPREL_HA);
    SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_TPREL_LO);
    SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2,
                                     is64bit ? MVT::i64 : MVT::i32);
    SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
    return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
  }

  if (Model == TLSModel::InitialExec) {
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                                PPCII::MO_TLS);
    SDValue GOTPtr;
    if (is64bit) {
      setUsesTOCBasePtr(DAG);
      SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
      GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
                           PtrVT, GOTReg, TGA);
    } else
      GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
    SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl,
                                   PtrVT, TGA, GOTPtr);
    return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
  }

  if (Model == TLSModel::GeneralDynamic) {
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue GOTPtr;
    if (is64bit) {
      setUsesTOCBasePtr(DAG);
      SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
      GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
                           GOTReg, TGA);
    } else {
      if (picLevel == PICLevel::SmallPIC)
        GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
      else
        GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
    }
    return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
                       GOTPtr, TGA, TGA);
  }

  if (Model == TLSModel::LocalDynamic) {
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue GOTPtr;
    if (is64bit) {
      setUsesTOCBasePtr(DAG);
      SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
      GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
                           GOTReg, TGA);
    } else {
      if (picLevel == PICLevel::SmallPIC)
        GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
      else
        GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
    }
    SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
                                  PtrVT, GOTPtr, TGA, TGA);
    SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
                                      PtrVT, TLSAddr, TGA);
    return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
  }

  llvm_unreachable("Unknown TLS model!");
}
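// Note (a sketch, not taken verbatim from this file): the local-exec sequence
// built above corresponds to "addis reg, r13, sym@tprel@ha" followed by
// "addi reg, reg, sym@tprel@l" on PPC64, where r13 (X13) holds the thread
// pointer.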
SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  SDLoc DL(GSDN);
  const GlobalValue *GV = GSDN->getGlobal();

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
    return getTOCEntry(DAG, DL, true, GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);

  if (IsPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
                                            GSDN->getOffset(),
                                            PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, DL, false, GA);
  }

  SDValue GAHi =
    DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
  SDValue GALo =
    DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);

  SDValue Ptr = LowerLabelRef(GAHi, GALo, IsPIC, DAG);

  // If the global reference is actually to a non-lazy-pointer, we have to do
  // an extra load to get the address of the global.
  if (MOHiFlag & PPCII::MO_NLP_FLAG)
    Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr,
                      MachinePointerInfo());
  return Ptr;
}
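// Worked example (for illustration): the seteq-zero expansion below turns
// (i32 x) == 0 into (cntlzw x) >> 5, since cntlzw yields 32 only when x is
// zero and shifting right by log2(32) == 5 produces the 0/1 result.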
SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc dl(Op);

  if (Op.getValueType() == MVT::v2i64) {
    // When the operands themselves are v2i64 values, we need to do something
    // special because VSX has no underlying comparison operations for these.
    if (Op.getOperand(0).getValueType() == MVT::v2i64) {
      // Equality can be handled by casting to the legal type for Altivec
      // comparisons, everything else needs to be expanded.
      if (CC == ISD::SETEQ || CC == ISD::SETNE) {
        return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
                 DAG.getSetCC(dl, MVT::v4i32,
                   DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
                   DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
                   CC));
      }

      return SDValue();
    }

    // We handle most of these in the usual way.
    return Op;
  }

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    if (C->isNullValue() && CC == ISD::SETEQ) {
      EVT VT = Op.getOperand(0).getValueType();
      SDValue Zext = Op.getOperand(0);
      if (VT.bitsLT(MVT::i32)) {
        VT = MVT::i32;
        Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
      }
      unsigned Log2b = Log2_32(VT.getSizeInBits());
      SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
      SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
                                DAG.getConstant(Log2b, dl, MVT::i32));
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
    }
    // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized.  FIXME: revisit this when we can custom lower all setcc
    // optimizations.
    if (C->isAllOnesValue() || C->isNullValue())
      return SDValue();
  }

  // If we have an integer seteq/setne, turn it into a compare against zero
  // by xor'ing the rhs with the lhs, which is faster than setting a
  // condition register, reading it back out, and masking the correct bit.
  // The normal approach here uses sub to do this instead of xor.  Using xor
  // exposes the result to other bit-twiddling opportunities.
  EVT LHSVT = Op.getOperand(0).getValueType();
  if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    EVT VT = Op.getValueType();
    SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
                              Op.getOperand(1));
    return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
  }
  return SDValue();
}

SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);

  assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");

  // gpr_index
  SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
                                    VAListPtr, MachinePointerInfo(SV), MVT::i8);
  InChain = GprIndex.getValue(1);

  if (VT == MVT::i64) {
    // Check if GprIndex is even
    SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
                                 DAG.getConstant(1, dl, MVT::i32));
    SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
                                DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
    SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
                                          DAG.getConstant(1, dl, MVT::i32));
    // Align GprIndex to be even if it isn't
    GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
                           GprIndex);
  }

  // fpr index is 1 byte after gpr
  SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                               DAG.getConstant(1, dl, MVT::i32));

  // fpr
  SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
                                    FprPtr, MachinePointerInfo(SV), MVT::i8);
  InChain = FprIndex.getValue(1);

  SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                                       DAG.getConstant(8, dl, MVT::i32));

  SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                                        DAG.getConstant(4, dl, MVT::i32));

  // areas
  SDValue OverflowArea =
    DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
  InChain = OverflowArea.getValue(1);
  SDValue RegSaveArea =
    DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
  InChain = RegSaveArea.getValue(1);

  // select overflow_area if index >= 8
  SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
                            DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);

  // adjustment constant gpr_index * 4/8
  SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
                                    VT.isInteger() ? GprIndex : FprIndex,
                                    DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
                                                    MVT::i32));

  // OurReg = RegSaveArea + RegConstant
  SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
                               RegConstant);

  // Floating types are 32 bytes into RegSaveArea
  if (VT.isFloatingPoint())
    OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
                         DAG.getConstant(32, dl, MVT::i32));

  // increase {f,g}pr_index by 1 (or 2 if VT is i64)
  SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
                                   VT.isInteger() ? GprIndex : FprIndex,
                                   DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
                                                   MVT::i32));

  InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
                              VT.isInteger() ? VAListPtr : FprPtr,
                              MachinePointerInfo(SV), MVT::i8);

  // determine if we should load from reg_save_area or overflow_area
  SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg,
                               OverflowArea);

  // increase overflow_area by 4/8 if gpr/fpr index >= 8
  SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
                                          DAG.getConstant(VT.isInteger() ? 4 : 8,
                                                          dl, MVT::i32));

  OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
                             OverflowAreaPlusN);

  InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
                              MachinePointerInfo(), MVT::i32);

  return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
  assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");

  // We have to copy the entire va_list struct:
  // 2*sizeof(char) + 2 bytes of alignment + 2*sizeof(char*) = 12 bytes
  return DAG.getMemcpy(Op.getOperand(0), Op,
                       Op.getOperand(1), Op.getOperand(2),
                       DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true,
                       false, MachinePointerInfo(), MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
                                                  SelectionDAG &DAG) const {
  return Op.getOperand(0);
}
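// Note (for illustration): INIT_TRAMPOLINE below is not expanded inline; it
// is lowered to a call to the runtime helper
// __trampoline_setup(Trmp, TrampSize, FPtr, Nest), with TrampSize fixed at
// 40 bytes on PPC32 and 48 bytes on PPC64.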
MVT::i64 : MVT::i32); 2537 Args.push_back(Entry); 2538 2539 Entry.Node = FPtr; Args.push_back(Entry); 2540 Entry.Node = Nest; Args.push_back(Entry); 2541 2542 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 2543 TargetLowering::CallLoweringInfo CLI(DAG); 2544 CLI.setDebugLoc(dl).setChain(Chain) 2545 .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()), 2546 DAG.getExternalSymbol("__trampoline_setup", PtrVT), 2547 std::move(Args)); 2548 2549 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 2550 return CallResult.second; 2551 } 2552 2553 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 2554 MachineFunction &MF = DAG.getMachineFunction(); 2555 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2556 EVT PtrVT = getPointerTy(MF.getDataLayout()); 2557 2558 SDLoc dl(Op); 2559 2560 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) { 2561 // vastart just stores the address of the VarArgsFrameIndex slot into the 2562 // memory location argument. 2563 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2564 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2565 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2566 MachinePointerInfo(SV)); 2567 } 2568 2569 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 2570 // We suppose the given va_list is already allocated. 2571 // 2572 // typedef struct { 2573 // char gpr; /* index into the array of 8 GPRs 2574 // * stored in the register save area 2575 // * gpr=0 corresponds to r3, 2576 // * gpr=1 to r4, etc. 2577 // */ 2578 // char fpr; /* index into the array of 8 FPRs 2579 // * stored in the register save area 2580 // * fpr=0 corresponds to f1, 2581 // * fpr=1 to f2, etc. 
  //                 */
  //  char *overflow_arg_area;
  //                /* location on stack that holds
  //                 * the next overflow argument
  //                 */
  //  char *reg_save_area;
  //                /* where r3:r10 and f1:f8 (if saved)
  //                 * are stored
  //                 */
  // } va_list[1];

  SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
  SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
  SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
                                            PtrVT);
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 PtrVT);

  uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
  SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);

  uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
  SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);

  uint64_t FPROffset = 1;
  SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  // Store first byte: number of int regs
  SDValue firstStore =
    DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
                      MachinePointerInfo(SV), MVT::i8);
  uint64_t nextOffset = FPROffset;
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
                                ConstFPROffset);

  // Store second byte: number of float regs
  SDValue secondStore =
    DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
                      MachinePointerInfo(SV, nextOffset), MVT::i8);
  nextOffset += StackOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);

  // Store second word: arguments given on stack
  SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
                                    MachinePointerInfo(SV, nextOffset));
  nextOffset += FrameOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);

  // Store third word: arguments given in registers
  return DAG.getStore(thirdStore, dl, FR, nextPtr,
                      MachinePointerInfo(SV, nextOffset));
}

#include "PPCGenCallingConv.inc"

// Function whose sole purpose is to kill compiler warnings
// stemming from unused functions included from PPCGenCallingConv.inc.
CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const {
  return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS;
}

bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                      CCValAssign::LocInfo &LocInfo,
                                      ISD::ArgFlagsTy &ArgFlags,
                                      CCState &State) {
  return true;
}
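// Note (for illustration): under the 32-bit SVR4 ABI an i64 argument must
// occupy an aligned GPR pair (r3:r4, r5:r6, r7:r8 or r9:r10). If, say, r3
// already holds an earlier argument, the helper below skips r4 so the i64
// lands in r5:r6.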
bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
                                             MVT &LocVT,
                                             CCValAssign::LocInfo &LocInfo,
                                             ISD::ArgFlagsTy &ArgFlags,
                                             CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // Skip one register if the first unallocated register has an even register
  // number and there are still argument registers available which have not
  // been allocated yet. RegNum is actually an index into ArgRegs, which means
  // we need to skip a register if RegNum is odd.
  if (RegNum != NumArgRegs && RegNum % 2 == 1) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the first
  // unallocated register has an odd register number and does not actually
  // allocate a register for the current argument.
  return false;
}

bool
llvm::CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128(unsigned &ValNo, MVT &ValVT,
                                                  MVT &LocVT,
                                                  CCValAssign::LocInfo &LocInfo,
                                                  ISD::ArgFlagsTy &ArgFlags,
                                                  CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);
  int RegsLeft = NumArgRegs - RegNum;

  // Skip if there are not enough registers left for the long double type (4
  // gpr regs in soft float mode) and put the long double argument on the
  // stack.
  if (RegNum != NumArgRegs && RegsLeft < 4) {
    for (int i = 0; i < RegsLeft; i++) {
      State.AllocateReg(ArgRegs[RegNum + i]);
    }
  }

  return false;
}

bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
                                               MVT &LocVT,
                                               CCValAssign::LocInfo &LocInfo,
                                               ISD::ArgFlagsTy &ArgFlags,
                                               CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8
  };

  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // If there is only one Floating-point register left we need to put both f64
  // values of a split ppc_fp128 value on the stack.
  if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the two
  // f64 values a ppc_fp128 value is split into are both passed in registers
  // or both passed on the stack and does not actually allocate a register for
  // the current argument.
  return false;
}

/// FPR - The set of FP registers that should be allocated for arguments,
/// on Darwin.
static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5,
                                PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10,
                                PPC::F11, PPC::F12, PPC::F13};

/// QFPR - The set of QPX registers that should be allocated for arguments.
static const MCPhysReg QFPR[] = {
    PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7,
    PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};

/// CalculateStackSlotSize - Calculates the size reserved for this argument on
/// the stack.
static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
                                       unsigned PtrByteSize) {
  unsigned ArgSize = ArgVT.getStoreSize();
  if (Flags.isByVal())
    ArgSize = Flags.getByValSize();

  // Round up to multiples of the pointer size, except for array members,
  // which are always packed.
  if (!Flags.isInConsecutiveRegs())
    ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

  return ArgSize;
}
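// Worked example (for illustration): a 12-byte by-value aggregate with
// PtrByteSize == 8 reserves ((12 + 7) / 8) * 8 == 16 bytes of parameter save
// area in CalculateStackSlotSize above.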
/// CalculateStackSlotAlignment - Calculates the alignment of this argument
/// on the stack.
static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
                                            ISD::ArgFlagsTy Flags,
                                            unsigned PtrByteSize) {
  unsigned Align = PtrByteSize;

  // Altivec parameters are padded to a 16 byte boundary.
  if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
      ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
      ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
      ArgVT == MVT::v1i128)
    Align = 16;
  // QPX vector types stored in double-precision are padded to a 32 byte
  // boundary.
  else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1)
    Align = 32;

  // ByVal parameters are aligned as requested.
  if (Flags.isByVal()) {
    unsigned BVAlign = Flags.getByValAlign();
    if (BVAlign > PtrByteSize) {
      if (BVAlign % PtrByteSize != 0)
        llvm_unreachable(
            "ByVal alignment is not a multiple of the pointer size");

      Align = BVAlign;
    }
  }

  // Array members are always packed to their original alignment.
  if (Flags.isInConsecutiveRegs()) {
    // If the array member was split into multiple registers, the first
    // needs to be aligned to the size of the full type.  (Except for
    // ppcf128, which is only aligned as its f64 components.)
    if (Flags.isSplit() && OrigVT != MVT::ppcf128)
      Align = OrigVT.getStoreSize();
    else
      Align = ArgVT.getStoreSize();
  }

  return Align;
}
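// Worked example (for illustration): a v4f32 Altivec argument gets
// Align == 16 from CalculateStackSlotAlignment above, so a running ArgOffset
// of 40 would be rounded up to 48 before the slot is assigned.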
2840 (HasQPX && (ArgVT == MVT::v4f32 || 2841 ArgVT == MVT::v4f64 || 2842 ArgVT == MVT::v4i1))) 2843 if (AvailableFPRs > 0) { 2844 --AvailableFPRs; 2845 return false; 2846 } 2847 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 2848 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 2849 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 2850 ArgVT == MVT::v1i128) 2851 if (AvailableVRs > 0) { 2852 --AvailableVRs; 2853 return false; 2854 } 2855 } 2856 2857 return UseMemory; 2858 } 2859 2860 /// EnsureStackAlignment - Round stack frame size up from NumBytes to 2861 /// ensure minimum alignment required for target. 2862 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, 2863 unsigned NumBytes) { 2864 unsigned TargetAlign = Lowering->getStackAlignment(); 2865 unsigned AlignMask = TargetAlign - 1; 2866 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 2867 return NumBytes; 2868 } 2869 2870 SDValue PPCTargetLowering::LowerFormalArguments( 2871 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 2872 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 2873 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 2874 if (Subtarget.isSVR4ABI()) { 2875 if (Subtarget.isPPC64()) 2876 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, 2877 dl, DAG, InVals); 2878 else 2879 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, 2880 dl, DAG, InVals); 2881 } else { 2882 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, 2883 dl, DAG, InVals); 2884 } 2885 } 2886 2887 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4( 2888 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 2889 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 2890 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 2891 2892 // 32-bit SVR4 ABI Stack Frame Layout: 2893 // +-----------------------------------+ 2894 // +--> | Back chain | 2895 // | +-----------------------------------+ 2896 // | | Floating-point register save area | 2897 // | +-----------------------------------+ 2898 // | | General register save area | 2899 // | +-----------------------------------+ 2900 // | | CR save word | 2901 // | +-----------------------------------+ 2902 // | | VRSAVE save word | 2903 // | +-----------------------------------+ 2904 // | | Alignment padding | 2905 // | +-----------------------------------+ 2906 // | | Vector register save area | 2907 // | +-----------------------------------+ 2908 // | | Local variable space | 2909 // | +-----------------------------------+ 2910 // | | Parameter list area | 2911 // | +-----------------------------------+ 2912 // | | LR save word | 2913 // | +-----------------------------------+ 2914 // SP--> +--- | Back chain | 2915 // +-----------------------------------+ 2916 // 2917 // Specifications: 2918 // System V Application Binary Interface PowerPC Processor Supplement 2919 // AltiVec Technology Programming Interface Manual 2920 2921 MachineFunction &MF = DAG.getMachineFunction(); 2922 MachineFrameInfo &MFI = MF.getFrameInfo(); 2923 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2924 2925 EVT PtrVT = getPointerTy(MF.getDataLayout()); 2926 // Potential tail calls could cause overwriting of argument stack slots. 2927 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 2928 (CallConv == CallingConv::Fast)); 2929 unsigned PtrByteSize = 4; 2930 2931 // Assign locations to all of the incoming arguments. 
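// As a rough illustration of the 32-bit SVR4 convention handled here: for
// void f(int a, double b), a is assigned to R3 and (with hard float) b to F1,
// so neither consumes parameter list space beyond the linkage area.
// (Example only; the authoritative rules live in CC_PPC32_SVR4.)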
2932 SmallVector<CCValAssign, 16> ArgLocs; 2933 PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 2934 *DAG.getContext()); 2935 2936 // Reserve space for the linkage area on the stack. 2937 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 2938 CCInfo.AllocateStack(LinkageSize, PtrByteSize); 2939 if (useSoftFloat()) 2940 CCInfo.PreAnalyzeFormalArguments(Ins); 2941 2942 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 2943 CCInfo.clearWasPPCF128(); 2944 2945 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2946 CCValAssign &VA = ArgLocs[i]; 2947 2948 // Arguments stored in registers. 2949 if (VA.isRegLoc()) { 2950 const TargetRegisterClass *RC; 2951 EVT ValVT = VA.getValVT(); 2952 2953 switch (ValVT.getSimpleVT().SimpleTy) { 2954 default: 2955 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 2956 case MVT::i1: 2957 case MVT::i32: 2958 RC = &PPC::GPRCRegClass; 2959 break; 2960 case MVT::f32: 2961 if (Subtarget.hasP8Vector()) 2962 RC = &PPC::VSSRCRegClass; 2963 else 2964 RC = &PPC::F4RCRegClass; 2965 break; 2966 case MVT::f64: 2967 if (Subtarget.hasVSX()) 2968 RC = &PPC::VSFRCRegClass; 2969 else 2970 RC = &PPC::F8RCRegClass; 2971 break; 2972 case MVT::v16i8: 2973 case MVT::v8i16: 2974 case MVT::v4i32: 2975 RC = &PPC::VRRCRegClass; 2976 break; 2977 case MVT::v4f32: 2978 RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass; 2979 break; 2980 case MVT::v2f64: 2981 case MVT::v2i64: 2982 RC = &PPC::VSHRCRegClass; 2983 break; 2984 case MVT::v4f64: 2985 RC = &PPC::QFRCRegClass; 2986 break; 2987 case MVT::v4i1: 2988 RC = &PPC::QBRCRegClass; 2989 break; 2990 } 2991 2992 // Transform the arguments stored in physical registers into virtual ones. 2993 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2994 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, 2995 ValVT == MVT::i1 ? MVT::i32 : ValVT); 2996 2997 if (ValVT == MVT::i1) 2998 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); 2999 3000 InVals.push_back(ArgValue); 3001 } else { 3002 // Argument stored in memory. 3003 assert(VA.isMemLoc()); 3004 3005 unsigned ArgSize = VA.getLocVT().getStoreSize(); 3006 int FI = MFI.CreateFixedObject(ArgSize, VA.getLocMemOffset(), 3007 isImmutable); 3008 3009 // Create load nodes to retrieve arguments from the stack. 3010 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3011 InVals.push_back( 3012 DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo())); 3013 } 3014 } 3015 3016 // Assign locations to all of the incoming aggregate by value arguments. 3017 // Aggregates passed by value are stored in the local variable space of the 3018 // caller's stack frame, right above the parameter list area. 3019 SmallVector<CCValAssign, 16> ByValArgLocs; 3020 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 3021 ByValArgLocs, *DAG.getContext()); 3022 3023 // Reserve stack space for the allocations in CCInfo. 3024 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 3025 3026 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 3027 3028 // Area that is at least reserved in the caller of this function. 3029 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 3030 MinReservedArea = std::max(MinReservedArea, LinkageSize); 3031 3032 // Set the size that is at least reserved in caller of this function. Tail 3033 // call optimized function's reserved stack space needs to be aligned so that 3034 // taking the difference between two stack areas will result in an aligned 3035 // stack. 
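// E.g. with a 16-byte target stack alignment, a reserved area of 40 bytes is
// rounded up by EnsureStackAlignment to 48: (40 + 15) & ~15 == 48.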
3036 MinReservedArea = 3037 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 3038 FuncInfo->setMinReservedArea(MinReservedArea); 3039 3040 SmallVector<SDValue, 8> MemOps; 3041 3042 // If the function takes variable number of arguments, make a frame index for 3043 // the start of the first vararg value... for expansion of llvm.va_start. 3044 if (isVarArg) { 3045 static const MCPhysReg GPArgRegs[] = { 3046 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3047 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3048 }; 3049 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); 3050 3051 static const MCPhysReg FPArgRegs[] = { 3052 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 3053 PPC::F8 3054 }; 3055 unsigned NumFPArgRegs = array_lengthof(FPArgRegs); 3056 3057 if (useSoftFloat()) 3058 NumFPArgRegs = 0; 3059 3060 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs)); 3061 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs)); 3062 3063 // Make room for NumGPArgRegs and NumFPArgRegs. 3064 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + 3065 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8; 3066 3067 FuncInfo->setVarArgsStackOffset( 3068 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8, 3069 CCInfo.getNextStackOffset(), true)); 3070 3071 FuncInfo->setVarArgsFrameIndex(MFI.CreateStackObject(Depth, 8, false)); 3072 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3073 3074 // The fixed integer arguments of a variadic function are stored to the 3075 // VarArgsFrameIndex on the stack so that they may be loaded by 3076 // dereferencing the result of va_next. 3077 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) { 3078 // Get an existing live-in vreg, or add a new one. 3079 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]); 3080 if (!VReg) 3081 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); 3082 3083 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3084 SDValue Store = 3085 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3086 MemOps.push_back(Store); 3087 // Increment the address by four for the next argument to store 3088 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); 3089 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3090 } 3091 3092 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 3093 // is set. 3094 // The double arguments are stored to the VarArgsFrameIndex 3095 // on the stack. 3096 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) { 3097 // Get an existing live-in vreg, or add a new one. 3098 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); 3099 if (!VReg) 3100 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 3101 3102 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 3103 SDValue Store = 3104 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3105 MemOps.push_back(Store); 3106 // Increment the address by eight for the next argument to store 3107 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl, 3108 PtrVT); 3109 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3110 } 3111 } 3112 3113 if (!MemOps.empty()) 3114 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3115 3116 return Chain; 3117 } 3118 3119 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3120 // value to MVT::i64 and then truncate to the correct register size. 
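// For instance, a signext i32 argument arrives in a 64-bit GPR; it is wrapped
// in AssertSext before the final TRUNCATE, so later nodes may assume the
// upper 32 bits replicate the sign bit of the low word.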
3121 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, 3122 EVT ObjectVT, SelectionDAG &DAG, 3123 SDValue ArgVal, 3124 const SDLoc &dl) const { 3125 if (Flags.isSExt()) 3126 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 3127 DAG.getValueType(ObjectVT)); 3128 else if (Flags.isZExt()) 3129 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 3130 DAG.getValueType(ObjectVT)); 3131 3132 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal); 3133 } 3134 3135 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4( 3136 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3137 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3138 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3139 // TODO: add description of PPC stack frame format, or at least some docs. 3140 // 3141 bool isELFv2ABI = Subtarget.isELFv2ABI(); 3142 bool isLittleEndian = Subtarget.isLittleEndian(); 3143 MachineFunction &MF = DAG.getMachineFunction(); 3144 MachineFrameInfo &MFI = MF.getFrameInfo(); 3145 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3146 3147 assert(!(CallConv == CallingConv::Fast && isVarArg) && 3148 "fastcc not supported on varargs functions"); 3149 3150 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3151 // Potential tail calls could cause overwriting of argument stack slots. 3152 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3153 (CallConv == CallingConv::Fast)); 3154 unsigned PtrByteSize = 8; 3155 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3156 3157 static const MCPhysReg GPR[] = { 3158 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3159 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3160 }; 3161 static const MCPhysReg VR[] = { 3162 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3163 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3164 }; 3165 static const MCPhysReg VSRH[] = { 3166 PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8, 3167 PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13 3168 }; 3169 3170 const unsigned Num_GPR_Regs = array_lengthof(GPR); 3171 const unsigned Num_FPR_Regs = 13; 3172 const unsigned Num_VR_Regs = array_lengthof(VR); 3173 const unsigned Num_QFPR_Regs = Num_FPR_Regs; 3174 3175 // Do a first pass over the arguments to determine whether the ABI 3176 // guarantees that our caller has allocated the parameter save area 3177 // on its stack frame. In the ELFv1 ABI, this is always the case; 3178 // in the ELFv2 ABI, it is true if this is a vararg function or if 3179 // any parameter is located in a stack slot. 3180 3181 bool HasParameterArea = !isELFv2ABI || isVarArg; 3182 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize; 3183 unsigned NumBytes = LinkageSize; 3184 unsigned AvailableFPRs = Num_FPR_Regs; 3185 unsigned AvailableVRs = Num_VR_Regs; 3186 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 3187 if (Ins[i].Flags.isNest()) 3188 continue; 3189 3190 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags, 3191 PtrByteSize, LinkageSize, ParamAreaSize, 3192 NumBytes, AvailableFPRs, AvailableVRs, 3193 Subtarget.hasQPX())) 3194 HasParameterArea = true; 3195 } 3196 3197 // Add DAG nodes to load the arguments or copy them out of registers. On 3198 // entry to a function on PPC, the arguments start after the linkage area, 3199 // although the first ones are often in registers. 
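// Illustratively, under ELFv2 a non-varargs callee whose arguments all fit in
// registers leaves HasParameterArea false above, meaning the caller was not
// required to allocate the 64-byte parameter save area at all.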
3200 3201 unsigned ArgOffset = LinkageSize; 3202 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3203 unsigned &QFPR_idx = FPR_idx; 3204 SmallVector<SDValue, 8> MemOps; 3205 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 3206 unsigned CurArgIdx = 0; 3207 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3208 SDValue ArgVal; 3209 bool needsLoad = false; 3210 EVT ObjectVT = Ins[ArgNo].VT; 3211 EVT OrigVT = Ins[ArgNo].ArgVT; 3212 unsigned ObjSize = ObjectVT.getStoreSize(); 3213 unsigned ArgSize = ObjSize; 3214 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3215 if (Ins[ArgNo].isOrigArg()) { 3216 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 3217 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 3218 } 3219 // We re-align the argument offset for each argument, except when using the 3220 // fast calling convention, when we need to make sure we do that only when 3221 // we'll actually use a stack slot. 3222 unsigned CurArgOffset, Align; 3223 auto ComputeArgOffset = [&]() { 3224 /* Respect alignment of argument on the stack. */ 3225 Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize); 3226 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 3227 CurArgOffset = ArgOffset; 3228 }; 3229 3230 if (CallConv != CallingConv::Fast) { 3231 ComputeArgOffset(); 3232 3233 /* Compute GPR index associated with argument offset. */ 3234 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 3235 GPR_idx = std::min(GPR_idx, Num_GPR_Regs); 3236 } 3237 3238 // FIXME the codegen can be much improved in some cases. 3239 // We do not have to keep everything in memory. 3240 if (Flags.isByVal()) { 3241 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 3242 3243 if (CallConv == CallingConv::Fast) 3244 ComputeArgOffset(); 3245 3246 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3247 ObjSize = Flags.getByValSize(); 3248 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3249 // Empty aggregate parameters do not take up registers. Examples: 3250 // struct { } a; 3251 // union { } b; 3252 // int c[0]; 3253 // etc. However, we have to provide a place-holder in InVals, so 3254 // pretend we have an 8-byte item at the current address for that 3255 // purpose. 3256 if (!ObjSize) { 3257 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true); 3258 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3259 InVals.push_back(FIN); 3260 continue; 3261 } 3262 3263 // Create a stack object covering all stack doublewords occupied 3264 // by the argument. If the argument is (fully or partially) on 3265 // the stack, or if the argument is fully in registers but the 3266 // caller has allocated the parameter save anyway, we can refer 3267 // directly to the caller's stack frame. Otherwise, create a 3268 // local copy in our own frame. 3269 int FI; 3270 if (HasParameterArea || 3271 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize) 3272 FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true); 3273 else 3274 FI = MFI.CreateStackObject(ArgSize, Align, false); 3275 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3276 3277 // Handle aggregates smaller than 8 bytes. 3278 if (ObjSize < PtrByteSize) { 3279 // The value of the object is its address, which differs from the 3280 // address of the enclosing doubleword on big-endian systems. 
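// E.g. a 3-byte byval object on a big-endian target sits in the high-order
// bytes of its doubleword, so the adjustment below yields FIN + (8 - 3).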
3281 SDValue Arg = FIN; 3282 if (!isLittleEndian) { 3283 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT); 3284 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff); 3285 } 3286 InVals.push_back(Arg); 3287 3288 if (GPR_idx != Num_GPR_Regs) { 3289 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3290 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3291 SDValue Store; 3292 3293 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 3294 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 3295 (ObjSize == 2 ? MVT::i16 : MVT::i32)); 3296 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg, 3297 MachinePointerInfo(&*FuncArg), ObjType); 3298 } else { 3299 // For sizes that don't fit a truncating store (3, 5, 6, 7), 3300 // store the whole register as-is to the parameter save area 3301 // slot. 3302 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3303 MachinePointerInfo(&*FuncArg)); 3304 } 3305 3306 MemOps.push_back(Store); 3307 } 3308 // Whether we copied from a register or not, advance the offset 3309 // into the parameter save area by a full doubleword. 3310 ArgOffset += PtrByteSize; 3311 continue; 3312 } 3313 3314 // The value of the object is its address, which is the address of 3315 // its first stack doubleword. 3316 InVals.push_back(FIN); 3317 3318 // Store whatever pieces of the object are in registers to memory. 3319 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3320 if (GPR_idx == Num_GPR_Regs) 3321 break; 3322 3323 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3324 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3325 SDValue Addr = FIN; 3326 if (j) { 3327 SDValue Off = DAG.getConstant(j, dl, PtrVT); 3328 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off); 3329 } 3330 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr, 3331 MachinePointerInfo(&*FuncArg, j)); 3332 MemOps.push_back(Store); 3333 ++GPR_idx; 3334 } 3335 ArgOffset += ArgSize; 3336 continue; 3337 } 3338 3339 switch (ObjectVT.getSimpleVT().SimpleTy) { 3340 default: llvm_unreachable("Unhandled argument type!"); 3341 case MVT::i1: 3342 case MVT::i32: 3343 case MVT::i64: 3344 if (Flags.isNest()) { 3345 // The 'nest' parameter, if any, is passed in R11. 3346 unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass); 3347 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3348 3349 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3350 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3351 3352 break; 3353 } 3354 3355 // These can be scalar arguments or elements of an integer array type 3356 // passed directly. Clang may use those instead of "byval" aggregate 3357 // types to avoid forcing arguments to memory unnecessarily. 3358 if (GPR_idx != Num_GPR_Regs) { 3359 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3360 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3361 3362 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3363 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3364 // value to MVT::i64 and then truncate to the correct register size. 
3365 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3366 } else { 3367 if (CallConv == CallingConv::Fast) 3368 ComputeArgOffset(); 3369 3370 needsLoad = true; 3371 ArgSize = PtrByteSize; 3372 } 3373 if (CallConv != CallingConv::Fast || needsLoad) 3374 ArgOffset += 8; 3375 break; 3376 3377 case MVT::f32: 3378 case MVT::f64: 3379 // These can be scalar arguments or elements of a float array type 3380 // passed directly. The latter are used to implement ELFv2 homogenous 3381 // float aggregates. 3382 if (FPR_idx != Num_FPR_Regs) { 3383 unsigned VReg; 3384 3385 if (ObjectVT == MVT::f32) 3386 VReg = MF.addLiveIn(FPR[FPR_idx], 3387 Subtarget.hasP8Vector() 3388 ? &PPC::VSSRCRegClass 3389 : &PPC::F4RCRegClass); 3390 else 3391 VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX() 3392 ? &PPC::VSFRCRegClass 3393 : &PPC::F8RCRegClass); 3394 3395 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3396 ++FPR_idx; 3397 } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) { 3398 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 3399 // once we support fp <-> gpr moves. 3400 3401 // This can only ever happen in the presence of f32 array types, 3402 // since otherwise we never run out of FPRs before running out 3403 // of GPRs. 3404 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3405 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3406 3407 if (ObjectVT == MVT::f32) { 3408 if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0)) 3409 ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal, 3410 DAG.getConstant(32, dl, MVT::i32)); 3411 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); 3412 } 3413 3414 ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal); 3415 } else { 3416 if (CallConv == CallingConv::Fast) 3417 ComputeArgOffset(); 3418 3419 needsLoad = true; 3420 } 3421 3422 // When passing an array of floats, the array occupies consecutive 3423 // space in the argument area; only round up to the next doubleword 3424 // at the end of the array. Otherwise, each float takes 8 bytes. 3425 if (CallConv != CallingConv::Fast || needsLoad) { 3426 ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize; 3427 ArgOffset += ArgSize; 3428 if (Flags.isInConsecutiveRegsLast()) 3429 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3430 } 3431 break; 3432 case MVT::v4f32: 3433 case MVT::v4i32: 3434 case MVT::v8i16: 3435 case MVT::v16i8: 3436 case MVT::v2f64: 3437 case MVT::v2i64: 3438 case MVT::v1i128: 3439 if (!Subtarget.hasQPX()) { 3440 // These can be scalar arguments or elements of a vector array type 3441 // passed directly. The latter are used to implement ELFv2 homogenous 3442 // vector aggregates. 3443 if (VR_idx != Num_VR_Regs) { 3444 unsigned VReg = (ObjectVT == MVT::v2f64 || ObjectVT == MVT::v2i64) ? 3445 MF.addLiveIn(VSRH[VR_idx], &PPC::VSHRCRegClass) : 3446 MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 3447 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3448 ++VR_idx; 3449 } else { 3450 if (CallConv == CallingConv::Fast) 3451 ComputeArgOffset(); 3452 3453 needsLoad = true; 3454 } 3455 if (CallConv != CallingConv::Fast || needsLoad) 3456 ArgOffset += 16; 3457 break; 3458 } // not QPX 3459 3460 assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 && 3461 "Invalid QPX parameter type"); 3462 /* fall through */ 3463 3464 case MVT::v4f64: 3465 case MVT::v4i1: 3466 // QPX vectors are treated like their scalar floating-point subregisters 3467 // (except that they're larger). 
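// (Sketch of the sizing below: v4f32 occupies 16 bytes of argument space,
// while v4f64 and v4i1 occupy 32.)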
3468 unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32; 3469 if (QFPR_idx != Num_QFPR_Regs) { 3470 const TargetRegisterClass *RC; 3471 switch (ObjectVT.getSimpleVT().SimpleTy) { 3472 case MVT::v4f64: RC = &PPC::QFRCRegClass; break; 3473 case MVT::v4f32: RC = &PPC::QSRCRegClass; break; 3474 default: RC = &PPC::QBRCRegClass; break; 3475 } 3476 3477 unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC); 3478 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3479 ++QFPR_idx; 3480 } else { 3481 if (CallConv == CallingConv::Fast) 3482 ComputeArgOffset(); 3483 needsLoad = true; 3484 } 3485 if (CallConv != CallingConv::Fast || needsLoad) 3486 ArgOffset += Sz; 3487 break; 3488 } 3489 3490 // We need to load the argument to a virtual register if we determined 3491 // above that we ran out of physical registers of the appropriate type. 3492 if (needsLoad) { 3493 if (ObjSize < ArgSize && !isLittleEndian) 3494 CurArgOffset += ArgSize - ObjSize; 3495 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable); 3496 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3497 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo()); 3498 } 3499 3500 InVals.push_back(ArgVal); 3501 } 3502 3503 // Area that is at least reserved in the caller of this function. 3504 unsigned MinReservedArea; 3505 if (HasParameterArea) 3506 MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize); 3507 else 3508 MinReservedArea = LinkageSize; 3509 3510 // Set the size that is at least reserved in caller of this function. Tail 3511 // call optimized functions' reserved stack space needs to be aligned so that 3512 // taking the difference between two stack areas will result in an aligned 3513 // stack. 3514 MinReservedArea = 3515 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 3516 FuncInfo->setMinReservedArea(MinReservedArea); 3517 3518 // If the function takes variable number of arguments, make a frame index for 3519 // the start of the first vararg value... for expansion of llvm.va_start. 3520 if (isVarArg) { 3521 int Depth = ArgOffset; 3522 3523 FuncInfo->setVarArgsFrameIndex( 3524 MFI.CreateFixedObject(PtrByteSize, Depth, true)); 3525 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3526 3527 // If this function is vararg, store any remaining integer argument regs 3528 // to their spots on the stack so that they may be loaded by dereferencing 3529 // the result of va_next. 3530 for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 3531 GPR_idx < Num_GPR_Regs; ++GPR_idx) { 3532 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3533 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3534 SDValue Store = 3535 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3536 MemOps.push_back(Store); 3537 // Increment the address by four for the next argument to store 3538 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT); 3539 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3540 } 3541 } 3542 3543 if (!MemOps.empty()) 3544 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3545 3546 return Chain; 3547 } 3548 3549 SDValue PPCTargetLowering::LowerFormalArguments_Darwin( 3550 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3551 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3552 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3553 // TODO: add description of PPC stack frame format, or at least some docs. 
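// (No authoritative layout docs here yet, per the TODO above; broadly, the
// Darwin frame also places a linkage area at the stack pointer followed by
// the parameter list area, with sizes that differ from SVR4.)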
3554 // 3555 MachineFunction &MF = DAG.getMachineFunction(); 3556 MachineFrameInfo &MFI = MF.getFrameInfo(); 3557 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3558 3559 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3560 bool isPPC64 = PtrVT == MVT::i64; 3561 // Potential tail calls could cause overwriting of argument stack slots. 3562 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3563 (CallConv == CallingConv::Fast)); 3564 unsigned PtrByteSize = isPPC64 ? 8 : 4; 3565 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3566 unsigned ArgOffset = LinkageSize; 3567 // Area that is at least reserved in caller of this function. 3568 unsigned MinReservedArea = ArgOffset; 3569 3570 static const MCPhysReg GPR_32[] = { // 32-bit registers. 3571 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3572 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3573 }; 3574 static const MCPhysReg GPR_64[] = { // 64-bit registers. 3575 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3576 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3577 }; 3578 static const MCPhysReg VR[] = { 3579 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3580 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3581 }; 3582 3583 const unsigned Num_GPR_Regs = array_lengthof(GPR_32); 3584 const unsigned Num_FPR_Regs = 13; 3585 const unsigned Num_VR_Regs = array_lengthof( VR); 3586 3587 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3588 3589 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 3590 3591 // In 32-bit non-varargs functions, the stack space for vectors is after the 3592 // stack space for non-vectors. We do not use this space unless we have 3593 // too many vectors to fit in registers, something that only occurs in 3594 // constructed examples:), but we have to walk the arglist to figure 3595 // that out...for the pathological case, compute VecArgOffset as the 3596 // start of the vector parameter area. Computing VecArgOffset is the 3597 // entire point of the following loop. 3598 unsigned VecArgOffset = ArgOffset; 3599 if (!isVarArg && !isPPC64) { 3600 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; 3601 ++ArgNo) { 3602 EVT ObjectVT = Ins[ArgNo].VT; 3603 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3604 3605 if (Flags.isByVal()) { 3606 // ObjSize is the true size, ArgSize rounded up to multiple of regs. 3607 unsigned ObjSize = Flags.getByValSize(); 3608 unsigned ArgSize = 3609 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3610 VecArgOffset += ArgSize; 3611 continue; 3612 } 3613 3614 switch(ObjectVT.getSimpleVT().SimpleTy) { 3615 default: llvm_unreachable("Unhandled argument type!"); 3616 case MVT::i1: 3617 case MVT::i32: 3618 case MVT::f32: 3619 VecArgOffset += 4; 3620 break; 3621 case MVT::i64: // PPC64 3622 case MVT::f64: 3623 // FIXME: We are guaranteed to be !isPPC64 at this point. 3624 // Does MVT::i64 apply? 3625 VecArgOffset += 8; 3626 break; 3627 case MVT::v4f32: 3628 case MVT::v4i32: 3629 case MVT::v8i16: 3630 case MVT::v16i8: 3631 // Nothing to do, we're only looking at Nonvector args here. 3632 break; 3633 } 3634 } 3635 } 3636 // We've found where the vector parameter area in memory is. Skip the 3637 // first 12 parameters; these don't use that memory. 3638 VecArgOffset = ((VecArgOffset+15)/16)*16; 3639 VecArgOffset += 12*16; 3640 3641 // Add DAG nodes to load the arguments or copy them out of registers. On 3642 // entry to a function on PPC, the arguments start after the linkage area, 3643 // although the first ones are often in registers. 
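// Worked example of the VecArgOffset computation above: if the non-vector
// parameters end at offset 56, VecArgOffset rounds up to 64 and then skips
// 12 * 16 = 192 bytes, so a 13th vector parameter would be placed at 256.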
3644 3645 SmallVector<SDValue, 8> MemOps; 3646 unsigned nAltivecParamsAtEnd = 0; 3647 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 3648 unsigned CurArgIdx = 0; 3649 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3650 SDValue ArgVal; 3651 bool needsLoad = false; 3652 EVT ObjectVT = Ins[ArgNo].VT; 3653 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 3654 unsigned ArgSize = ObjSize; 3655 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3656 if (Ins[ArgNo].isOrigArg()) { 3657 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 3658 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 3659 } 3660 unsigned CurArgOffset = ArgOffset; 3661 3662 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 3663 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 3664 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 3665 if (isVarArg || isPPC64) { 3666 MinReservedArea = ((MinReservedArea+15)/16)*16; 3667 MinReservedArea += CalculateStackSlotSize(ObjectVT, 3668 Flags, 3669 PtrByteSize); 3670 } else nAltivecParamsAtEnd++; 3671 } else 3672 // Calculate min reserved area. 3673 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 3674 Flags, 3675 PtrByteSize); 3676 3677 // FIXME the codegen can be much improved in some cases. 3678 // We do not have to keep everything in memory. 3679 if (Flags.isByVal()) { 3680 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 3681 3682 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3683 ObjSize = Flags.getByValSize(); 3684 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3685 // Objects of size 1 and 2 are right justified, everything else is 3686 // left justified. This means the memory address is adjusted forwards. 3687 if (ObjSize==1 || ObjSize==2) { 3688 CurArgOffset = CurArgOffset + (4 - ObjSize); 3689 } 3690 // The value of the object is its address. 3691 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true); 3692 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3693 InVals.push_back(FIN); 3694 if (ObjSize==1 || ObjSize==2) { 3695 if (GPR_idx != Num_GPR_Regs) { 3696 unsigned VReg; 3697 if (isPPC64) 3698 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3699 else 3700 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3701 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3702 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16; 3703 SDValue Store = 3704 DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 3705 MachinePointerInfo(&*FuncArg), ObjType); 3706 MemOps.push_back(Store); 3707 ++GPR_idx; 3708 } 3709 3710 ArgOffset += PtrByteSize; 3711 3712 continue; 3713 } 3714 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3715 // Store whatever pieces of the object are in registers 3716 // to memory. ArgOffset will be the address of the beginning 3717 // of the object. 
3718 if (GPR_idx != Num_GPR_Regs) { 3719 unsigned VReg; 3720 if (isPPC64) 3721 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3722 else 3723 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3724 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true); 3725 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3726 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3727 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3728 MachinePointerInfo(&*FuncArg, j)); 3729 MemOps.push_back(Store); 3730 ++GPR_idx; 3731 ArgOffset += PtrByteSize; 3732 } else { 3733 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 3734 break; 3735 } 3736 } 3737 continue; 3738 } 3739 3740 switch (ObjectVT.getSimpleVT().SimpleTy) { 3741 default: llvm_unreachable("Unhandled argument type!"); 3742 case MVT::i1: 3743 case MVT::i32: 3744 if (!isPPC64) { 3745 if (GPR_idx != Num_GPR_Regs) { 3746 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3747 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 3748 3749 if (ObjectVT == MVT::i1) 3750 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal); 3751 3752 ++GPR_idx; 3753 } else { 3754 needsLoad = true; 3755 ArgSize = PtrByteSize; 3756 } 3757 // All int arguments reserve stack space in the Darwin ABI. 3758 ArgOffset += PtrByteSize; 3759 break; 3760 } 3761 // FALLTHROUGH 3762 case MVT::i64: // PPC64 3763 if (GPR_idx != Num_GPR_Regs) { 3764 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3765 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3766 3767 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3768 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3769 // value to MVT::i64 and then truncate to the correct register size. 3770 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3771 3772 ++GPR_idx; 3773 } else { 3774 needsLoad = true; 3775 ArgSize = PtrByteSize; 3776 } 3777 // All int arguments reserve stack space in the Darwin ABI. 3778 ArgOffset += 8; 3779 break; 3780 3781 case MVT::f32: 3782 case MVT::f64: 3783 // Every 4 bytes of argument space consumes one of the GPRs available for 3784 // argument passing. 3785 if (GPR_idx != Num_GPR_Regs) { 3786 ++GPR_idx; 3787 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 3788 ++GPR_idx; 3789 } 3790 if (FPR_idx != Num_FPR_Regs) { 3791 unsigned VReg; 3792 3793 if (ObjectVT == MVT::f32) 3794 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 3795 else 3796 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 3797 3798 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3799 ++FPR_idx; 3800 } else { 3801 needsLoad = true; 3802 } 3803 3804 // All FP arguments reserve stack space in the Darwin ABI. 3805 ArgOffset += isPPC64 ? 8 : ObjSize; 3806 break; 3807 case MVT::v4f32: 3808 case MVT::v4i32: 3809 case MVT::v8i16: 3810 case MVT::v16i8: 3811 // Note that vector arguments in registers don't reserve stack space, 3812 // except in varargs functions. 3813 if (VR_idx != Num_VR_Regs) { 3814 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 3815 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3816 if (isVarArg) { 3817 while ((ArgOffset % 16) != 0) { 3818 ArgOffset += PtrByteSize; 3819 if (GPR_idx != Num_GPR_Regs) 3820 GPR_idx++; 3821 } 3822 ArgOffset += 16; 3823 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 3824 } 3825 ++VR_idx; 3826 } else { 3827 if (!isVarArg && !isPPC64) { 3828 // Vectors go after all the nonvectors. 
3829         CurArgOffset = VecArgOffset;
3830         VecArgOffset += 16;
3831       } else {
3832         // Vectors are aligned.
3833         ArgOffset = ((ArgOffset+15)/16)*16;
3834         CurArgOffset = ArgOffset;
3835         ArgOffset += 16;
3836       }
3837       needsLoad = true;
3838     }
3839     break;
3840   }
3841
3842   // We need to load the argument to a virtual register if we determined above
3843   // that we ran out of physical registers of the appropriate type.
3844   if (needsLoad) {
3845     int FI = MFI.CreateFixedObject(ObjSize,
3846                                    CurArgOffset + (ArgSize - ObjSize),
3847                                    isImmutable);
3848     SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3849     ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
3850   }
3851
3852   InVals.push_back(ArgVal);
3853 }
3854
3855 // Allow for Altivec parameters at the end, if needed.
3856 if (nAltivecParamsAtEnd) {
3857   MinReservedArea = ((MinReservedArea+15)/16)*16;
3858   MinReservedArea += 16*nAltivecParamsAtEnd;
3859 }
3860
3861 // Area that is at least reserved in the caller of this function.
3862 MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
3863
3864 // Set the size that is at least reserved in the caller of this function. Tail
3865 // call optimized functions' reserved stack space needs to be aligned so that
3866 // taking the difference between two stack areas will result in an aligned
3867 // stack.
3868 MinReservedArea =
3869     EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3870 FuncInfo->setMinReservedArea(MinReservedArea);
3871
3872 // If the function takes a variable number of arguments, make a frame index
3873 // for the start of the first vararg value... for expansion of llvm.va_start.
3874 if (isVarArg) {
3875   int Depth = ArgOffset;
3876
3877   FuncInfo->setVarArgsFrameIndex(
3878       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
3879                             Depth, true));
3880   SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3881
3882   // If this function is vararg, store any remaining integer argument regs
3883   // to their spots on the stack so that they may be loaded by dereferencing
3884   // the result of va_next.
3885   for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
3886     unsigned VReg;
3887
3888     if (isPPC64)
3889       VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3890     else
3891       VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
3892
3893     SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3894     SDValue Store =
3895         DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3896     MemOps.push_back(Store);
3897     // Increment the address by the pointer size for the next argument to store.
3898     SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
3899     FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3900   }
3901 }
3902
3903 if (!MemOps.empty())
3904   Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3905
3906 return Chain;
3907 }
3908
3909 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
3910 /// adjusted to accommodate the arguments for the tail call.
3911 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
3912                                    unsigned ParamSize) {
3913
3914   if (!isTailCall) return 0;
3915
3916   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
3917   unsigned CallerMinReservedArea = FI->getMinReservedArea();
3918   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
3919   // Remember only if the new adjustment is bigger.
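// E.g. a caller that reserved 112 bytes tail-calling a function needing 144
// gets SPDiff == -32; the most negative delta seen so far is kept.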
3920   if (SPDiff < FI->getTailCallSPDelta())
3921     FI->setTailCallSPDelta(SPDiff);
3922
3923   return SPDiff;
3924 }
3925
3926 static bool isFunctionGlobalAddress(SDValue Callee);
3927
3928 static bool
3929 resideInSameModule(SDValue Callee, Reloc::Model RelMod) {
3930   // If !G, Callee can be an external symbol.
3931   GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
3932   if (!G) return false;
3933
3934   const GlobalValue *GV = G->getGlobal();
3935
3936   if (GV->isDeclaration()) return false;
3937
3938   switch(GV->getLinkage()) {
3939   default: llvm_unreachable("unknown linkage type");
3940   case GlobalValue::AvailableExternallyLinkage:
3941   case GlobalValue::ExternalWeakLinkage:
3942     return false;
3943
3944   // A callee with weak linkage is allowed if it has hidden or protected
3945   // visibility.
3946   case GlobalValue::LinkOnceAnyLinkage:
3947   case GlobalValue::LinkOnceODRLinkage: // e.g. c++ inline functions
3948   case GlobalValue::WeakAnyLinkage:
3949   case GlobalValue::WeakODRLinkage: // e.g. c++ template instantiation
3950     if (GV->hasDefaultVisibility())
3951       return false;
3952
3953   case GlobalValue::ExternalLinkage:
3954   case GlobalValue::InternalLinkage:
3955   case GlobalValue::PrivateLinkage:
3956     break;
3957   }
3958
3959   // With '-fPIC', a call to a default-visibility function needs a 'nop'
3960   // inserted after the call, whether or not the callee resides in the same
3961   // module, so we treat it as residing in a different module.
3962   if (RelMod == Reloc::PIC_ && GV->hasDefaultVisibility())
3963     return false;
3964
3965   return true;
3966 }
3967
3968 static bool
3969 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
3970                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
3971   assert(Subtarget.isSVR4ABI() && Subtarget.isPPC64());
3972
3973   const unsigned PtrByteSize = 8;
3974   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3975
3976   static const MCPhysReg GPR[] = {
3977     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3978     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3979   };
3980   static const MCPhysReg VR[] = {
3981     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3982     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3983   };
3984
3985   const unsigned NumGPRs = array_lengthof(GPR);
3986   const unsigned NumFPRs = 13;
3987   const unsigned NumVRs = array_lengthof(VR);
3988   const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
3989
3990   unsigned NumBytes = LinkageSize;
3991   unsigned AvailableFPRs = NumFPRs;
3992   unsigned AvailableVRs = NumVRs;
3993
3994   for (const ISD::OutputArg& Param : Outs) {
3995     if (Param.Flags.isNest()) continue;
3996
3997     if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
3998                                PtrByteSize, LinkageSize, ParamAreaSize,
3999                                NumBytes, AvailableFPRs, AvailableVRs,
4000                                Subtarget.hasQPX()))
4001       return true;
4002   }
4003   return false;
4004 }
4005
4006 static bool
4007 hasSameArgumentList(const Function *CallerFn, ImmutableCallSite *CS) {
4008   if (CS->arg_size() != CallerFn->getArgumentList().size())
4009     return false;
4010
4011   ImmutableCallSite::arg_iterator CalleeArgIter = CS->arg_begin();
4012   ImmutableCallSite::arg_iterator CalleeArgEnd = CS->arg_end();
4013   Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4014
4015   for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4016     const Value* CalleeArg = *CalleeArgIter;
4017     const Value* CallerArg = &(*CallerArgIter);
4018     if (CalleeArg == CallerArg)
4019       continue;
4020
4021     // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4022     //        tail call @callee([4 x i64] undef, [4 x i64] %b)
4023     //      }
4024     // The 1st argument of the callee is undef and has the same type as the caller's.
4025     if (CalleeArg->getType() == CallerArg->getType() &&
4026         isa<UndefValue>(CalleeArg))
4027       continue;
4028
4029     return false;
4030   }
4031
4032   return true;
4033 }
4034
4035 bool
4036 PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4037                                    SDValue Callee,
4038                                    CallingConv::ID CalleeCC,
4039                                    ImmutableCallSite *CS,
4040                                    bool isVarArg,
4041                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
4042                                    const SmallVectorImpl<ISD::InputArg> &Ins,
4043                                    SelectionDAG& DAG) const {
4044   bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4045
4046   if (DisableSCO && !TailCallOpt) return false;
4047
4048   // Variadic argument functions are not supported.
4049   if (isVarArg) return false;
4050
4051   MachineFunction &MF = DAG.getMachineFunction();
4052   CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
4053
4054   // Tail or sibling call optimization (TCO/SCO) requires the callee and
4055   // caller to have the same calling convention.
4056   if (CallerCC != CalleeCC) return false;
4057
4058   // SCO supports only the C and Fast calling conventions.
4059   if (CalleeCC != CallingConv::Fast && CalleeCC != CallingConv::C)
4060     return false;
4061
4062   // Functions containing byval parameters are not supported.
4063   if (std::any_of(Ins.begin(), Ins.end(),
4064                   [](const ISD::InputArg& IA) { return IA.Flags.isByVal(); }))
4065     return false;
4066
4067   // No TCO/SCO on indirect calls because the caller has to restore its TOC.
4068   if (!isFunctionGlobalAddress(Callee) &&
4069       !isa<ExternalSymbolSDNode>(Callee))
4070     return false;
4071
4072   // Check if the callee resides in the same module, because for now the PPC64
4073   // SVR4 ABI (ELFv1/ELFv2) doesn't allow tail calls to a symbol that resides
4074   // in another module.
4075   // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4076   if (!resideInSameModule(Callee, getTargetMachine().getRelocationModel()))
4077     return false;
4078
4079   // TCO allows altering the callee ABI, so we don't have to check further.
4080   if (CalleeCC == CallingConv::Fast && TailCallOpt)
4081     return true;
4082
4083   if (DisableSCO) return false;
4084
4085   // If the callee uses the same argument list as the caller, we can apply SCO
4086   // in this case. If it does not, we need to check whether the callee needs
4087   // stack slots for passing arguments.
4088   if (!hasSameArgumentList(MF.getFunction(), CS) &&
4089       needStackSlotPassParameters(Subtarget, Outs)) {
4090     return false;
4091   }
4092
4093   return true;
4094 }
4095
4096 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4097 /// for tail call optimization. Targets which want to do tail call
4098 /// optimization should implement this function.
4099 bool
4100 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4101                                                      CallingConv::ID CalleeCC,
4102                                                      bool isVarArg,
4103                                       const SmallVectorImpl<ISD::InputArg> &Ins,
4104                                                      SelectionDAG& DAG) const {
4105   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4106     return false;
4107
4108   // Variable argument functions are not supported.
4109   if (isVarArg)
4110     return false;
4111
4112   MachineFunction &MF = DAG.getMachineFunction();
4113   CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
4114   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4115     // Functions containing byval parameters are not supported.
4116 for (unsigned i = 0; i != Ins.size(); i++) { 4117 ISD::ArgFlagsTy Flags = Ins[i].Flags; 4118 if (Flags.isByVal()) return false; 4119 } 4120 4121 // Non-PIC/GOT tail calls are supported. 4122 if (getTargetMachine().getRelocationModel() != Reloc::PIC_) 4123 return true; 4124 4125 // At the moment we can only do local tail calls (in same module, hidden 4126 // or protected) if we are generating PIC. 4127 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 4128 return G->getGlobal()->hasHiddenVisibility() 4129 || G->getGlobal()->hasProtectedVisibility(); 4130 } 4131 4132 return false; 4133 } 4134 4135 /// isCallCompatibleAddress - Return the immediate to use if the specified 4136 /// 32-bit value is representable in the immediate field of a BxA instruction. 4137 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) { 4138 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 4139 if (!C) return nullptr; 4140 4141 int Addr = C->getZExtValue(); 4142 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero. 4143 SignExtend32<26>(Addr) != Addr) 4144 return nullptr; // Top 6 bits have to be sext of immediate. 4145 4146 return DAG 4147 .getConstant( 4148 (int)C->getZExtValue() >> 2, SDLoc(Op), 4149 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout())) 4150 .getNode(); 4151 } 4152 4153 namespace { 4154 4155 struct TailCallArgumentInfo { 4156 SDValue Arg; 4157 SDValue FrameIdxOp; 4158 int FrameIdx; 4159 4160 TailCallArgumentInfo() : FrameIdx(0) {} 4161 }; 4162 } 4163 4164 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. 4165 static void StoreTailCallArgumentsToStackSlot( 4166 SelectionDAG &DAG, SDValue Chain, 4167 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs, 4168 SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) { 4169 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 4170 SDValue Arg = TailCallArgs[i].Arg; 4171 SDValue FIN = TailCallArgs[i].FrameIdxOp; 4172 int FI = TailCallArgs[i].FrameIdx; 4173 // Store relative to framepointer. 4174 MemOpChains.push_back(DAG.getStore( 4175 Chain, dl, Arg, FIN, 4176 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI))); 4177 } 4178 } 4179 4180 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 4181 /// the appropriate stack slot for the tail call optimized function call. 4182 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain, 4183 SDValue OldRetAddr, SDValue OldFP, 4184 int SPDiff, const SDLoc &dl) { 4185 if (SPDiff) { 4186 // Calculate the new stack slot for the return address. 4187 MachineFunction &MF = DAG.getMachineFunction(); 4188 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); 4189 const PPCFrameLowering *FL = Subtarget.getFrameLowering(); 4190 bool isPPC64 = Subtarget.isPPC64(); 4191 int SlotSize = isPPC64 ? 8 : 4; 4192 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset(); 4193 int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize, 4194 NewRetAddrLoc, true); 4195 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 4196 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 4197 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx, 4198 MachinePointerInfo::getFixedStack(MF, NewRetAddr)); 4199 4200 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack 4201 // slot as the FP is never overwritten. 
4202 if (Subtarget.isDarwinABI()) { 4203 int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset(); 4204 int NewFPIdx = MF.getFrameInfo().CreateFixedObject(SlotSize, NewFPLoc, 4205 true); 4206 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 4207 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx, 4208 MachinePointerInfo::getFixedStack( 4209 DAG.getMachineFunction(), NewFPIdx)); 4210 } 4211 } 4212 return Chain; 4213 } 4214 4215 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate 4216 /// the position of the argument. 4217 static void 4218 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 4219 SDValue Arg, int SPDiff, unsigned ArgOffset, 4220 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) { 4221 int Offset = ArgOffset + SPDiff; 4222 uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8; 4223 int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true); 4224 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 4225 SDValue FIN = DAG.getFrameIndex(FI, VT); 4226 TailCallArgumentInfo Info; 4227 Info.Arg = Arg; 4228 Info.FrameIdxOp = FIN; 4229 Info.FrameIdx = FI; 4230 TailCallArguments.push_back(Info); 4231 } 4232 4233 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address 4234 /// stack slot. Returns the chain as result and the loaded frame pointers in 4235 /// LROpOut/FPOpout. Used when tail calling. 4236 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr( 4237 SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut, 4238 SDValue &FPOpOut, const SDLoc &dl) const { 4239 if (SPDiff) { 4240 // Load the LR and FP stack slot for later adjusting. 4241 EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32; 4242 LROpOut = getReturnAddrFrameIndex(DAG); 4243 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo()); 4244 Chain = SDValue(LROpOut.getNode(), 1); 4245 4246 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack 4247 // slot as the FP is never overwritten. 4248 if (Subtarget.isDarwinABI()) { 4249 FPOpOut = getFramePointerFrameIndex(DAG); 4250 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo()); 4251 Chain = SDValue(FPOpOut.getNode(), 1); 4252 } 4253 } 4254 return Chain; 4255 } 4256 4257 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 4258 /// by "Src" to address "Dst" of size "Size". Alignment information is 4259 /// specified by the specific parameter attribute. The copy will be passed as 4260 /// a byval function parameter. 4261 /// Sometimes what we are copying is the end of a larger object, the part that 4262 /// does not fit in registers. 4263 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, 4264 SDValue Chain, ISD::ArgFlagsTy Flags, 4265 SelectionDAG &DAG, const SDLoc &dl) { 4266 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32); 4267 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 4268 false, false, false, MachinePointerInfo(), 4269 MachinePointerInfo()); 4270 } 4271 4272 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 4273 /// tail calls. 
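/// For non-tail calls the argument is stored immediately (vector arguments
/// first compute StackPtr + ArgOffset as the address); for tail calls it is
/// only recorded, to be stored once the stack has been adjusted.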
4274 static void LowerMemOpCallTo( 4275 SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, 4276 SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, 4277 bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains, 4278 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) { 4279 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4280 if (!isTailCall) { 4281 if (isVector) { 4282 SDValue StackPtr; 4283 if (isPPC64) 4284 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 4285 else 4286 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 4287 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4288 DAG.getConstant(ArgOffset, dl, PtrVT)); 4289 } 4290 MemOpChains.push_back( 4291 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 4292 // Calculate and remember argument location. 4293 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 4294 TailCallArguments); 4295 } 4296 4297 static void 4298 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 4299 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp, 4300 SDValue FPOp, 4301 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 4302 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 4303 // might overwrite each other in case of tail call optimization. 4304 SmallVector<SDValue, 8> MemOpChains2; 4305 // Do not flag preceding copytoreg stuff together with the following stuff. 4306 InFlag = SDValue(); 4307 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 4308 MemOpChains2, dl); 4309 if (!MemOpChains2.empty()) 4310 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); 4311 4312 // Store the return address to the appropriate stack slot. 4313 Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl); 4314 4315 // Emit callseq_end just before tailcall node. 4316 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4317 DAG.getIntPtrConstant(0, dl, true), InFlag, dl); 4318 InFlag = Chain.getValue(1); 4319 } 4320 4321 // Is this global address that of a function that can be called by name? (as 4322 // opposed to something that must hold a descriptor for an indirect call). 4323 static bool isFunctionGlobalAddress(SDValue Callee) { 4324 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 4325 if (Callee.getOpcode() == ISD::GlobalTLSAddress || 4326 Callee.getOpcode() == ISD::TargetGlobalTLSAddress) 4327 return false; 4328 4329 return G->getGlobal()->getValueType()->isFunctionTy(); 4330 } 4331 4332 return false; 4333 } 4334 4335 static unsigned 4336 PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain, 4337 SDValue CallSeqStart, const SDLoc &dl, int SPDiff, bool isTailCall, 4338 bool isPatchPoint, bool hasNest, 4339 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, 4340 SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys, 4341 ImmutableCallSite *CS, const PPCSubtarget &Subtarget) { 4342 4343 bool isPPC64 = Subtarget.isPPC64(); 4344 bool isSVR4ABI = Subtarget.isSVR4ABI(); 4345 bool isELFv2ABI = Subtarget.isELFv2ABI(); 4346 4347 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4348 NodeTys.push_back(MVT::Other); // Returns a chain 4349 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. 
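// (The chain result orders the call against memory operations; the glue
// result keeps the physical-register argument copies adjacent to the call so
// nothing can be scheduled between them.)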
4350 4351 unsigned CallOpc = PPCISD::CALL; 4352 4353 bool needIndirectCall = true; 4354 if (!isSVR4ABI || !isPPC64) 4355 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { 4356 // If this is an absolute destination address, use the munged value. 4357 Callee = SDValue(Dest, 0); 4358 needIndirectCall = false; 4359 } 4360 4361 // PC-relative references to external symbols should go through $stub, unless 4362 // we're building with the leopard linker or later, which automatically 4363 // synthesizes these stubs. 4364 const TargetMachine &TM = DAG.getTarget(); 4365 const Module *Mod = DAG.getMachineFunction().getFunction()->getParent(); 4366 const GlobalValue *GV = nullptr; 4367 if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) 4368 GV = G->getGlobal(); 4369 bool Local = TM.shouldAssumeDSOLocal(*Mod, GV); 4370 bool UsePlt = !Local && Subtarget.isTargetELF() && !isPPC64; 4371 4372 if (isFunctionGlobalAddress(Callee)) { 4373 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee); 4374 // A call to a TLS address is actually an indirect call to a 4375 // thread-specific pointer. 4376 unsigned OpFlags = 0; 4377 if (UsePlt) 4378 OpFlags = PPCII::MO_PLT; 4379 4380 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, 4381 // every direct call is) turn it into a TargetGlobalAddress / 4382 // TargetExternalSymbol node so that legalize doesn't hack it. 4383 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, 4384 Callee.getValueType(), 0, OpFlags); 4385 needIndirectCall = false; 4386 } 4387 4388 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 4389 unsigned char OpFlags = 0; 4390 4391 if (UsePlt) 4392 OpFlags = PPCII::MO_PLT; 4393 4394 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(), 4395 OpFlags); 4396 needIndirectCall = false; 4397 } 4398 4399 if (isPatchPoint) { 4400 // We'll form an invalid direct call when lowering a patchpoint; the full 4401 // sequence for an indirect call is complicated, and many of the 4402 // instructions introduced might have side effects (and, thus, can't be 4403 // removed later). The call itself will be removed as soon as the 4404 // argument/return lowering is complete, so the fact that it has the wrong 4405 // kind of operands should not really matter. 4406 needIndirectCall = false; 4407 } 4408 4409 if (needIndirectCall) { 4410 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 4411 // to do the call, we can't use PPCISD::CALL. 4412 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 4413 4414 if (isSVR4ABI && isPPC64 && !isELFv2ABI) { 4415 // Function pointers in the 64-bit SVR4 ABI do not point to the function 4416 // entry point, but to the function descriptor (the function entry point 4417 // address is part of the function descriptor though). 4418 // The function descriptor is a three doubleword structure with the 4419 // following fields: function entry point, TOC base address and 4420 // environment pointer. 4421 // Thus for a call through a function pointer, the following actions need 4422 // to be performed: 4423 // 1. Save the TOC of the caller in the TOC save area of its stack 4424 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). 4425 // 2. Load the address of the function entry point from the function 4426 // descriptor. 4427 // 3. Load the TOC of the callee from the function descriptor into r2. 4428 // 4. Load the environment pointer from the function descriptor into 4429 // r11. 4430 // 5. Branch to the function entry point address. 
4431 // 6. On return of the callee, the TOC of the caller needs to be 4432 // restored (this is done in FinishCall()). 4433 // 4434 // The loads are scheduled at the beginning of the call sequence, and the 4435 // register copies are flagged together to ensure that no other 4436 // operations can be scheduled in between. E.g. without flagging the 4437 // copies together, a TOC access in the caller could be scheduled between 4438 // the assignment of the callee TOC and the branch to the callee, which 4439 // results in the TOC access going through the TOC of the callee instead 4440 // of going through the TOC of the caller, which leads to incorrect code. 4441 4442 // Load the address of the function entry point from the function 4443 // descriptor. 4444 SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1); 4445 if (LDChain.getValueType() == MVT::Glue) 4446 LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2); 4447 4448 auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors() 4449 ? MachineMemOperand::MOInvariant 4450 : MachineMemOperand::MONone; 4451 4452 MachinePointerInfo MPI(CS ? CS->getCalledValue() : nullptr); 4453 SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI, 4454 /* Alignment = */ 8, MMOFlags); 4455 4456 // Load environment pointer into r11. 4457 SDValue PtrOff = DAG.getIntPtrConstant(16, dl); 4458 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 4459 SDValue LoadEnvPtr = 4460 DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, MPI.getWithOffset(16), 4461 /* Alignment = */ 8, MMOFlags); 4462 4463 SDValue TOCOff = DAG.getIntPtrConstant(8, dl); 4464 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff); 4465 SDValue TOCPtr = 4466 DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, MPI.getWithOffset(8), 4467 /* Alignment = */ 8, MMOFlags); 4468 4469 setUsesTOCBasePtr(DAG); 4470 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr, 4471 InFlag); 4472 Chain = TOCVal.getValue(0); 4473 InFlag = TOCVal.getValue(1); 4474 4475 // If the function call has an explicit 'nest' parameter, it takes the 4476 // place of the environment pointer. 4477 if (!hasNest) { 4478 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 4479 InFlag); 4480 4481 Chain = EnvVal.getValue(0); 4482 InFlag = EnvVal.getValue(1); 4483 } 4484 4485 MTCTROps[0] = Chain; 4486 MTCTROps[1] = LoadFuncPtr; 4487 MTCTROps[2] = InFlag; 4488 } 4489 4490 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, 4491 makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2)); 4492 InFlag = Chain.getValue(1); 4493 4494 NodeTys.clear(); 4495 NodeTys.push_back(MVT::Other); 4496 NodeTys.push_back(MVT::Glue); 4497 Ops.push_back(Chain); 4498 CallOpc = PPCISD::BCTRL; 4499 Callee.setNode(nullptr); 4500 // Add use of X11 (holding environment pointer) 4501 if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest) 4502 Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); 4503 // Add CTR register as callee so a bctr can be emitted later. 4504 if (isTailCall) 4505 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 4506 } 4507 4508 // If this is a direct call, pass the chain and the callee. 4509 if (Callee.getNode()) { 4510 Ops.push_back(Chain); 4511 Ops.push_back(Callee); 4512 } 4513 // If this is a tail call add stack pointer delta. 4514 if (isTailCall) 4515 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32)); 4516 4517 // Add argument registers to the end of the list so that they are known live 4518 // into the call. 
4519 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 4520 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 4521 RegsToPass[i].second.getValueType())); 4522 4523 // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live 4524 // into the call. 4525 if (isSVR4ABI && isPPC64 && !isPatchPoint) { 4526 setUsesTOCBasePtr(DAG); 4527 Ops.push_back(DAG.getRegister(PPC::X2, PtrVT)); 4528 } 4529 4530 return CallOpc; 4531 } 4532 4533 static 4534 bool isLocalCall(const SDValue &Callee) 4535 { 4536 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 4537 return G->getGlobal()->isStrongDefinitionForLinker(); 4538 return false; 4539 } 4540 4541 SDValue PPCTargetLowering::LowerCallResult( 4542 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, 4543 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 4544 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 4545 4546 SmallVector<CCValAssign, 16> RVLocs; 4547 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 4548 *DAG.getContext()); 4549 CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC); 4550 4551 // Copy all of the result registers out of their specified physreg. 4552 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 4553 CCValAssign &VA = RVLocs[i]; 4554 assert(VA.isRegLoc() && "Can only return in registers!"); 4555 4556 SDValue Val = DAG.getCopyFromReg(Chain, dl, 4557 VA.getLocReg(), VA.getLocVT(), InFlag); 4558 Chain = Val.getValue(1); 4559 InFlag = Val.getValue(2); 4560 4561 switch (VA.getLocInfo()) { 4562 default: llvm_unreachable("Unknown loc info!"); 4563 case CCValAssign::Full: break; 4564 case CCValAssign::AExt: 4565 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4566 break; 4567 case CCValAssign::ZExt: 4568 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val, 4569 DAG.getValueType(VA.getValVT())); 4570 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4571 break; 4572 case CCValAssign::SExt: 4573 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val, 4574 DAG.getValueType(VA.getValVT())); 4575 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4576 break; 4577 } 4578 4579 InVals.push_back(Val); 4580 } 4581 4582 return Chain; 4583 } 4584 4585 SDValue PPCTargetLowering::FinishCall( 4586 CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg, 4587 bool isPatchPoint, bool hasNest, SelectionDAG &DAG, 4588 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue InFlag, 4589 SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff, 4590 unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins, 4591 SmallVectorImpl<SDValue> &InVals, ImmutableCallSite *CS) const { 4592 4593 std::vector<EVT> NodeTys; 4594 SmallVector<SDValue, 8> Ops; 4595 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl, 4596 SPDiff, isTailCall, isPatchPoint, hasNest, 4597 RegsToPass, Ops, NodeTys, CS, Subtarget); 4598 4599 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls 4600 if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64()) 4601 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); 4602 4603 // When performing tail call optimization the callee pops its arguments off 4604 // the stack. Account for this here so these bytes can be pushed back on in 4605 // PPCFrameLowering::eliminateCallFramePseudoInstr. 4606 int BytesCalleePops = 4607 (CallConv == CallingConv::Fast && 4608 getTargetMachine().Options.GuaranteedTailCallOpt) ? 
      NumBytes : 0;

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  // Emit tail call.
  if (isTailCall) {
    assert(((Callee.getOpcode() == ISD::Register &&
             cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
            Callee.getOpcode() == ISD::TargetExternalSymbol ||
            Callee.getOpcode() == ISD::TargetGlobalAddress ||
            isa<ConstantSDNode>(Callee)) &&
           "Expecting a global address, external symbol, absolute value or "
           "register");

    DAG.getMachineFunction().getFrameInfo().setHasTailCall();
    return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
  }

  // Add a NOP immediately after the branch instruction when using the 64-bit
  // SVR4 ABI. At link time, if caller and callee are in a different module and
  // thus have a different TOC, the call will be replaced with a call to a stub
  // function which saves the current TOC, loads the TOC of the callee and
  // branches to the callee. The NOP will be replaced with a load instruction
  // which restores the TOC of the caller from the TOC save slot of the current
  // stack frame. If caller and callee belong to the same module (and have the
  // same TOC), the NOP will remain unchanged.

  if (!isTailCall && Subtarget.isSVR4ABI() && Subtarget.isPPC64() &&
      !isPatchPoint) {
    if (CallOpc == PPCISD::BCTRL) {
      // This is a call through a function pointer.
      // Restore the caller TOC from the save area into R2.
      // See PrepareCall() for more information about calls through function
      // pointers in the 64-bit SVR4 ABI.
      // We are using a target-specific load with r2 hard coded, because the
      // result of a target-independent load would never go directly into r2,
      // since r2 is a reserved register (which prevents the register allocator
      // from allocating it), resulting in an additional register being
      // allocated and an unnecessary move instruction being generated.
      CallOpc = PPCISD::BCTRL_LOAD_TOC;

      EVT PtrVT = getPointerTy(DAG.getDataLayout());
      SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
      unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
      SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
      SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);

      // The address needs to go after the chain input but before the flag (or
      // any other variadic arguments).
      Ops.insert(std::next(Ops.begin()), AddTOC);
    } else if ((CallOpc == PPCISD::CALL) &&
               (!isLocalCall(Callee) ||
                DAG.getTarget().getRelocationModel() == Reloc::PIC_))
      // Otherwise insert NOP for non-local calls.
4669 CallOpc = PPCISD::CALL_NOP; 4670 } 4671 4672 Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops); 4673 InFlag = Chain.getValue(1); 4674 4675 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4676 DAG.getIntPtrConstant(BytesCalleePops, dl, true), 4677 InFlag, dl); 4678 if (!Ins.empty()) 4679 InFlag = Chain.getValue(1); 4680 4681 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 4682 Ins, dl, DAG, InVals); 4683 } 4684 4685 SDValue 4686 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 4687 SmallVectorImpl<SDValue> &InVals) const { 4688 SelectionDAG &DAG = CLI.DAG; 4689 SDLoc &dl = CLI.DL; 4690 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 4691 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 4692 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 4693 SDValue Chain = CLI.Chain; 4694 SDValue Callee = CLI.Callee; 4695 bool &isTailCall = CLI.IsTailCall; 4696 CallingConv::ID CallConv = CLI.CallConv; 4697 bool isVarArg = CLI.IsVarArg; 4698 bool isPatchPoint = CLI.IsPatchPoint; 4699 ImmutableCallSite *CS = CLI.CS; 4700 4701 if (isTailCall) { 4702 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) 4703 isTailCall = 4704 IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS, 4705 isVarArg, Outs, Ins, DAG); 4706 else 4707 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 4708 Ins, DAG); 4709 if (isTailCall) { 4710 ++NumTailCalls; 4711 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 4712 ++NumSiblingCalls; 4713 4714 assert(isa<GlobalAddressSDNode>(Callee) && 4715 "Callee should be an llvm::Function object."); 4716 DEBUG( 4717 const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal(); 4718 const unsigned Width = 80 - strlen("TCO caller: ") 4719 - strlen(", callee linkage: 0, 0"); 4720 dbgs() << "TCO caller: " 4721 << left_justify(DAG.getMachineFunction().getName(), Width) 4722 << ", callee linkage: " 4723 << GV->getVisibility() << ", " << GV->getLinkage() << "\n" 4724 ); 4725 } 4726 } 4727 4728 if (!isTailCall && CS && CS->isMustTailCall()) 4729 report_fatal_error("failed to perform tail call elimination on a call " 4730 "site marked musttail"); 4731 4732 if (Subtarget.isSVR4ABI()) { 4733 if (Subtarget.isPPC64()) 4734 return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg, 4735 isTailCall, isPatchPoint, Outs, OutVals, Ins, 4736 dl, DAG, InVals, CS); 4737 else 4738 return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg, 4739 isTailCall, isPatchPoint, Outs, OutVals, Ins, 4740 dl, DAG, InVals, CS); 4741 } 4742 4743 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg, 4744 isTailCall, isPatchPoint, Outs, OutVals, Ins, 4745 dl, DAG, InVals, CS); 4746 } 4747 4748 SDValue PPCTargetLowering::LowerCall_32SVR4( 4749 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, 4750 bool isTailCall, bool isPatchPoint, 4751 const SmallVectorImpl<ISD::OutputArg> &Outs, 4752 const SmallVectorImpl<SDValue> &OutVals, 4753 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 4754 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, 4755 ImmutableCallSite *CS) const { 4756 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description 4757 // of the 32-bit SVR4 ABI stack frame layout. 
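// (As a reminder, in the 32-bit SVR4 ABI the linkage area is only 8 bytes:
// the back-chain word at SP+0 and the LR save word at SP+4; the parameter
// save area, when one is needed, follows immediately after it.)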

  assert((CallConv == CallingConv::C ||
          CallConv == CallingConv::Fast) && "Unknown calling convention!");

  unsigned PtrByteSize = 4;

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a tail call. As a
  // consequence, the frame pointer will be used for dynamic stack allocation
  // and for restoring the caller's stack pointer in this function's epilogue.
  // This is done because the tail-called function might overwrite the value
  // in this function's (MF) stack pointer stack slot 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, parameter list area and the part of the local variable space which
  // contains copies of aggregates which are passed by value.

  // Assign locations to all of the outgoing arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  PPCCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  // Reserve space for the linkage area on the stack.
  CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
                       PtrByteSize);
  if (useSoftFloat())
    CCInfo.PreAnalyzeCallOperands(Outs);

  if (isVarArg) {
    // Handle fixed and variable vector arguments differently.
    // Fixed vector arguments go into registers as long as registers are
    // available. Variable vector arguments always go into memory.
    unsigned NumArgs = Outs.size();

    for (unsigned i = 0; i != NumArgs; ++i) {
      MVT ArgVT = Outs[i].VT;
      ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
      bool Result;

      if (Outs[i].IsFixed) {
        Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
                               CCInfo);
      } else {
        Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
                                      ArgFlags, CCInfo);
      }

      if (Result) {
#ifndef NDEBUG
        errs() << "Call operand #" << i << " has unhandled type "
               << EVT(ArgVT).getEVTString() << "\n";
#endif
        llvm_unreachable(nullptr);
      }
    }
  } else {
    // All arguments are treated the same.
    CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
  }
  CCInfo.clearWasPPCF128();

  // Assign locations to all of the outgoing aggregate by value arguments.
  SmallVector<CCValAssign, 16> ByValArgLocs;
  CCState CCByValInfo(CallConv, isVarArg, MF, ByValArgLocs, *DAG.getContext());

  // Reserve stack space for the allocations in CCInfo.
  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);

  CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);

  // Size of the linkage area, parameter list area and the part of the local
  // variable space where copies of aggregates which are passed by value are
  // stored.
  unsigned NumBytes = CCByValInfo.getNextStackOffset();

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
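
  // SPDiff is the (signed) amount by which the caller's reserved parameter
  // area differs from what this call needs; a negative value means the frame
  // has to grow before the tail call. For non-tail calls it is simply zero.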

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                               dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
  SmallVector<SDValue, 8> MemOpChains;

  bool seenFloatArg = false;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, j = 0, e = ArgLocs.size();
       i != e;
       ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;

    if (Flags.isByVal()) {
      // Argument is an aggregate which is passed by value, thus we need to
      // create a copy of it in the local variable space of the current stack
      // frame (which is the stack frame of the caller) and pass the address of
      // this copy to the callee.
      assert((j < ByValArgLocs.size()) && "Index out of bounds!");
      CCValAssign &ByValVA = ByValArgLocs[j++];
      assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");

      // Memory reserved in the local variable space of the caller's stack
      // frame.
      unsigned LocMemOffset = ByValVA.getLocMemOffset();

      SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
      PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
                           StackPtr, PtrOff);

      // Create a copy of the argument in the local area of the current
      // stack frame.
      SDValue MemcpyCall =
        CreateCopyOfByValArgument(Arg, PtrOff,
                                  CallSeqStart.getNode()->getOperand(0),
                                  Flags, DAG, dl);

      // This must go outside the CALLSEQ_START..END.
      SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
                           CallSeqStart.getNode()->getOperand(1),
                           SDLoc(MemcpyCall));
      DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
                             NewCallSeqStart.getNode());
      Chain = CallSeqStart = NewCallSeqStart;

      // Pass the address of the aggregate copy on the stack either in a
      // physical register or in the parameter list area of the current stack
      // frame to the callee.
      Arg = PtrOff;
    }
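
    // Note: the memcpy above is chained onto the operand of CALLSEQ_START
    // rather than onto Chain, and the start node is then rebuilt around it.
    // If the copy were emitted inside this call's CALLSEQ_START..CALLSEQ_END
    // bracket and later lowered to a libcall, the two call sequences would
    // nest, which is not a valid DAG.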

    if (VA.isRegLoc()) {
      if (Arg.getValueType() == MVT::i1)
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg);

      seenFloatArg |= VA.getLocVT().isFloatingPoint();
      // Put argument in a physical register.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      // Put argument in the parameter list area of the current stack frame.
      assert(VA.isMemLoc());
      unsigned LocMemOffset = VA.getLocMemOffset();

      if (!isTailCall) {
        SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
                             StackPtr, PtrOff);

        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
      } else {
        // Calculate and remember argument location.
        CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
                                 TailCallArguments);
      }
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // Set CR bit 6 to true if this is a vararg call with floating args passed in
  // registers.
  if (isVarArg) {
    SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Ops[] = { Chain, InFlag };

    Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
                        dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));

    InFlag = Chain.getValue(1);
  }

  if (isTailCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
                    TailCallArguments);

  return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
                    /* unused except on PPC64 ELFv1 */ false, DAG,
                    RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
                    NumBytes, Ins, InVals, CS);
}

// Copy an argument into memory, being careful to do this outside the
// call sequence for the call to which the argument belongs.
SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
    SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
    SelectionDAG &DAG, const SDLoc &dl) const {
  SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
                        CallSeqStart.getNode()->getOperand(0),
                        Flags, DAG, dl);
  // The MEMCPY must go outside the CALLSEQ_START..END.
  SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
                             CallSeqStart.getNode()->getOperand(1),
                             SDLoc(MemcpyCall));
  DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
                         NewCallSeqStart.getNode());
  return NewCallSeqStart;
}

SDValue PPCTargetLowering::LowerCall_64SVR4(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, bool isPatchPoint,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    ImmutableCallSite *CS) const {

  bool isELFv2ABI = Subtarget.isELFv2ABI();
  bool isLittleEndian = Subtarget.isLittleEndian();
  unsigned NumOps = Outs.size();
  bool hasNest = false;
  bool IsSibCall = false;

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  unsigned PtrByteSize = 8;

  MachineFunction &MF = DAG.getMachineFunction();

  if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
    IsSibCall = true;
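
  // A sibling call reuses the caller's frame as-is: SPDiff stays zero and no
  // CALLSEQ_START is emitted below, whereas a GuaranteedTailCallOpt-style
  // (fastcc) tail call may still resize the stack frame.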

  // Mark this function as potentially containing a tail call. As a
  // consequence, the frame pointer will be used for dynamic stack allocation
  // and for restoring the caller's stack pointer in this function's epilogue.
  // This is done because the tail-called function might overwrite the value
  // in this function's (MF) stack pointer stack slot 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  assert(!(CallConv == CallingConv::Fast && isVarArg) &&
         "fastcc not supported on varargs functions");

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes
  // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
  // area is 32 bytes reserved space for [SP][CR][LR][TOC].
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned NumBytes = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  unsigned &QFPR_idx = FPR_idx;

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };
  static const MCPhysReg VSRH[] = {
    PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8,
    PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13
  };

  const unsigned NumGPRs = array_lengthof(GPR);
  const unsigned NumFPRs = 13;
  const unsigned NumVRs = array_lengthof(VR);
  const unsigned NumQFPRs = NumFPRs;

  // When using the fast calling convention, we don't provide backing for
  // arguments that will be in registers.
  unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;

  // Add up all the space actually used.
  for (unsigned i = 0; i != NumOps; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;

    if (Flags.isNest())
      continue;

    if (CallConv == CallingConv::Fast) {
      if (Flags.isByVal())
        NumGPRsUsed += (Flags.getByValSize()+7)/8;
      else
        switch (ArgVT.getSimpleVT().SimpleTy) {
        default: llvm_unreachable("Unexpected ValueType for argument!");
        case MVT::i1:
        case MVT::i32:
        case MVT::i64:
          if (++NumGPRsUsed <= NumGPRs)
            continue;
          break;
        case MVT::v4i32:
        case MVT::v8i16:
        case MVT::v16i8:
        case MVT::v2f64:
        case MVT::v2i64:
        case MVT::v1i128:
          if (++NumVRsUsed <= NumVRs)
            continue;
          break;
        case MVT::v4f32:
          // When using QPX, this is handled like a FP register, otherwise, it
          // is an Altivec register.
          if (Subtarget.hasQPX()) {
            if (++NumFPRsUsed <= NumFPRs)
              continue;
          } else {
            if (++NumVRsUsed <= NumVRs)
              continue;
          }
          break;
        case MVT::f32:
        case MVT::f64:
        case MVT::v4f64: // QPX
        case MVT::v4i1:  // QPX
          if (++NumFPRsUsed <= NumFPRs)
            continue;
          break;
        }
    }

    /* Respect alignment of argument on the stack. */
    unsigned Align =
      CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
    NumBytes = ((NumBytes + Align - 1) / Align) * Align;

    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
    if (Flags.isInConsecutiveRegsLast())
      NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
  }

  unsigned NumBytesActuallyUsed = NumBytes;

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if it is
  // varargs.
  // Because we cannot tell if this is needed on the caller side, we have to
  // conservatively assume that it is needed. As such, make sure we have at
  // least enough stack space for the caller to store the 8 GPRs.
  // FIXME: On ELFv2, it may be unnecessary to allocate the parameter area.
  NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
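
  // For example, on ELFv1 (LinkageSize == 48) a call passing only a couple of
  // integer arguments still reserves 48 + 8*8 == 112 bytes, because the
  // callee must have room to spill all eight GPR argument registers.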

  // Tail call needs the stack to be aligned.
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);

  int SPDiff = 0;

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  if (!IsSibCall)
    SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // To protect arguments on the stack from being clobbered in a tail call,
  // force all the loads to happen before doing any other lowering.
  if (isTailCall)
    Chain = DAG.getStackArgumentTokenFactor(Chain);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain,
                                 DAG.getIntPtrConstant(NumBytes, dl, true), dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);

  // Figure out which arguments are going to go in registers, and which in
  // memory. Also, if this is a vararg function, floating point operations
  // must be stored to our stack, and loaded into integer regs as well, if
  // any integer regs are available for argument passing.
  unsigned ArgOffset = LinkageSize;

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;

  SmallVector<SDValue, 8> MemOpChains;
  for (unsigned i = 0; i != NumOps; ++i) {
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDValue PtrOff;

    // We re-align the argument offset for each argument, except when using the
    // fast calling convention, when we need to make sure we do that only when
    // we'll actually use a stack slot.
    auto ComputePtrOff = [&]() {
      /* Respect alignment of argument on the stack. */
      unsigned Align =
        CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
      ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;

      PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());

      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
    };

    if (CallConv != CallingConv::Fast) {
      ComputePtrOff();

      /* Compute GPR index associated with argument offset. */
      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, NumGPRs);
    }

    // Promote integers to 64-bit values.
5198 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) { 5199 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 5200 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 5201 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 5202 } 5203 5204 // FIXME memcpy is used way more than necessary. Correctness first. 5205 // Note: "by value" is code for passing a structure by value, not 5206 // basic types. 5207 if (Flags.isByVal()) { 5208 // Note: Size includes alignment padding, so 5209 // struct x { short a; char b; } 5210 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 5211 // These are the proper values we need for right-justifying the 5212 // aggregate in a parameter register. 5213 unsigned Size = Flags.getByValSize(); 5214 5215 // An empty aggregate parameter takes up no storage and no 5216 // registers. 5217 if (Size == 0) 5218 continue; 5219 5220 if (CallConv == CallingConv::Fast) 5221 ComputePtrOff(); 5222 5223 // All aggregates smaller than 8 bytes must be passed right-justified. 5224 if (Size==1 || Size==2 || Size==4) { 5225 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 5226 if (GPR_idx != NumGPRs) { 5227 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 5228 MachinePointerInfo(), VT); 5229 MemOpChains.push_back(Load.getValue(1)); 5230 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5231 5232 ArgOffset += PtrByteSize; 5233 continue; 5234 } 5235 } 5236 5237 if (GPR_idx == NumGPRs && Size < 8) { 5238 SDValue AddPtr = PtrOff; 5239 if (!isLittleEndian) { 5240 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 5241 PtrOff.getValueType()); 5242 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5243 } 5244 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5245 CallSeqStart, 5246 Flags, DAG, dl); 5247 ArgOffset += PtrByteSize; 5248 continue; 5249 } 5250 // Copy entire object into memory. There are cases where gcc-generated 5251 // code assumes it is there, even if it could be put entirely into 5252 // registers. (This is not what the doc says.) 5253 5254 // FIXME: The above statement is likely due to a misunderstanding of the 5255 // documents. All arguments must be copied into the parameter area BY 5256 // THE CALLEE in the event that the callee takes the address of any 5257 // formal argument. That has not yet been implemented. However, it is 5258 // reasonable to use the stack area as a staging area for the register 5259 // load. 5260 5261 // Skip this for small aggregates, as we will use the same slot for a 5262 // right-justified copy, below. 5263 if (Size >= 8) 5264 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5265 CallSeqStart, 5266 Flags, DAG, dl); 5267 5268 // When a register is available, pass a small aggregate right-justified. 5269 if (Size < 8 && GPR_idx != NumGPRs) { 5270 // The easiest way to get this right-justified in a register 5271 // is to copy the structure into the rightmost portion of a 5272 // local variable slot, then load the whole slot into the 5273 // register. 5274 // FIXME: The memcpy seems to produce pretty awful code for 5275 // small aggregates, particularly for packed ones. 5276 // FIXME: It would be preferable to use the slot in the 5277 // parameter save area instead of a new local variable. 
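        //
        // As a concrete example of the arithmetic below: on big-endian, a
        // 3-byte aggregate is copied to offset 8 - 3 == 5 within its
        // doubleword slot, so loading the full doubleword leaves the three
        // bytes right-justified in the GPR. On little-endian no displacement
        // is needed, since the low-order bytes of the slot are loaded into
        // the low-order bytes of the register anyway.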
5278 SDValue AddPtr = PtrOff; 5279 if (!isLittleEndian) { 5280 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType()); 5281 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5282 } 5283 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5284 CallSeqStart, 5285 Flags, DAG, dl); 5286 5287 // Load the slot into the register. 5288 SDValue Load = 5289 DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo()); 5290 MemOpChains.push_back(Load.getValue(1)); 5291 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5292 5293 // Done with this argument. 5294 ArgOffset += PtrByteSize; 5295 continue; 5296 } 5297 5298 // For aggregates larger than PtrByteSize, copy the pieces of the 5299 // object that fit into registers from the parameter save area. 5300 for (unsigned j=0; j<Size; j+=PtrByteSize) { 5301 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 5302 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 5303 if (GPR_idx != NumGPRs) { 5304 SDValue Load = 5305 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo()); 5306 MemOpChains.push_back(Load.getValue(1)); 5307 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5308 ArgOffset += PtrByteSize; 5309 } else { 5310 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 5311 break; 5312 } 5313 } 5314 continue; 5315 } 5316 5317 switch (Arg.getSimpleValueType().SimpleTy) { 5318 default: llvm_unreachable("Unexpected ValueType for argument!"); 5319 case MVT::i1: 5320 case MVT::i32: 5321 case MVT::i64: 5322 if (Flags.isNest()) { 5323 // The 'nest' parameter, if any, is passed in R11. 5324 RegsToPass.push_back(std::make_pair(PPC::X11, Arg)); 5325 hasNest = true; 5326 break; 5327 } 5328 5329 // These can be scalar arguments or elements of an integer array type 5330 // passed directly. Clang may use those instead of "byval" aggregate 5331 // types to avoid forcing arguments to memory unnecessarily. 5332 if (GPR_idx != NumGPRs) { 5333 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 5334 } else { 5335 if (CallConv == CallingConv::Fast) 5336 ComputePtrOff(); 5337 5338 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5339 true, isTailCall, false, MemOpChains, 5340 TailCallArguments, dl); 5341 if (CallConv == CallingConv::Fast) 5342 ArgOffset += PtrByteSize; 5343 } 5344 if (CallConv != CallingConv::Fast) 5345 ArgOffset += PtrByteSize; 5346 break; 5347 case MVT::f32: 5348 case MVT::f64: { 5349 // These can be scalar arguments or elements of a float array type 5350 // passed directly. The latter are used to implement ELFv2 homogenous 5351 // float aggregates. 5352 5353 // Named arguments go into FPRs first, and once they overflow, the 5354 // remaining arguments go into GPRs and then the parameter save area. 5355 // Unnamed arguments for vararg functions always go to GPRs and 5356 // then the parameter save area. For now, put all arguments to vararg 5357 // routines always in both locations (FPR *and* GPR or stack slot). 5358 bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs; 5359 bool NeededLoad = false; 5360 5361 // First load the argument into the next available FPR. 5362 if (FPR_idx != NumFPRs) 5363 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 5364 5365 // Next, load the argument into GPR or stack slot if needed. 
      if (!NeedGPROrStack)
        ;
      else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) {
        // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
        // once we support fp <-> gpr moves.

        // In the non-vararg case, this can only ever happen in the
        // presence of f32 array types, since otherwise we never run
        // out of FPRs before running out of GPRs.
        SDValue ArgVal;

        // Double values are always passed in a single GPR.
        if (Arg.getValueType() != MVT::f32) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);

        // Non-array float values are extended and passed in a GPR.
        } else if (!Flags.isInConsecutiveRegs()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);

        // If we have an array of floats, we collect every odd element
        // together with its predecessor into one GPR.
        } else if (ArgOffset % PtrByteSize != 0) {
          SDValue Lo, Hi;
          Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
          Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          if (!isLittleEndian)
            std::swap(Lo, Hi);
          ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);

        // The final element, if even, goes into the first half of a GPR.
        } else if (Flags.isInConsecutiveRegsLast()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
          if (!isLittleEndian)
            ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
                                 DAG.getConstant(32, dl, MVT::i32));

        // Non-final even elements are skipped; they will be handled
        // together with the subsequent argument on the next go-around.
        } else
          ArgVal = SDValue();

        if (ArgVal.getNode())
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
      } else {
        if (CallConv == CallingConv::Fast)
          ComputePtrOff();

        // Single-precision floating-point values are mapped to the
        // second (rightmost) word of the stack doubleword.
        if (Arg.getValueType() == MVT::f32 &&
            !isLittleEndian && !Flags.isInConsecutiveRegs()) {
          SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
          PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
        }

        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);

        NeededLoad = true;
      }
      // When passing an array of floats, the array occupies consecutive
      // space in the argument area; only round up to the next doubleword
      // at the end of the array. Otherwise, each float takes 8 bytes.
      if (CallConv != CallingConv::Fast || NeededLoad) {
        ArgOffset += (Arg.getValueType() == MVT::f32 &&
                      Flags.isInConsecutiveRegs()) ? 4 : 8;
        if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      }
      break;
    }
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
    case MVT::v2f64:
    case MVT::v2i64:
    case MVT::v1i128:
      if (!Subtarget.hasQPX()) {
        // These can be scalar arguments or elements of a vector array type
        // passed directly. The latter are used to implement ELFv2 homogenous
        // vector aggregates.
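        //
        // For instance (an illustration, not code from this file): under
        // ELFv2 a parameter declared as
        //   struct { vector int a, b; }
        // is a homogeneous vector aggregate and, when enough VRs are free,
        // is passed in two consecutive vector registers instead of memory.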
5451 5452 // For a varargs call, named arguments go into VRs or on the stack as 5453 // usual; unnamed arguments always go to the stack or the corresponding 5454 // GPRs when within range. For now, we always put the value in both 5455 // locations (or even all three). 5456 if (isVarArg) { 5457 // We could elide this store in the case where the object fits 5458 // entirely in R registers. Maybe later. 5459 SDValue Store = 5460 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 5461 MemOpChains.push_back(Store); 5462 if (VR_idx != NumVRs) { 5463 SDValue Load = 5464 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo()); 5465 MemOpChains.push_back(Load.getValue(1)); 5466 5467 unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 || 5468 Arg.getSimpleValueType() == MVT::v2i64) ? 5469 VSRH[VR_idx] : VR[VR_idx]; 5470 ++VR_idx; 5471 5472 RegsToPass.push_back(std::make_pair(VReg, Load)); 5473 } 5474 ArgOffset += 16; 5475 for (unsigned i=0; i<16; i+=PtrByteSize) { 5476 if (GPR_idx == NumGPRs) 5477 break; 5478 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5479 DAG.getConstant(i, dl, PtrVT)); 5480 SDValue Load = 5481 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 5482 MemOpChains.push_back(Load.getValue(1)); 5483 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5484 } 5485 break; 5486 } 5487 5488 // Non-varargs Altivec params go into VRs or on the stack. 5489 if (VR_idx != NumVRs) { 5490 unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 || 5491 Arg.getSimpleValueType() == MVT::v2i64) ? 5492 VSRH[VR_idx] : VR[VR_idx]; 5493 ++VR_idx; 5494 5495 RegsToPass.push_back(std::make_pair(VReg, Arg)); 5496 } else { 5497 if (CallConv == CallingConv::Fast) 5498 ComputePtrOff(); 5499 5500 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5501 true, isTailCall, true, MemOpChains, 5502 TailCallArguments, dl); 5503 if (CallConv == CallingConv::Fast) 5504 ArgOffset += 16; 5505 } 5506 5507 if (CallConv != CallingConv::Fast) 5508 ArgOffset += 16; 5509 break; 5510 } // not QPX 5511 5512 assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 && 5513 "Invalid QPX parameter type"); 5514 5515 /* fall through */ 5516 case MVT::v4f64: 5517 case MVT::v4i1: { 5518 bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32; 5519 if (isVarArg) { 5520 // We could elide this store in the case where the object fits 5521 // entirely in R registers. Maybe later. 5522 SDValue Store = 5523 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 5524 MemOpChains.push_back(Store); 5525 if (QFPR_idx != NumQFPRs) { 5526 SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store, 5527 PtrOff, MachinePointerInfo()); 5528 MemOpChains.push_back(Load.getValue(1)); 5529 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load)); 5530 } 5531 ArgOffset += (IsF32 ? 16 : 32); 5532 for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) { 5533 if (GPR_idx == NumGPRs) 5534 break; 5535 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5536 DAG.getConstant(i, dl, PtrVT)); 5537 SDValue Load = 5538 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 5539 MemOpChains.push_back(Load.getValue(1)); 5540 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5541 } 5542 break; 5543 } 5544 5545 // Non-varargs QPX params go into registers or on the stack. 
      if (QFPR_idx != NumQFPRs) {
        RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg));
      } else {
        if (CallConv == CallingConv::Fast)
          ComputePtrOff();

        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, isTailCall, true, MemOpChains,
                         TailCallArguments, dl);
        if (CallConv == CallingConv::Fast)
          ArgOffset += (IsF32 ? 16 : 32);
      }

      if (CallConv != CallingConv::Fast)
        ArgOffset += (IsF32 ? 16 : 32);
      break;
    }
    }
  }

  assert(NumBytesActuallyUsed == ArgOffset);
  (void)NumBytesActuallyUsed;

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Check if this is an indirect call (MTCTR/BCTRL).
  // See PrepareCall() for more information about calls through function
  // pointers in the 64-bit SVR4 ABI.
  if (!isTailCall && !isPatchPoint &&
      !isFunctionGlobalAddress(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee)) {
    // Load r2 into a virtual register and store it to the TOC save area.
    setUsesTOCBasePtr(DAG);
    SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
    // TOC save area offset.
    unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
    SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
    SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
    Chain = DAG.getStore(
        Val.getValue(1), dl, Val, AddPtr,
        MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
    // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
    // This does not mean the MTCTR instruction must use R12; it's easier
    // to model this as an extra parameter, so do that.
    if (isELFv2ABI && !isPatchPoint)
      RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (isTailCall && !IsSibCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
                    TailCallArguments);

  return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest,
                    DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee,
                    SPDiff, NumBytes, Ins, InVals, CS);
}

SDValue PPCTargetLowering::LowerCall_Darwin(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, bool isPatchPoint,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    ImmutableCallSite *CS) const {

  unsigned NumOps = Outs.size();

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;
  unsigned PtrByteSize = isPPC64 ? 8 : 4;

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a tail call. As a
  // consequence, the frame pointer will be used for dynamic stack allocation
  // and for restoring the caller's stack pointer in this function's epilogue.
  // This is done because the tail-called function might overwrite the value
  // in this function's (MF) stack pointer stack slot 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, and parameter passing area. We start with 24/48 bytes, which is
  // prereserved space for [SP][CR][LR][3 x unused].
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned NumBytes = LinkageSize;

  // Add up all the space actually used.
  // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
  // they all go in registers, but we must reserve stack space for them for
  // possible use by the caller. In varargs or 64-bit calls, parameters are
  // assigned stack space in order, with padding so Altivec parameters are
  // 16-byte aligned.
  unsigned nAltivecParamsAtEnd = 0;
  for (unsigned i = 0; i != NumOps; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    // Varargs Altivec parameters are padded to a 16 byte boundary.
    if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
        ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
        ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
      if (!isVarArg && !isPPC64) {
        // Non-varargs Altivec parameters go after all the non-Altivec
        // parameters; handle those later so we know how much padding we need.
        nAltivecParamsAtEnd++;
        continue;
      }
      // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
      NumBytes = ((NumBytes+15)/16)*16;
    }
    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
  }

  // Allow for Altivec parameters at the end, if needed.
  if (nAltivecParamsAtEnd) {
    NumBytes = ((NumBytes+15)/16)*16;
    NumBytes += 16*nAltivecParamsAtEnd;
  }

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if it is
  // varargs.
  // Because we cannot tell if this is needed on the caller side, we have to
  // conservatively assume that it is needed. As such, make sure we have at
  // least enough stack space for the caller to store the 8 GPRs.
  NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);

  // Tail call needs the stack to be aligned.
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // To protect arguments on the stack from being clobbered in a tail call,
  // force all the loads to happen before doing any other lowering.
  if (isTailCall)
    Chain = DAG.getStackArgumentTokenFactor(Chain);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                               dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
5706 SDValue LROp, FPOp; 5707 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl); 5708 5709 // Set up a copy of the stack pointer for use loading and storing any 5710 // arguments that may not fit in the registers available for argument 5711 // passing. 5712 SDValue StackPtr; 5713 if (isPPC64) 5714 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 5715 else 5716 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 5717 5718 // Figure out which arguments are going to go in registers, and which in 5719 // memory. Also, if this is a vararg function, floating point operations 5720 // must be stored to our stack, and loaded into integer regs as well, if 5721 // any integer regs are available for argument passing. 5722 unsigned ArgOffset = LinkageSize; 5723 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 5724 5725 static const MCPhysReg GPR_32[] = { // 32-bit registers. 5726 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 5727 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 5728 }; 5729 static const MCPhysReg GPR_64[] = { // 64-bit registers. 5730 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 5731 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 5732 }; 5733 static const MCPhysReg VR[] = { 5734 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 5735 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 5736 }; 5737 const unsigned NumGPRs = array_lengthof(GPR_32); 5738 const unsigned NumFPRs = 13; 5739 const unsigned NumVRs = array_lengthof(VR); 5740 5741 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 5742 5743 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 5744 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 5745 5746 SmallVector<SDValue, 8> MemOpChains; 5747 for (unsigned i = 0; i != NumOps; ++i) { 5748 SDValue Arg = OutVals[i]; 5749 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5750 5751 // PtrOff will be used to store the current argument to the stack if a 5752 // register cannot be found for it. 5753 SDValue PtrOff; 5754 5755 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 5756 5757 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 5758 5759 // On PPC64, promote integers to 64-bit values. 5760 if (isPPC64 && Arg.getValueType() == MVT::i32) { 5761 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 5762 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 5763 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 5764 } 5765 5766 // FIXME memcpy is used way more than necessary. Correctness first. 5767 // Note: "by value" is code for passing a structure by value, not 5768 // basic types. 5769 if (Flags.isByVal()) { 5770 unsigned Size = Flags.getByValSize(); 5771 // Very small objects are passed right-justified. Everything else is 5772 // passed left-justified. 5773 if (Size==1 || Size==2) { 5774 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 5775 if (GPR_idx != NumGPRs) { 5776 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 5777 MachinePointerInfo(), VT); 5778 MemOpChains.push_back(Load.getValue(1)); 5779 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5780 5781 ArgOffset += PtrByteSize; 5782 } else { 5783 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 5784 PtrOff.getValueType()); 5785 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5786 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5787 CallSeqStart, 5788 Flags, DAG, dl); 5789 ArgOffset += PtrByteSize; 5790 } 5791 continue; 5792 } 5793 // Copy entire object into memory. 
There are cases where gcc-generated 5794 // code assumes it is there, even if it could be put entirely into 5795 // registers. (This is not what the doc says.) 5796 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5797 CallSeqStart, 5798 Flags, DAG, dl); 5799 5800 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 5801 // copy the pieces of the object that fit into registers from the 5802 // parameter save area. 5803 for (unsigned j=0; j<Size; j+=PtrByteSize) { 5804 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 5805 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 5806 if (GPR_idx != NumGPRs) { 5807 SDValue Load = 5808 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo()); 5809 MemOpChains.push_back(Load.getValue(1)); 5810 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5811 ArgOffset += PtrByteSize; 5812 } else { 5813 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 5814 break; 5815 } 5816 } 5817 continue; 5818 } 5819 5820 switch (Arg.getSimpleValueType().SimpleTy) { 5821 default: llvm_unreachable("Unexpected ValueType for argument!"); 5822 case MVT::i1: 5823 case MVT::i32: 5824 case MVT::i64: 5825 if (GPR_idx != NumGPRs) { 5826 if (Arg.getValueType() == MVT::i1) 5827 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg); 5828 5829 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 5830 } else { 5831 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5832 isPPC64, isTailCall, false, MemOpChains, 5833 TailCallArguments, dl); 5834 } 5835 ArgOffset += PtrByteSize; 5836 break; 5837 case MVT::f32: 5838 case MVT::f64: 5839 if (FPR_idx != NumFPRs) { 5840 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 5841 5842 if (isVarArg) { 5843 SDValue Store = 5844 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 5845 MemOpChains.push_back(Store); 5846 5847 // Float varargs are always shadowed in available integer registers 5848 if (GPR_idx != NumGPRs) { 5849 SDValue Load = 5850 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo()); 5851 MemOpChains.push_back(Load.getValue(1)); 5852 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5853 } 5854 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 5855 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 5856 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 5857 SDValue Load = 5858 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo()); 5859 MemOpChains.push_back(Load.getValue(1)); 5860 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5861 } 5862 } else { 5863 // If we have any FPRs remaining, we may also have GPRs remaining. 5864 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 5865 // GPRs. 5866 if (GPR_idx != NumGPRs) 5867 ++GPR_idx; 5868 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 5869 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 5870 ++GPR_idx; 5871 } 5872 } else 5873 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5874 isPPC64, isTailCall, false, MemOpChains, 5875 TailCallArguments, dl); 5876 if (isPPC64) 5877 ArgOffset += 8; 5878 else 5879 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 5880 break; 5881 case MVT::v4f32: 5882 case MVT::v4i32: 5883 case MVT::v8i16: 5884 case MVT::v16i8: 5885 if (isVarArg) { 5886 // These go aligned on the stack, or in the corresponding R registers 5887 // when within range. 
The Darwin PPC ABI doc claims they also go in 5888 // V registers; in fact gcc does this only for arguments that are 5889 // prototyped, not for those that match the ... We do it for all 5890 // arguments, seems to work. 5891 while (ArgOffset % 16 !=0) { 5892 ArgOffset += PtrByteSize; 5893 if (GPR_idx != NumGPRs) 5894 GPR_idx++; 5895 } 5896 // We could elide this store in the case where the object fits 5897 // entirely in R registers. Maybe later. 5898 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 5899 DAG.getConstant(ArgOffset, dl, PtrVT)); 5900 SDValue Store = 5901 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 5902 MemOpChains.push_back(Store); 5903 if (VR_idx != NumVRs) { 5904 SDValue Load = 5905 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo()); 5906 MemOpChains.push_back(Load.getValue(1)); 5907 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 5908 } 5909 ArgOffset += 16; 5910 for (unsigned i=0; i<16; i+=PtrByteSize) { 5911 if (GPR_idx == NumGPRs) 5912 break; 5913 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5914 DAG.getConstant(i, dl, PtrVT)); 5915 SDValue Load = 5916 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 5917 MemOpChains.push_back(Load.getValue(1)); 5918 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5919 } 5920 break; 5921 } 5922 5923 // Non-varargs Altivec params generally go in registers, but have 5924 // stack space allocated at the end. 5925 if (VR_idx != NumVRs) { 5926 // Doesn't have GPR space allocated. 5927 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 5928 } else if (nAltivecParamsAtEnd==0) { 5929 // We are emitting Altivec params in order. 5930 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5931 isPPC64, isTailCall, true, MemOpChains, 5932 TailCallArguments, dl); 5933 ArgOffset += 16; 5934 } 5935 break; 5936 } 5937 } 5938 // If all Altivec parameters fit in registers, as they usually do, 5939 // they get stack space following the non-Altivec parameters. We 5940 // don't track this here because nobody below needs it. 5941 // If there are more Altivec parameters than fit in registers emit 5942 // the stores here. 5943 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 5944 unsigned j = 0; 5945 // Offset is aligned; skip 1st 12 params which go in V registers. 5946 ArgOffset = ((ArgOffset+15)/16)*16; 5947 ArgOffset += 12*16; 5948 for (unsigned i = 0; i != NumOps; ++i) { 5949 SDValue Arg = OutVals[i]; 5950 EVT ArgType = Outs[i].VT; 5951 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 5952 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 5953 if (++j > NumVRs) { 5954 SDValue PtrOff; 5955 // We are emitting Altivec params in order. 5956 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5957 isPPC64, isTailCall, true, MemOpChains, 5958 TailCallArguments, dl); 5959 ArgOffset += 16; 5960 } 5961 } 5962 } 5963 } 5964 5965 if (!MemOpChains.empty()) 5966 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5967 5968 // On Darwin, R12 must contain the address of an indirect callee. This does 5969 // not mean the MTCTR instruction must use R12; it's easier to model this as 5970 // an extra parameter, so do that. 5971 if (!isTailCall && 5972 !isFunctionGlobalAddress(Callee) && 5973 !isa<ExternalSymbolSDNode>(Callee) && 5974 !isBLACompatibleAddress(Callee, DAG)) 5975 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? 
PPC::X12 : 5976 PPC::R12), Callee)); 5977 5978 // Build a sequence of copy-to-reg nodes chained together with token chain 5979 // and flag operands which copy the outgoing args into the appropriate regs. 5980 SDValue InFlag; 5981 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 5982 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 5983 RegsToPass[i].second, InFlag); 5984 InFlag = Chain.getValue(1); 5985 } 5986 5987 if (isTailCall) 5988 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 5989 TailCallArguments); 5990 5991 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, 5992 /* unused except on PPC64 ELFv1 */ false, DAG, 5993 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 5994 NumBytes, Ins, InVals, CS); 5995 } 5996 5997 bool 5998 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 5999 MachineFunction &MF, bool isVarArg, 6000 const SmallVectorImpl<ISD::OutputArg> &Outs, 6001 LLVMContext &Context) const { 6002 SmallVector<CCValAssign, 16> RVLocs; 6003 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 6004 return CCInfo.CheckReturn(Outs, RetCC_PPC); 6005 } 6006 6007 SDValue 6008 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 6009 bool isVarArg, 6010 const SmallVectorImpl<ISD::OutputArg> &Outs, 6011 const SmallVectorImpl<SDValue> &OutVals, 6012 const SDLoc &dl, SelectionDAG &DAG) const { 6013 6014 SmallVector<CCValAssign, 16> RVLocs; 6015 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 6016 *DAG.getContext()); 6017 CCInfo.AnalyzeReturn(Outs, RetCC_PPC); 6018 6019 SDValue Flag; 6020 SmallVector<SDValue, 4> RetOps(1, Chain); 6021 6022 // Copy the result values into the output registers. 6023 for (unsigned i = 0; i != RVLocs.size(); ++i) { 6024 CCValAssign &VA = RVLocs[i]; 6025 assert(VA.isRegLoc() && "Can only return in registers!"); 6026 6027 SDValue Arg = OutVals[i]; 6028 6029 switch (VA.getLocInfo()) { 6030 default: llvm_unreachable("Unknown loc info!"); 6031 case CCValAssign::Full: break; 6032 case CCValAssign::AExt: 6033 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 6034 break; 6035 case CCValAssign::ZExt: 6036 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 6037 break; 6038 case CCValAssign::SExt: 6039 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 6040 break; 6041 } 6042 6043 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 6044 Flag = Chain.getValue(1); 6045 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 6046 } 6047 6048 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 6049 const MCPhysReg *I = 6050 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); 6051 if (I) { 6052 for (; *I; ++I) { 6053 6054 if (PPC::G8RCRegClass.contains(*I)) 6055 RetOps.push_back(DAG.getRegister(*I, MVT::i64)); 6056 else if (PPC::F8RCRegClass.contains(*I)) 6057 RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64))); 6058 else if (PPC::CRRCRegClass.contains(*I)) 6059 RetOps.push_back(DAG.getRegister(*I, MVT::i1)); 6060 else if (PPC::VRRCRegClass.contains(*I)) 6061 RetOps.push_back(DAG.getRegister(*I, MVT::Other)); 6062 else 6063 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 6064 } 6065 } 6066 6067 RetOps[0] = Chain; // Update chain. 6068 6069 // Add the flag if we have it. 
6070 if (Flag.getNode())
6071 RetOps.push_back(Flag);
6072
6073 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
6074 }
6075
6076 SDValue
6077 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
6078 SelectionDAG &DAG) const {
6079 SDLoc dl(Op);
6080
6081 // Get the correct type for integers.
6082 EVT IntVT = Op.getValueType();
6083
6084 // Get the inputs.
6085 SDValue Chain = Op.getOperand(0);
6086 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
6087 // Build a DYNAREAOFFSET node.
6088 SDValue Ops[2] = {Chain, FPSIdx};
6089 SDVTList VTs = DAG.getVTList(IntVT);
6090 return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
6091 }
6092
6093 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
6094 SelectionDAG &DAG) const {
6095 // When we pop the dynamic allocation we need to restore the SP link.
6096 SDLoc dl(Op);
6097
6098 // Get the correct type for pointers.
6099 EVT PtrVT = getPointerTy(DAG.getDataLayout());
6100
6101 // Construct the stack pointer operand.
6102 bool isPPC64 = Subtarget.isPPC64();
6103 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
6104 SDValue StackPtr = DAG.getRegister(SP, PtrVT);
6105
6106 // Get the operands for the STACKRESTORE.
6107 SDValue Chain = Op.getOperand(0);
6108 SDValue SaveSP = Op.getOperand(1);
6109
6110 // Load the old link SP.
6111 SDValue LoadLinkSP =
6112 DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
6113
6114 // Restore the stack pointer.
6115 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
6116
6117 // Store the old link SP.
6118 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
6119 }
6120
6121 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
6122 MachineFunction &MF = DAG.getMachineFunction();
6123 bool isPPC64 = Subtarget.isPPC64();
6124 EVT PtrVT = getPointerTy(MF.getDataLayout());
6125
6126 // Get the current return address save index. It is created lazily the
6127 // first time it is needed.
6128 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
6129 int RASI = FI->getReturnAddrSaveIndex();
6130
6131 // If the return address save index hasn't been defined yet.
6132 if (!RASI) {
6133 // Find out the fixed offset of the return address save area.
6134 int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
6135 // Allocate the frame index for the return address save area.
6136 RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
6137 // Save the result.
6138 FI->setReturnAddrSaveIndex(RASI);
6139 }
6140 return DAG.getFrameIndex(RASI, PtrVT);
6141 }
6142
6143 SDValue
6144 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
6145 MachineFunction &MF = DAG.getMachineFunction();
6146 bool isPPC64 = Subtarget.isPPC64();
6147 EVT PtrVT = getPointerTy(MF.getDataLayout());
6148
6149 // Get the current frame pointer save index. The users of this index will be
6150 // primarily DYNALLOC instructions.
6151 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
6152 int FPSI = FI->getFramePointerSaveIndex();
6153
6154 // If the frame pointer save index hasn't been defined yet.
6155 if (!FPSI) {
6156 // Find out the fixed offset of the frame pointer save area.
6157 int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
6158 // Allocate the frame index for the frame pointer save area.
6159 FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
6160 // Save the result.
6161 FI->setFramePointerSaveIndex(FPSI);
6162 }
6163 return DAG.getFrameIndex(FPSI, PtrVT);
6164 }
6165
6166 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
6167 SelectionDAG &DAG) const {
6168 // Get the inputs.
6169 SDValue Chain = Op.getOperand(0);
6170 SDValue Size = Op.getOperand(1);
6171 SDLoc dl(Op);
6172
6173 // Get the correct type for pointers.
6174 EVT PtrVT = getPointerTy(DAG.getDataLayout());
6175 // Negate the size.
6176 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
6177 DAG.getConstant(0, dl, PtrVT), Size);
6178 // Construct a node for the frame pointer save index.
6179 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
6180 // Build a DYNALLOC node.
6181 SDValue Ops[3] = { Chain, NegSize, FPSIdx };
6182 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
6183 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
6184 }
6185
6186 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
6187 SelectionDAG &DAG) const {
6188 SDLoc DL(Op);
6189 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
6190 DAG.getVTList(MVT::i32, MVT::Other),
6191 Op.getOperand(0), Op.getOperand(1));
6192 }
6193
6194 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
6195 SelectionDAG &DAG) const {
6196 SDLoc DL(Op);
6197 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
6198 Op.getOperand(0), Op.getOperand(1));
6199 }
6200
6201 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
6202 if (Op.getValueType().isVector())
6203 return LowerVectorLoad(Op, DAG);
6204
6205 assert(Op.getValueType() == MVT::i1 &&
6206 "Custom lowering only for i1 loads");
6207
6208 // First, load 8 bits into a GPR-width value, then truncate to 1 bit.
6209
6210 SDLoc dl(Op);
6211 LoadSDNode *LD = cast<LoadSDNode>(Op);
6212
6213 SDValue Chain = LD->getChain();
6214 SDValue BasePtr = LD->getBasePtr();
6215 MachineMemOperand *MMO = LD->getMemOperand();
6216
6217 SDValue NewLD =
6218 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
6219 BasePtr, MVT::i8, MMO);
6220 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
6221
6222 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
6223 return DAG.getMergeValues(Ops, dl);
6224 }
6225
6226 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
6227 if (Op.getOperand(1).getValueType().isVector())
6228 return LowerVectorStore(Op, DAG);
6229
6230 assert(Op.getOperand(1).getValueType() == MVT::i1 &&
6231 "Custom lowering only for i1 stores");
6232
6233 // First, zero extend to a GPR-width value, then use a truncating store to 8 bits.
6234
6235 SDLoc dl(Op);
6236 StoreSDNode *ST = cast<StoreSDNode>(Op);
6237
6238 SDValue Chain = ST->getChain();
6239 SDValue BasePtr = ST->getBasePtr();
6240 SDValue Value = ST->getValue();
6241 MachineMemOperand *MMO = ST->getMemOperand();
6242
6243 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
6244 Value);
6245 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
6246 }
6247
6248 // FIXME: Remove this once the ANDI glue bug is fixed:
6249 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
6250 assert(Op.getValueType() == MVT::i1 &&
6251 "Custom lowering only for i1 results");
6252
6253 SDLoc DL(Op);
6254 return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1,
6255 Op.getOperand(0));
6256 }
6257
6258 /// LowerSELECT_CC - Lower floating point select_cc's into an fsel instruction
6259 /// when possible.
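/// fsel frD, frA, frC, frB selects frC when frA >= 0.0 and frB otherwise, so,
/// for example, (select_cc lhs, 0.0, tv, fv, setge) can be emitted as a single
/// (fsel lhs, tv, fv) once the finite-math checks below are satisfied.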
6260 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
6261 // Not FP? Not an fsel.
6262 if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
6263 !Op.getOperand(2).getValueType().isFloatingPoint())
6264 return Op;
6265
6266 // We might be able to do better than this under some circumstances, but in
6267 // general, fsel-based lowering of select is a finite-math-only optimization.
6268 // For more information, see section F.3 of the 2.06 ISA specification.
6269 if (!DAG.getTarget().Options.NoInfsFPMath ||
6270 !DAG.getTarget().Options.NoNaNsFPMath)
6271 return Op;
6272 // TODO: Propagate flags from the select rather than global settings.
6273 SDNodeFlags Flags;
6274 Flags.setNoInfs(true);
6275 Flags.setNoNaNs(true);
6276
6277 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
6278
6279 EVT ResVT = Op.getValueType();
6280 EVT CmpVT = Op.getOperand(0).getValueType();
6281 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
6282 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
6283 SDLoc dl(Op);
6284
6285 // If the RHS of the comparison is a 0.0, we don't need to do the
6286 // subtraction at all.
6287 SDValue Sel1;
6288 if (isFloatingPointZero(RHS))
6289 switch (CC) {
6290 default: break; // SETUO etc aren't handled by fsel.
6291 case ISD::SETNE:
6292 std::swap(TV, FV);
6293 case ISD::SETEQ:
6294 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
6295 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6296 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
6297 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
6298 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
6299 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6300 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
6301 case ISD::SETULT:
6302 case ISD::SETLT:
6303 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
6304 case ISD::SETOGE:
6305 case ISD::SETGE:
6306 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
6307 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6308 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
6309 case ISD::SETUGT:
6310 case ISD::SETGT:
6311 std::swap(TV, FV); // fsel is natively setge, swap operands for setgt
6312 case ISD::SETOLE:
6313 case ISD::SETLE:
6314 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
6315 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6316 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6317 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
6318 }
6319
6320 SDValue Cmp;
6321 switch (CC) {
6322 default: break; // SETUO etc aren't handled by fsel.
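// For a nonzero RHS we materialize the difference and fsel on its sign:
// e.g. setge becomes (fsel (fsub lhs, rhs), tv, fv); the operand order or
// the sign of the difference is adjusted below for the other conditions.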
6323 case ISD::SETNE: 6324 std::swap(TV, FV); 6325 case ISD::SETEQ: 6326 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, &Flags); 6327 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6328 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6329 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6330 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 6331 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 6332 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 6333 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 6334 case ISD::SETULT: 6335 case ISD::SETLT: 6336 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, &Flags); 6337 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6338 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6339 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6340 case ISD::SETOGE: 6341 case ISD::SETGE: 6342 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, &Flags); 6343 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6344 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6345 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6346 case ISD::SETUGT: 6347 case ISD::SETGT: 6348 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, &Flags); 6349 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6350 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6351 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 6352 case ISD::SETOLE: 6353 case ISD::SETLE: 6354 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, &Flags); 6355 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 6356 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 6357 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 6358 } 6359 return Op; 6360 } 6361 6362 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI, 6363 SelectionDAG &DAG, 6364 const SDLoc &dl) const { 6365 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6366 SDValue Src = Op.getOperand(0); 6367 if (Src.getValueType() == MVT::f32) 6368 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6369 6370 SDValue Tmp; 6371 switch (Op.getSimpleValueType().SimpleTy) { 6372 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6373 case MVT::i32: 6374 Tmp = DAG.getNode( 6375 Op.getOpcode() == ISD::FP_TO_SINT 6376 ? PPCISD::FCTIWZ 6377 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6378 dl, MVT::f64, Src); 6379 break; 6380 case MVT::i64: 6381 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6382 "i64 FP_TO_UINT is supported only with FPCVT"); 6383 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 6384 PPCISD::FCTIDUZ, 6385 dl, MVT::f64, Src); 6386 break; 6387 } 6388 6389 // Convert the FP value to an int value through memory. 6390 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 6391 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()); 6392 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 6393 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 6394 MachinePointerInfo MPI = 6395 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); 6396 6397 // Emit a store to the stack slot. 
6398 SDValue Chain; 6399 if (i32Stack) { 6400 MachineFunction &MF = DAG.getMachineFunction(); 6401 MachineMemOperand *MMO = 6402 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 6403 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 6404 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 6405 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 6406 } else 6407 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI); 6408 6409 // Result is a load from the stack slot. If loading 4 bytes, make sure to 6410 // add in a bias on big endian. 6411 if (Op.getValueType() == MVT::i32 && !i32Stack) { 6412 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 6413 DAG.getConstant(4, dl, FIPtr.getValueType())); 6414 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4); 6415 } 6416 6417 RLI.Chain = Chain; 6418 RLI.Ptr = FIPtr; 6419 RLI.MPI = MPI; 6420 } 6421 6422 /// \brief Custom lowers floating point to integer conversions to use 6423 /// the direct move instructions available in ISA 2.07 to avoid the 6424 /// need for load/store combinations. 6425 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 6426 SelectionDAG &DAG, 6427 const SDLoc &dl) const { 6428 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 6429 SDValue Src = Op.getOperand(0); 6430 6431 if (Src.getValueType() == MVT::f32) 6432 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 6433 6434 SDValue Tmp; 6435 switch (Op.getSimpleValueType().SimpleTy) { 6436 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 6437 case MVT::i32: 6438 Tmp = DAG.getNode( 6439 Op.getOpcode() == ISD::FP_TO_SINT 6440 ? PPCISD::FCTIWZ 6441 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 6442 dl, MVT::f64, Src); 6443 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp); 6444 break; 6445 case MVT::i64: 6446 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 6447 "i64 FP_TO_UINT is supported only with FPCVT"); 6448 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 6449 PPCISD::FCTIDUZ, 6450 dl, MVT::f64, Src); 6451 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp); 6452 break; 6453 } 6454 return Tmp; 6455 } 6456 6457 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 6458 const SDLoc &dl) const { 6459 if (Subtarget.hasDirectMove() && Subtarget.isPPC64()) 6460 return LowerFP_TO_INTDirectMove(Op, DAG, dl); 6461 6462 ReuseLoadInfo RLI; 6463 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 6464 6465 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, 6466 RLI.Alignment, 6467 RLI.IsInvariant ? MachineMemOperand::MOInvariant 6468 : MachineMemOperand::MONone, 6469 RLI.AAInfo, RLI.Ranges); 6470 } 6471 6472 // We're trying to insert a regular store, S, and then a load, L. If the 6473 // incoming value, O, is a load, we might just be able to have our load use the 6474 // address used by O. However, we don't know if anything else will store to 6475 // that address before we can load from it. To prevent this situation, we need 6476 // to insert our load, L, into the chain as a peer of O. To do this, we give L 6477 // the same chain operand as O, we create a token factor from the chain results 6478 // of O and L, and we replace all uses of O's chain result with that token 6479 // factor (see spliceIntoChain below for this last part). 
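// Schematically: if C was O's incoming chain and U1..Un were the users of
// O's chain result, then afterwards L is also chained to C, and U1..Un use
// TokenFactor(O's chain result, L's chain result) instead.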
6480 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
6481 ReuseLoadInfo &RLI,
6482 SelectionDAG &DAG,
6483 ISD::LoadExtType ET) const {
6484 SDLoc dl(Op);
6485 if (ET == ISD::NON_EXTLOAD &&
6486 (Op.getOpcode() == ISD::FP_TO_UINT ||
6487 Op.getOpcode() == ISD::FP_TO_SINT) &&
6488 isOperationLegalOrCustom(Op.getOpcode(),
6489 Op.getOperand(0).getValueType())) {
6490
6491 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
6492 return true;
6493 }
6494
6495 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
6496 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
6497 LD->isNonTemporal())
6498 return false;
6499 if (LD->getMemoryVT() != MemVT)
6500 return false;
6501
6502 RLI.Ptr = LD->getBasePtr();
6503 if (LD->isIndexed() && !LD->getOffset().isUndef()) {
6504 assert(LD->getAddressingMode() == ISD::PRE_INC &&
6505 "Non-pre-inc AM on PPC?");
6506 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
6507 LD->getOffset());
6508 }
6509
6510 RLI.Chain = LD->getChain();
6511 RLI.MPI = LD->getPointerInfo();
6512 RLI.IsInvariant = LD->isInvariant();
6513 RLI.Alignment = LD->getAlignment();
6514 RLI.AAInfo = LD->getAAInfo();
6515 RLI.Ranges = LD->getRanges();
6516
6517 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
6518 return true;
6519 }
6520
6521 // Given the head of the old chain, ResChain, insert a token factor containing
6522 // it and NewResChain, and make users of ResChain now be users of that token
6523 // factor.
6524 void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
6525 SDValue NewResChain,
6526 SelectionDAG &DAG) const {
6527 if (!ResChain)
6528 return;
6529
6530 SDLoc dl(NewResChain);
6531
6532 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
6533 NewResChain, DAG.getUNDEF(MVT::Other));
6534 assert(TF.getNode() != NewResChain.getNode() &&
6535 "A new TF really is required here");
6536
6537 DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
6538 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
6539 }
6540
6541 /// \brief Analyze the profitability of a direct move: prefer a plain float
6542 /// load over an int load plus a direct move when every use of the loaded
6543 /// integer value is a conversion to floating point.
6544 static bool directMoveIsProfitable(const SDValue &Op) {
6545 SDNode *Origin = Op.getOperand(0).getNode();
6546 if (Origin->getOpcode() != ISD::LOAD)
6547 return true;
6548
6549 for (SDNode::use_iterator UI = Origin->use_begin(),
6550 UE = Origin->use_end();
6551 UI != UE; ++UI) {
6552
6553 // Only look at the users of the loaded value.
6554 if (UI.getUse().get().getResNo() != 0)
6555 continue;
6556
6557 if (UI->getOpcode() != ISD::SINT_TO_FP &&
6558 UI->getOpcode() != ISD::UINT_TO_FP)
6559 return true;
6560 }
6561
6562 return false;
6563 }
6564
6565 /// \brief Custom lowers integer to floating point conversions to use
6566 /// the direct move instructions available in ISA 2.07 to avoid the
6567 /// need for load/store combinations.
6568 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
6569 SelectionDAG &DAG,
6570 const SDLoc &dl) const {
6571 assert((Op.getValueType() == MVT::f32 ||
6572 Op.getValueType() == MVT::f64) &&
6573 "Invalid floating point type as target of conversion");
6574 assert(Subtarget.hasFPCVT() &&
6575 "Int to FP conversions with direct moves require FPCVT");
6576 SDValue FP;
6577 SDValue Src = Op.getOperand(0);
6578 bool SinglePrec = Op.getValueType() == MVT::f32;
6579 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
6580 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP;
6581 unsigned ConvOp = Signed ? (SinglePrec ?
PPCISD::FCFIDS : PPCISD::FCFID) :
6582 (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU);
6583
6584 if (WordInt) {
6585 FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ,
6586 dl, MVT::f64, Src);
6587 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
6588 }
6589 else {
6590 FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src);
6591 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
6592 }
6593
6594 return FP;
6595 }
6596
6597 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
6598 SelectionDAG &DAG) const {
6599 SDLoc dl(Op);
6600
6601 if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) {
6602 if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64)
6603 return SDValue();
6604
6605 SDValue Value = Op.getOperand(0);
6606 // The values are now known to be -1 (false) or 1 (true). To convert this
6607 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
6608 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
6609 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
6610
6611 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
6612
6613 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
6614
6615 if (Op.getValueType() != MVT::v4f64)
6616 Value = DAG.getNode(ISD::FP_ROUND, dl,
6617 Op.getValueType(), Value,
6618 DAG.getIntPtrConstant(1, dl));
6619 return Value;
6620 }
6621
6622 // Don't handle ppc_fp128 here; let it be lowered to a libcall.
6623 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
6624 return SDValue();
6625
6626 if (Op.getOperand(0).getValueType() == MVT::i1)
6627 return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
6628 DAG.getConstantFP(1.0, dl, Op.getValueType()),
6629 DAG.getConstantFP(0.0, dl, Op.getValueType()));
6630
6631 // If we have direct moves, we can do the entire conversion and skip the
6632 // store/load; without FPCVT, however, we can't do most conversions.
6633 if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
6634 Subtarget.isPPC64() && Subtarget.hasFPCVT())
6635 return LowerINT_TO_FPDirectMove(Op, DAG, dl);
6636
6637 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
6638 "UINT_TO_FP is supported only with FPCVT");
6639
6640 // If we have FCFIDS, then use it when converting to single-precision.
6641 // Otherwise, convert to double-precision and then round.
6642 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
6643 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
6644 : PPCISD::FCFIDS)
6645 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
6646 : PPCISD::FCFID);
6647 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
6648 ? MVT::f32
6649 : MVT::f64;
6650
6651 if (Op.getOperand(0).getValueType() == MVT::i64) {
6652 SDValue SINT = Op.getOperand(0);
6653 // When converting to single-precision, we actually need to convert
6654 // to double-precision first and then round to single-precision.
6655 // To avoid double-rounding effects during that operation, we have
6656 // to prepare the input operand. Bits that might be truncated when
6657 // converting to double-precision are replaced by a bit that won't
6658 // be lost at this stage, but is below the single-precision rounding
6659 // position.
6660 //
6661 // However, if -enable-unsafe-fp-math is in effect, accept double
6662 // rounding to avoid the extra overhead.
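// For example (illustrative): (1 << 53) + (1 << 29) + 1 converted directly
// to f32 rounds up to 2^53 + 2^30, but rounding it to f64 first yields
// 2^53 + 2^29 (a tie, broken to even), which then rounds down to 2^53.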
6663 if (Op.getValueType() == MVT::f32 && 6664 !Subtarget.hasFPCVT() && 6665 !DAG.getTarget().Options.UnsafeFPMath) { 6666 6667 // Twiddle input to make sure the low 11 bits are zero. (If this 6668 // is the case, we are guaranteed the value will fit into the 53 bit 6669 // mantissa of an IEEE double-precision value without rounding.) 6670 // If any of those low 11 bits were not zero originally, make sure 6671 // bit 12 (value 2048) is set instead, so that the final rounding 6672 // to single-precision gets the correct result. 6673 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 6674 SINT, DAG.getConstant(2047, dl, MVT::i64)); 6675 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 6676 Round, DAG.getConstant(2047, dl, MVT::i64)); 6677 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 6678 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 6679 Round, DAG.getConstant(-2048, dl, MVT::i64)); 6680 6681 // However, we cannot use that value unconditionally: if the magnitude 6682 // of the input value is small, the bit-twiddling we did above might 6683 // end up visibly changing the output. Fortunately, in that case, we 6684 // don't need to twiddle bits since the original input will convert 6685 // exactly to double-precision floating-point already. Therefore, 6686 // construct a conditional to use the original value if the top 11 6687 // bits are all sign-bit copies, and use the rounded value computed 6688 // above otherwise. 6689 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 6690 SINT, DAG.getConstant(53, dl, MVT::i32)); 6691 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 6692 Cond, DAG.getConstant(1, dl, MVT::i64)); 6693 Cond = DAG.getSetCC(dl, MVT::i32, 6694 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT); 6695 6696 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 6697 } 6698 6699 ReuseLoadInfo RLI; 6700 SDValue Bits; 6701 6702 MachineFunction &MF = DAG.getMachineFunction(); 6703 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 6704 Bits = 6705 DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, RLI.Alignment, 6706 RLI.IsInvariant ? 
MachineMemOperand::MOInvariant 6707 : MachineMemOperand::MONone, 6708 RLI.AAInfo, RLI.Ranges); 6709 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6710 } else if (Subtarget.hasLFIWAX() && 6711 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) { 6712 MachineMemOperand *MMO = 6713 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6714 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6715 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6716 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl, 6717 DAG.getVTList(MVT::f64, MVT::Other), 6718 Ops, MVT::i32, MMO); 6719 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6720 } else if (Subtarget.hasFPCVT() && 6721 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) { 6722 MachineMemOperand *MMO = 6723 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6724 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6725 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6726 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl, 6727 DAG.getVTList(MVT::f64, MVT::Other), 6728 Ops, MVT::i32, MMO); 6729 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 6730 } else if (((Subtarget.hasLFIWAX() && 6731 SINT.getOpcode() == ISD::SIGN_EXTEND) || 6732 (Subtarget.hasFPCVT() && 6733 SINT.getOpcode() == ISD::ZERO_EXTEND)) && 6734 SINT.getOperand(0).getValueType() == MVT::i32) { 6735 MachineFrameInfo &MFI = MF.getFrameInfo(); 6736 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6737 6738 int FrameIdx = MFI.CreateStackObject(4, 4, false); 6739 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6740 6741 SDValue Store = 6742 DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx, 6743 MachinePointerInfo::getFixedStack( 6744 DAG.getMachineFunction(), FrameIdx)); 6745 6746 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 6747 "Expected an i32 store"); 6748 6749 RLI.Ptr = FIdx; 6750 RLI.Chain = Store; 6751 RLI.MPI = 6752 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 6753 RLI.Alignment = 4; 6754 6755 MachineMemOperand *MMO = 6756 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6757 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6758 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6759 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ? 6760 PPCISD::LFIWZX : PPCISD::LFIWAX, 6761 dl, DAG.getVTList(MVT::f64, MVT::Other), 6762 Ops, MVT::i32, MMO); 6763 } else 6764 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 6765 6766 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 6767 6768 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 6769 FP = DAG.getNode(ISD::FP_ROUND, dl, 6770 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 6771 return FP; 6772 } 6773 6774 assert(Op.getOperand(0).getValueType() == MVT::i32 && 6775 "Unhandled INT_TO_FP type in custom expander!"); 6776 // Since we only generate this in 64-bit mode, we can take advantage of 6777 // 64-bit registers. In particular, sign extend the input value into the 6778 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 6779 // then lfd it and fcfid it. 
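// (When LFIWAX/FPCVT is available, the first branch below instead loads the
// i32 directly into an FPR with lfiwax/lfiwzx and skips the 64-bit store.)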
6780 MachineFunction &MF = DAG.getMachineFunction(); 6781 MachineFrameInfo &MFI = MF.getFrameInfo(); 6782 EVT PtrVT = getPointerTy(MF.getDataLayout()); 6783 6784 SDValue Ld; 6785 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 6786 ReuseLoadInfo RLI; 6787 bool ReusingLoad; 6788 if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI, 6789 DAG))) { 6790 int FrameIdx = MFI.CreateStackObject(4, 4, false); 6791 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6792 6793 SDValue Store = 6794 DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 6795 MachinePointerInfo::getFixedStack( 6796 DAG.getMachineFunction(), FrameIdx)); 6797 6798 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 6799 "Expected an i32 store"); 6800 6801 RLI.Ptr = FIdx; 6802 RLI.Chain = Store; 6803 RLI.MPI = 6804 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 6805 RLI.Alignment = 4; 6806 } 6807 6808 MachineMemOperand *MMO = 6809 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 6810 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 6811 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 6812 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 6813 PPCISD::LFIWZX : PPCISD::LFIWAX, 6814 dl, DAG.getVTList(MVT::f64, MVT::Other), 6815 Ops, MVT::i32, MMO); 6816 if (ReusingLoad) 6817 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG); 6818 } else { 6819 assert(Subtarget.isPPC64() && 6820 "i32->FP without LFIWAX supported only on PPC64"); 6821 6822 int FrameIdx = MFI.CreateStackObject(8, 8, false); 6823 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 6824 6825 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, 6826 Op.getOperand(0)); 6827 6828 // STD the extended value into the stack slot. 6829 SDValue Store = DAG.getStore( 6830 DAG.getEntryNode(), dl, Ext64, FIdx, 6831 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx)); 6832 6833 // Load the value as a double. 6834 Ld = DAG.getLoad( 6835 MVT::f64, dl, Store, FIdx, 6836 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx)); 6837 } 6838 6839 // FCFID it and return it. 
6840 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
6841 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
6842 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
6843 DAG.getIntPtrConstant(0, dl));
6844 return FP;
6845 }
6846
6847 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
6848 SelectionDAG &DAG) const {
6849 SDLoc dl(Op);
6850 /*
6851 The rounding mode is in bits 30:31 of FPSCR, and has the following
6852 settings:
6853 00 Round to nearest
6854 01 Round to 0
6855 10 Round to +inf
6856 11 Round to -inf
6857
6858 FLT_ROUNDS, on the other hand, expects the following:
6859 -1 Undefined
6860 0 Round to 0
6861 1 Round to nearest
6862 2 Round to +inf
6863 3 Round to -inf
6864
6865 To perform the conversion, we do:
6866 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
6867 */
6868
6869 MachineFunction &MF = DAG.getMachineFunction();
6870 EVT VT = Op.getValueType();
6871 EVT PtrVT = getPointerTy(MF.getDataLayout());
6872
6873 // Save FP Control Word to register
6874 EVT NodeTys[] = {
6875 MVT::f64, // return register
6876 MVT::Glue // unused in this context
6877 };
6878 SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);
6879
6880 // Save FP register to stack slot
6881 int SSFI = MF.getFrameInfo().CreateStackObject(8, 8, false);
6882 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
6883 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, StackSlot,
6884 MachinePointerInfo());
6885
6886 // Load FP Control Word from low 32 bits of stack slot.
6887 SDValue Four = DAG.getConstant(4, dl, PtrVT);
6888 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
6889 SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo());
6890
6891 // Transform as necessary
6892 SDValue CWD1 =
6893 DAG.getNode(ISD::AND, dl, MVT::i32,
6894 CWD, DAG.getConstant(3, dl, MVT::i32));
6895 SDValue CWD2 =
6896 DAG.getNode(ISD::SRL, dl, MVT::i32,
6897 DAG.getNode(ISD::AND, dl, MVT::i32,
6898 DAG.getNode(ISD::XOR, dl, MVT::i32,
6899 CWD, DAG.getConstant(3, dl, MVT::i32)),
6900 DAG.getConstant(3, dl, MVT::i32)),
6901 DAG.getConstant(1, dl, MVT::i32));
6902
6903 SDValue RetVal =
6904 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
6905
6906 return DAG.getNode((VT.getSizeInBits() < 16 ?
6907 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
6908 }
6909
6910 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
6911 EVT VT = Op.getValueType();
6912 unsigned BitWidth = VT.getSizeInBits();
6913 SDLoc dl(Op);
6914 assert(Op.getNumOperands() == 3 &&
6915 VT == Op.getOperand(1).getValueType() &&
6916 "Unexpected SHL!");
6917
6918 // Expand into a bunch of logical ops. Note that these ops
6919 // depend on the PPC behavior for oversized shift amounts.
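// For Amt < BitWidth: OutHi = (Hi << Amt) | (Lo >> (BitWidth - Amt)) and
// OutLo = Lo << Amt. For Amt >= BitWidth, PPC's shifts return 0 for any
// amount in [BitWidth, 2*BitWidth), so the first two OutHi terms vanish and
// the (Lo << (Amt - BitWidth)) term supplies the high part.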
6920 SDValue Lo = Op.getOperand(0); 6921 SDValue Hi = Op.getOperand(1); 6922 SDValue Amt = Op.getOperand(2); 6923 EVT AmtVT = Amt.getValueType(); 6924 6925 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6926 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6927 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 6928 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 6929 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 6930 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6931 DAG.getConstant(-BitWidth, dl, AmtVT)); 6932 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 6933 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 6934 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 6935 SDValue OutOps[] = { OutLo, OutHi }; 6936 return DAG.getMergeValues(OutOps, dl); 6937 } 6938 6939 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 6940 EVT VT = Op.getValueType(); 6941 SDLoc dl(Op); 6942 unsigned BitWidth = VT.getSizeInBits(); 6943 assert(Op.getNumOperands() == 3 && 6944 VT == Op.getOperand(1).getValueType() && 6945 "Unexpected SRL!"); 6946 6947 // Expand into a bunch of logical ops. Note that these ops 6948 // depend on the PPC behavior for oversized shift amounts. 6949 SDValue Lo = Op.getOperand(0); 6950 SDValue Hi = Op.getOperand(1); 6951 SDValue Amt = Op.getOperand(2); 6952 EVT AmtVT = Amt.getValueType(); 6953 6954 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6955 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6956 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 6957 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 6958 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 6959 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6960 DAG.getConstant(-BitWidth, dl, AmtVT)); 6961 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 6962 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 6963 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 6964 SDValue OutOps[] = { OutLo, OutHi }; 6965 return DAG.getMergeValues(OutOps, dl); 6966 } 6967 6968 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 6969 SDLoc dl(Op); 6970 EVT VT = Op.getValueType(); 6971 unsigned BitWidth = VT.getSizeInBits(); 6972 assert(Op.getNumOperands() == 3 && 6973 VT == Op.getOperand(1).getValueType() && 6974 "Unexpected SRA!"); 6975 6976 // Expand into a bunch of logical ops, followed by a select_cc. 6977 SDValue Lo = Op.getOperand(0); 6978 SDValue Hi = Op.getOperand(1); 6979 SDValue Amt = Op.getOperand(2); 6980 EVT AmtVT = Amt.getValueType(); 6981 6982 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 6983 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 6984 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 6985 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 6986 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 6987 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 6988 DAG.getConstant(-BitWidth, dl, AmtVT)); 6989 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 6990 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 6991 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT), 6992 Tmp4, Tmp6, ISD::SETLE); 6993 SDValue OutOps[] = { OutLo, OutHi }; 6994 return DAG.getMergeValues(OutOps, dl); 6995 } 6996 6997 //===----------------------------------------------------------------------===// 6998 // Vector related lowering. 
6999 // 7000 7001 /// BuildSplatI - Build a canonical splati of Val with an element size of 7002 /// SplatSize. Cast the result to VT. 7003 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 7004 SelectionDAG &DAG, const SDLoc &dl) { 7005 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 7006 7007 static const MVT VTys[] = { // canonical VT to use for each size. 7008 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 7009 }; 7010 7011 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 7012 7013 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 7014 if (Val == -1) 7015 SplatSize = 1; 7016 7017 EVT CanonicalVT = VTys[SplatSize-1]; 7018 7019 // Build a canonical splat for this value. 7020 return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT)); 7021 } 7022 7023 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 7024 /// specified intrinsic ID. 7025 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG, 7026 const SDLoc &dl, EVT DestVT = MVT::Other) { 7027 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 7028 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7029 DAG.getConstant(IID, dl, MVT::i32), Op); 7030 } 7031 7032 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 7033 /// specified intrinsic ID. 7034 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 7035 SelectionDAG &DAG, const SDLoc &dl, 7036 EVT DestVT = MVT::Other) { 7037 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 7038 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7039 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS); 7040 } 7041 7042 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 7043 /// specified intrinsic ID. 7044 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 7045 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl, 7046 EVT DestVT = MVT::Other) { 7047 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 7048 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 7049 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2); 7050 } 7051 7052 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 7053 /// amount. The result has the specified value type. 7054 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT, 7055 SelectionDAG &DAG, const SDLoc &dl) { 7056 // Force LHS/RHS to be the right type. 7057 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 7058 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 7059 7060 int Ops[16]; 7061 for (unsigned i = 0; i != 16; ++i) 7062 Ops[i] = i + Amt; 7063 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 7064 return DAG.getNode(ISD::BITCAST, dl, VT, T); 7065 } 7066 7067 // If this is a case we can't handle, return null and let the default 7068 // expansion code take care of it. If we CAN select this case, and if it 7069 // selects to a single instruction, return Op. Otherwise, if we can codegen 7070 // this case more efficiently than a constant pool load, lower it to the 7071 // sequence of ops that should be used. 
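// For example, a v8i16 splat of 5 selects to a single vspltish 5, while a
// splat of 10 becomes the VADD_SPLAT pseudo below (vspltish 5 followed by
// an add of the result to itself).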
7072 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 7073 SelectionDAG &DAG) const { 7074 SDLoc dl(Op); 7075 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 7076 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 7077 7078 if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) { 7079 // We first build an i32 vector, load it into a QPX register, 7080 // then convert it to a floating-point vector and compare it 7081 // to a zero vector to get the boolean result. 7082 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 7083 int FrameIdx = MFI.CreateStackObject(16, 16, false); 7084 MachinePointerInfo PtrInfo = 7085 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7086 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7087 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7088 7089 assert(BVN->getNumOperands() == 4 && 7090 "BUILD_VECTOR for v4i1 does not have 4 operands"); 7091 7092 bool IsConst = true; 7093 for (unsigned i = 0; i < 4; ++i) { 7094 if (BVN->getOperand(i).isUndef()) continue; 7095 if (!isa<ConstantSDNode>(BVN->getOperand(i))) { 7096 IsConst = false; 7097 break; 7098 } 7099 } 7100 7101 if (IsConst) { 7102 Constant *One = 7103 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0); 7104 Constant *NegOne = 7105 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0); 7106 7107 Constant *CV[4]; 7108 for (unsigned i = 0; i < 4; ++i) { 7109 if (BVN->getOperand(i).isUndef()) 7110 CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext())); 7111 else if (isNullConstant(BVN->getOperand(i))) 7112 CV[i] = NegOne; 7113 else 7114 CV[i] = One; 7115 } 7116 7117 Constant *CP = ConstantVector::get(CV); 7118 SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()), 7119 16 /* alignment */); 7120 7121 SDValue Ops[] = {DAG.getEntryNode(), CPIdx}; 7122 SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other}); 7123 return DAG.getMemIntrinsicNode( 7124 PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32, 7125 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 7126 } 7127 7128 SmallVector<SDValue, 4> Stores; 7129 for (unsigned i = 0; i < 4; ++i) { 7130 if (BVN->getOperand(i).isUndef()) continue; 7131 7132 unsigned Offset = 4*i; 7133 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 7134 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 7135 7136 unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize(); 7137 if (StoreSize > 4) { 7138 Stores.push_back( 7139 DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx, 7140 PtrInfo.getWithOffset(Offset), MVT::i32)); 7141 } else { 7142 SDValue StoreValue = BVN->getOperand(i); 7143 if (StoreSize < 4) 7144 StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue); 7145 7146 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx, 7147 PtrInfo.getWithOffset(Offset))); 7148 } 7149 } 7150 7151 SDValue StoreChain; 7152 if (!Stores.empty()) 7153 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 7154 else 7155 StoreChain = DAG.getEntryNode(); 7156 7157 // Now load from v4i32 into the QPX register; this will extend it to 7158 // v4i64 but not yet convert it to a floating point. Nevertheless, this 7159 // is typed as v4f64 because the QPX register integer states are not 7160 // explicitly represented. 
7161 7162 SDValue Ops[] = {StoreChain, 7163 DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32), 7164 FIdx}; 7165 SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other}); 7166 7167 SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, 7168 dl, VTs, Ops, MVT::v4i32, PtrInfo); 7169 LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 7170 DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32), 7171 LoadedVect); 7172 7173 SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64); 7174 7175 return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ); 7176 } 7177 7178 // All other QPX vectors are handled by generic code. 7179 if (Subtarget.hasQPX()) 7180 return SDValue(); 7181 7182 // Check if this is a splat of a constant value. 7183 APInt APSplatBits, APSplatUndef; 7184 unsigned SplatBitSize; 7185 bool HasAnyUndefs; 7186 if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize, 7187 HasAnyUndefs, 0, !Subtarget.isLittleEndian()) || 7188 SplatBitSize > 32) 7189 return SDValue(); 7190 7191 unsigned SplatBits = APSplatBits.getZExtValue(); 7192 unsigned SplatUndef = APSplatUndef.getZExtValue(); 7193 unsigned SplatSize = SplatBitSize / 8; 7194 7195 // First, handle single instruction cases. 7196 7197 // All zeros? 7198 if (SplatBits == 0) { 7199 // Canonicalize all zero vectors to be v4i32. 7200 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 7201 SDValue Z = DAG.getConstant(0, dl, MVT::v4i32); 7202 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z); 7203 } 7204 return Op; 7205 } 7206 7207 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 7208 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >> 7209 (32-SplatBitSize)); 7210 if (SextVal >= -16 && SextVal <= 15) 7211 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl); 7212 7213 // Two instruction sequences. 7214 7215 // If this value is in the range [-32,30] and is even, use: 7216 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2) 7217 // If this value is in the range [17,31] and is odd, use: 7218 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16) 7219 // If this value is in the range [-31,-17] and is odd, use: 7220 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16) 7221 // Note the last two are three-instruction sequences. 7222 if (SextVal >= -32 && SextVal <= 31) { 7223 // To avoid having these optimizations undone by constant folding, 7224 // we convert to a pseudo that will be expanded later into one of 7225 // the above forms. 7226 SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32); 7227 EVT VT = (SplatSize == 1 ? MVT::v16i8 : 7228 (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32)); 7229 SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32); 7230 SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize); 7231 if (VT == Op.getValueType()) 7232 return RetVal; 7233 else 7234 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal); 7235 } 7236 7237 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is 7238 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important 7239 // for fneg/fabs. 7240 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) { 7241 // Make -1 and vspltisw -1: 7242 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl); 7243 7244 // Make the VSLW intrinsic, computing 0x8000_0000. 7245 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV, 7246 OnesV, DAG, dl); 7247 7248 // xor by OnesV to invert it. 
    Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
  }

  // Check to see if this is a wide variety of vsplti*, binop self cases.
  static const signed char SplatCsts[] = {
    -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
    -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
  };

  for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous (e.g. formation of 0x8000_0000).
    int i = SplatCsts[idx];

    // Figure out what shift amount will be used by altivec if shifted by i in
    // this splat size.
    unsigned TypeShiftAmt = i & (SplatBitSize-1);

    // vsplti + shl self.
    if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
        Intrinsic::ppc_altivec_vslw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + srl self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
        Intrinsic::ppc_altivec_vsrw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + sra self.
    if (SextVal == (int)((signed)i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
        Intrinsic::ppc_altivec_vsraw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + rol self.
    if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                         ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
        Intrinsic::ppc_altivec_vrlw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // t = vsplti c, result = vsldoi t, t, 1
    if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 2
    if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 3
    if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ?
0xFFFFFF : 0))) { 7327 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 7328 unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3; 7329 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 7330 } 7331 } 7332 7333 return SDValue(); 7334 } 7335 7336 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 7337 /// the specified operations to build the shuffle. 7338 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 7339 SDValue RHS, SelectionDAG &DAG, 7340 const SDLoc &dl) { 7341 unsigned OpNum = (PFEntry >> 26) & 0x0F; 7342 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 7343 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 7344 7345 enum { 7346 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 7347 OP_VMRGHW, 7348 OP_VMRGLW, 7349 OP_VSPLTISW0, 7350 OP_VSPLTISW1, 7351 OP_VSPLTISW2, 7352 OP_VSPLTISW3, 7353 OP_VSLDOI4, 7354 OP_VSLDOI8, 7355 OP_VSLDOI12 7356 }; 7357 7358 if (OpNum == OP_COPY) { 7359 if (LHSID == (1*9+2)*9+3) return LHS; 7360 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 7361 return RHS; 7362 } 7363 7364 SDValue OpLHS, OpRHS; 7365 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 7366 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 7367 7368 int ShufIdxs[16]; 7369 switch (OpNum) { 7370 default: llvm_unreachable("Unknown i32 permute!"); 7371 case OP_VMRGHW: 7372 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 7373 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 7374 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 7375 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 7376 break; 7377 case OP_VMRGLW: 7378 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 7379 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 7380 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 7381 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 7382 break; 7383 case OP_VSPLTISW0: 7384 for (unsigned i = 0; i != 16; ++i) 7385 ShufIdxs[i] = (i&3)+0; 7386 break; 7387 case OP_VSPLTISW1: 7388 for (unsigned i = 0; i != 16; ++i) 7389 ShufIdxs[i] = (i&3)+4; 7390 break; 7391 case OP_VSPLTISW2: 7392 for (unsigned i = 0; i != 16; ++i) 7393 ShufIdxs[i] = (i&3)+8; 7394 break; 7395 case OP_VSPLTISW3: 7396 for (unsigned i = 0; i != 16; ++i) 7397 ShufIdxs[i] = (i&3)+12; 7398 break; 7399 case OP_VSLDOI4: 7400 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl); 7401 case OP_VSLDOI8: 7402 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl); 7403 case OP_VSLDOI12: 7404 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl); 7405 } 7406 EVT VT = OpLHS.getValueType(); 7407 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS); 7408 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS); 7409 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs); 7410 return DAG.getNode(ISD::BITCAST, dl, VT, T); 7411 } 7412 7413 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 7414 /// is a shuffle we can handle in a single instruction, return it. Otherwise, 7415 /// return the code it can be lowered into. Worst case, it can always be 7416 /// lowered into a vperm. 
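/// On little-endian targets the two vperm inputs are swapped and the byte
/// mask is complemented with respect to 31 before the node is created (see
/// the end of this function).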
7417 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 7418 SelectionDAG &DAG) const { 7419 SDLoc dl(Op); 7420 SDValue V1 = Op.getOperand(0); 7421 SDValue V2 = Op.getOperand(1); 7422 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 7423 EVT VT = Op.getValueType(); 7424 bool isLittleEndian = Subtarget.isLittleEndian(); 7425 7426 unsigned ShiftElts, InsertAtByte; 7427 bool Swap; 7428 if (Subtarget.hasP9Vector() && 7429 PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap, 7430 isLittleEndian)) { 7431 if (Swap) 7432 std::swap(V1, V2); 7433 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 7434 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2); 7435 if (ShiftElts) { 7436 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2, 7437 DAG.getConstant(ShiftElts, dl, MVT::i32)); 7438 SDValue Ins = DAG.getNode(PPCISD::XXINSERT, dl, MVT::v4i32, Conv1, Shl, 7439 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 7440 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 7441 } 7442 SDValue Ins = DAG.getNode(PPCISD::XXINSERT, dl, MVT::v4i32, Conv1, Conv2, 7443 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 7444 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 7445 } 7446 7447 if (Subtarget.hasVSX()) { 7448 if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) { 7449 int SplatIdx = PPC::getVSPLTImmediate(SVOp, 4, DAG); 7450 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 7451 SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv, 7452 DAG.getConstant(SplatIdx, dl, MVT::i32)); 7453 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat); 7454 } 7455 7456 // Left shifts of 8 bytes are actually swaps. Convert accordingly. 7457 if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) { 7458 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1); 7459 SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv); 7460 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap); 7461 } 7462 7463 } 7464 7465 if (Subtarget.hasQPX()) { 7466 if (VT.getVectorNumElements() != 4) 7467 return SDValue(); 7468 7469 if (V2.isUndef()) V2 = V1; 7470 7471 int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp); 7472 if (AlignIdx != -1) { 7473 return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2, 7474 DAG.getConstant(AlignIdx, dl, MVT::i32)); 7475 } else if (SVOp->isSplat()) { 7476 int SplatIdx = SVOp->getSplatIndex(); 7477 if (SplatIdx >= 4) { 7478 std::swap(V1, V2); 7479 SplatIdx -= 4; 7480 } 7481 7482 return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1, 7483 DAG.getConstant(SplatIdx, dl, MVT::i32)); 7484 } 7485 7486 // Lower this into a qvgpci/qvfperm pair. 7487 7488 // Compute the qvgpci literal 7489 unsigned idx = 0; 7490 for (unsigned i = 0; i < 4; ++i) { 7491 int m = SVOp->getMaskElt(i); 7492 unsigned mm = m >= 0 ? (unsigned) m : i; 7493 idx |= mm << (3-i)*3; 7494 } 7495 7496 SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64, 7497 DAG.getConstant(idx, dl, MVT::i32)); 7498 return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3); 7499 } 7500 7501 // Cases that are handled by instructions that take permute immediates 7502 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 7503 // selected by the instruction selector. 
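  // For example, a v4i32 splat of element zero (mask <0,0,0,0>) matches
  // isSplatShuffleMask(SVOp, 4) and is returned unchanged, to be selected
  // later as a single vspltw.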
7504 if (V2.isUndef()) { 7505 if (PPC::isSplatShuffleMask(SVOp, 1) || 7506 PPC::isSplatShuffleMask(SVOp, 2) || 7507 PPC::isSplatShuffleMask(SVOp, 4) || 7508 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) || 7509 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) || 7510 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 || 7511 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) || 7512 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) || 7513 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) || 7514 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) || 7515 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) || 7516 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) || 7517 (Subtarget.hasP8Altivec() && ( 7518 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) || 7519 PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) || 7520 PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) { 7521 return Op; 7522 } 7523 } 7524 7525 // Altivec has a variety of "shuffle immediates" that take two vector inputs 7526 // and produce a fixed permutation. If any of these match, do not lower to 7527 // VPERM. 7528 unsigned int ShuffleKind = isLittleEndian ? 2 : 0; 7529 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) || 7530 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) || 7531 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 || 7532 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) || 7533 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) || 7534 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) || 7535 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) || 7536 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) || 7537 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) || 7538 (Subtarget.hasP8Altivec() && ( 7539 PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) || 7540 PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) || 7541 PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG)))) 7542 return Op; 7543 7544 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 7545 // perfect shuffle table to emit an optimal matching sequence. 7546 ArrayRef<int> PermMask = SVOp->getMask(); 7547 7548 unsigned PFIndexes[4]; 7549 bool isFourElementShuffle = true; 7550 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 7551 unsigned EltNo = 8; // Start out undef. 7552 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 7553 if (PermMask[i*4+j] < 0) 7554 continue; // Undef, ignore it. 7555 7556 unsigned ByteSource = PermMask[i*4+j]; 7557 if ((ByteSource & 3) != j) { 7558 isFourElementShuffle = false; 7559 break; 7560 } 7561 7562 if (EltNo == 8) { 7563 EltNo = ByteSource/4; 7564 } else if (EltNo != ByteSource/4) { 7565 isFourElementShuffle = false; 7566 break; 7567 } 7568 } 7569 PFIndexes[i] = EltNo; 7570 } 7571 7572 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 7573 // perfect shuffle vector to determine if it is cost effective to do this as 7574 // discrete instructions, or whether we should use a vperm. 7575 // For now, we skip this for little endian until such time as we have a 7576 // little-endian perfect shuffle table. 7577 if (isFourElementShuffle && !isLittleEndian) { 7578 // Compute the index in the perfect shuffle table. 7579 unsigned PFTableIndex = 7580 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 7581 7582 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 7583 unsigned Cost = (PFEntry >> 30); 7584 7585 // Determining when to avoid vperm is tricky. Many things affect the cost 7586 // of vperm, particularly how many times the perm mask needs to be computed. 
7587 // For example, if the perm mask can be hoisted out of a loop or is already 7588 // used (perhaps because there are multiple permutes with the same shuffle 7589 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 7590 // the loop requires an extra register. 7591 // 7592 // As a compromise, we only emit discrete instructions if the shuffle can be 7593 // generated in 3 or fewer operations. When we have loop information 7594 // available, if this block is within a loop, we should avoid using vperm 7595 // for 3-operation perms and use a constant pool load instead. 7596 if (Cost < 3) 7597 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 7598 } 7599 7600 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 7601 // vector that will get spilled to the constant pool. 7602 if (V2.isUndef()) V2 = V1; 7603 7604 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 7605 // that it is in input element units, not in bytes. Convert now. 7606 7607 // For little endian, the order of the input vectors is reversed, and 7608 // the permutation mask is complemented with respect to 31. This is 7609 // necessary to produce proper semantics with the big-endian-biased vperm 7610 // instruction. 7611 EVT EltVT = V1.getValueType().getVectorElementType(); 7612 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 7613 7614 SmallVector<SDValue, 16> ResultMask; 7615 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 7616 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i]; 7617 7618 for (unsigned j = 0; j != BytesPerElement; ++j) 7619 if (isLittleEndian) 7620 ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j), 7621 dl, MVT::i32)); 7622 else 7623 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl, 7624 MVT::i32)); 7625 } 7626 7627 SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask); 7628 if (isLittleEndian) 7629 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 7630 V2, V1, VPermMask); 7631 else 7632 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 7633 V1, V2, VPermMask); 7634 } 7635 7636 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a 7637 /// vector comparison. If it is, return true and fill in Opc/isDot with 7638 /// information about the intrinsic. 7639 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc, 7640 bool &isDot, const PPCSubtarget &Subtarget) { 7641 unsigned IntrinsicID = 7642 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 7643 CompareOpc = -1; 7644 isDot = false; 7645 switch (IntrinsicID) { 7646 default: return false; 7647 // Comparison predicates. 
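  // Each _p case below records the extended-opcode value of the record-form
  // (dot) comparison instruction and sets isDot, since these intrinsics also
  // deliver a CR6 result.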
7648 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; 7649 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; 7650 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; 7651 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; 7652 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; 7653 case Intrinsic::ppc_altivec_vcmpequd_p: 7654 if (Subtarget.hasP8Altivec()) { 7655 CompareOpc = 199; 7656 isDot = 1; 7657 } else 7658 return false; 7659 7660 break; 7661 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; 7662 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; 7663 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; 7664 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; 7665 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; 7666 case Intrinsic::ppc_altivec_vcmpgtsd_p: 7667 if (Subtarget.hasP8Altivec()) { 7668 CompareOpc = 967; 7669 isDot = 1; 7670 } else 7671 return false; 7672 7673 break; 7674 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; 7675 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; 7676 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; 7677 case Intrinsic::ppc_altivec_vcmpgtud_p: 7678 if (Subtarget.hasP8Altivec()) { 7679 CompareOpc = 711; 7680 isDot = 1; 7681 } else 7682 return false; 7683 7684 break; 7685 // VSX predicate comparisons use the same infrastructure 7686 case Intrinsic::ppc_vsx_xvcmpeqdp_p: 7687 case Intrinsic::ppc_vsx_xvcmpgedp_p: 7688 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 7689 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 7690 case Intrinsic::ppc_vsx_xvcmpgesp_p: 7691 case Intrinsic::ppc_vsx_xvcmpgtsp_p: 7692 if (Subtarget.hasVSX()) { 7693 switch (IntrinsicID) { 7694 case Intrinsic::ppc_vsx_xvcmpeqdp_p: CompareOpc = 99; break; 7695 case Intrinsic::ppc_vsx_xvcmpgedp_p: CompareOpc = 115; break; 7696 case Intrinsic::ppc_vsx_xvcmpgtdp_p: CompareOpc = 107; break; 7697 case Intrinsic::ppc_vsx_xvcmpeqsp_p: CompareOpc = 67; break; 7698 case Intrinsic::ppc_vsx_xvcmpgesp_p: CompareOpc = 83; break; 7699 case Intrinsic::ppc_vsx_xvcmpgtsp_p: CompareOpc = 75; break; 7700 } 7701 isDot = 1; 7702 } 7703 else 7704 return false; 7705 7706 break; 7707 7708 // Normal Comparisons. 
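  // These use the same opcode values as the predicate forms above, but leave
  // isDot clear: only the full vector compare result is produced, and CR6 is
  // not written.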
7709 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 7710 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 7711 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 7712 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 7713 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 7714 case Intrinsic::ppc_altivec_vcmpequd: 7715 if (Subtarget.hasP8Altivec()) { 7716 CompareOpc = 199; 7717 isDot = 0; 7718 } else 7719 return false; 7720 7721 break; 7722 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 7723 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 7724 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 7725 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 7726 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 7727 case Intrinsic::ppc_altivec_vcmpgtsd: 7728 if (Subtarget.hasP8Altivec()) { 7729 CompareOpc = 967; 7730 isDot = 0; 7731 } else 7732 return false; 7733 7734 break; 7735 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 7736 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 7737 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 7738 case Intrinsic::ppc_altivec_vcmpgtud: 7739 if (Subtarget.hasP8Altivec()) { 7740 CompareOpc = 711; 7741 isDot = 0; 7742 } else 7743 return false; 7744 7745 break; 7746 } 7747 return true; 7748 } 7749 7750 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 7751 /// lower, do it, otherwise return null. 7752 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 7753 SelectionDAG &DAG) const { 7754 unsigned IntrinsicID = 7755 cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 7756 7757 if (IntrinsicID == Intrinsic::thread_pointer) { 7758 // Reads the thread pointer register, used for __builtin_thread_pointer. 7759 bool is64bit = Subtarget.isPPC64(); 7760 return DAG.getRegister(is64bit ? PPC::X13 : PPC::R2, 7761 is64bit ? MVT::i64 : MVT::i32); 7762 } 7763 7764 // If this is a lowered altivec predicate compare, CompareOpc is set to the 7765 // opcode number of the comparison. 7766 SDLoc dl(Op); 7767 int CompareOpc; 7768 bool isDot; 7769 if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget)) 7770 return SDValue(); // Don't custom lower most intrinsics. 7771 7772 // If this is a non-dot comparison, make the VCMP node and we are done. 7773 if (!isDot) { 7774 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 7775 Op.getOperand(1), Op.getOperand(2), 7776 DAG.getConstant(CompareOpc, dl, MVT::i32)); 7777 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 7778 } 7779 7780 // Create the PPCISD altivec 'dot' comparison node. 7781 SDValue Ops[] = { 7782 Op.getOperand(2), // LHS 7783 Op.getOperand(3), // RHS 7784 DAG.getConstant(CompareOpc, dl, MVT::i32) 7785 }; 7786 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue }; 7787 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 7788 7789 // Now that we have the comparison, emit a copy from the CR to a GPR. 7790 // This is flagged to the above dot comparison. 7791 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32, 7792 DAG.getRegister(PPC::CR6, MVT::i32), 7793 CompNode.getValue(1)); 7794 7795 // Unpack the result based on how the target uses it. 7796 unsigned BitNo; // Bit # of CR6. 7797 bool InvertBit; // Invert result? 
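  // Op.getOperand(1) is the constant predicate selector; the front end's
  // vec_all_* and vec_any_* builtins pass it to choose which CR6 bit to read
  // and whether to invert it.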
  switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
  default:  // Can't happen, don't crash on invalid number though.
  case 0:   // Return the value of the EQ bit of CR6.
    BitNo = 0; InvertBit = false;
    break;
  case 1:   // Return the inverted value of the EQ bit of CR6.
    BitNo = 0; InvertBit = true;
    break;
  case 2:   // Return the value of the LT bit of CR6.
    BitNo = 2; InvertBit = false;
    break;
  case 3:   // Return the inverted value of the LT bit of CR6.
    BitNo = 2; InvertBit = true;
    break;
  }

  // Shift the bit into the low position.
  Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
                      DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
  // Isolate the bit.
  Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
                      DAG.getConstant(1, dl, MVT::i32));

  // If we are supposed to, toggle the bit.
  if (InvertBit)
    Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
                        DAG.getConstant(1, dl, MVT::i32));
  return Flags;
}

SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // For v2i64 (VSX), we can pattern match the v2i32 case (using fp <-> int
  // instructions), but for smaller types, we need to first extend up to v2i32
  // before going any farther.
  if (Op.getValueType() == MVT::v2i64) {
    EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    if (ExtVT != MVT::v2i32) {
      Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0));
      Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op,
                       DAG.getValueType(EVT::getVectorVT(*DAG.getContext(),
                                        ExtVT.getVectorElementType(), 4)));
      Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op);
      Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op,
                       DAG.getValueType(MVT::v2i32));
    }

    return Op;
  }

  return SDValue();
}

SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // Create a stack slot that is 16-byte aligned.
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(16, 16, false);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  // Store the input value into Value#0 of the stack slot.
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
                               MachinePointerInfo());
  // Load it out.
  return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDNode *N = Op.getNode();

  assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
         "Unknown extract_vector_elt type");

  SDValue Value = N->getOperand(0);

  // The first part of this is like the store lowering except that we don't
  // need to track the chain.

  // The values are now known to be -1 (false) or 1 (true). To convert this
  // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
  // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
  Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

  // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
  // understand how to form the extending load.
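  // With the fma below, -1.0 * 0.5 + 0.5 == 0.0 and 1.0 * 0.5 + 0.5 == 1.0.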
7888 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 7889 7890 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 7891 7892 // Now convert to an integer and store. 7893 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 7894 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 7895 Value); 7896 7897 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 7898 int FrameIdx = MFI.CreateStackObject(16, 16, false); 7899 MachinePointerInfo PtrInfo = 7900 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 7901 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7902 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 7903 7904 SDValue StoreChain = DAG.getEntryNode(); 7905 SDValue Ops[] = {StoreChain, 7906 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32), 7907 Value, FIdx}; 7908 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other); 7909 7910 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 7911 dl, VTs, Ops, MVT::v4i32, PtrInfo); 7912 7913 // Extract the value requested. 7914 unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 7915 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 7916 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 7917 7918 SDValue IntVal = 7919 DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset)); 7920 7921 if (!Subtarget.useCRBits()) 7922 return IntVal; 7923 7924 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal); 7925 } 7926 7927 /// Lowering for QPX v4i1 loads 7928 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op, 7929 SelectionDAG &DAG) const { 7930 SDLoc dl(Op); 7931 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode()); 7932 SDValue LoadChain = LN->getChain(); 7933 SDValue BasePtr = LN->getBasePtr(); 7934 7935 if (Op.getValueType() == MVT::v4f64 || 7936 Op.getValueType() == MVT::v4f32) { 7937 EVT MemVT = LN->getMemoryVT(); 7938 unsigned Alignment = LN->getAlignment(); 7939 7940 // If this load is properly aligned, then it is legal. 
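    // (A v4f64 load, for example, is legal at an alignment of 32 bytes;
    // anything less is expanded into the four scalar loads below.)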
7941 if (Alignment >= MemVT.getStoreSize()) 7942 return Op; 7943 7944 EVT ScalarVT = Op.getValueType().getScalarType(), 7945 ScalarMemVT = MemVT.getScalarType(); 7946 unsigned Stride = ScalarMemVT.getStoreSize(); 7947 7948 SDValue Vals[4], LoadChains[4]; 7949 for (unsigned Idx = 0; Idx < 4; ++Idx) { 7950 SDValue Load; 7951 if (ScalarVT != ScalarMemVT) 7952 Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain, 7953 BasePtr, 7954 LN->getPointerInfo().getWithOffset(Idx * Stride), 7955 ScalarMemVT, MinAlign(Alignment, Idx * Stride), 7956 LN->getMemOperand()->getFlags(), LN->getAAInfo()); 7957 else 7958 Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr, 7959 LN->getPointerInfo().getWithOffset(Idx * Stride), 7960 MinAlign(Alignment, Idx * Stride), 7961 LN->getMemOperand()->getFlags(), LN->getAAInfo()); 7962 7963 if (Idx == 0 && LN->isIndexed()) { 7964 assert(LN->getAddressingMode() == ISD::PRE_INC && 7965 "Unknown addressing mode on vector load"); 7966 Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(), 7967 LN->getAddressingMode()); 7968 } 7969 7970 Vals[Idx] = Load; 7971 LoadChains[Idx] = Load.getValue(1); 7972 7973 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 7974 DAG.getConstant(Stride, dl, 7975 BasePtr.getValueType())); 7976 } 7977 7978 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 7979 SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals); 7980 7981 if (LN->isIndexed()) { 7982 SDValue RetOps[] = { Value, Vals[0].getValue(1), TF }; 7983 return DAG.getMergeValues(RetOps, dl); 7984 } 7985 7986 SDValue RetOps[] = { Value, TF }; 7987 return DAG.getMergeValues(RetOps, dl); 7988 } 7989 7990 assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower"); 7991 assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported"); 7992 7993 // To lower v4i1 from a byte array, we load the byte elements of the 7994 // vector and then reuse the BUILD_VECTOR logic. 7995 7996 SDValue VectElmts[4], VectElmtChains[4]; 7997 for (unsigned i = 0; i < 4; ++i) { 7998 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 7999 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 8000 8001 VectElmts[i] = DAG.getExtLoad( 8002 ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx, 8003 LN->getPointerInfo().getWithOffset(i), MVT::i8, 8004 /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo()); 8005 VectElmtChains[i] = VectElmts[i].getValue(1); 8006 } 8007 8008 LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains); 8009 SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts); 8010 8011 SDValue RVals[] = { Value, LoadChain }; 8012 return DAG.getMergeValues(RVals, dl); 8013 } 8014 8015 /// Lowering for QPX v4i1 stores 8016 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op, 8017 SelectionDAG &DAG) const { 8018 SDLoc dl(Op); 8019 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode()); 8020 SDValue StoreChain = SN->getChain(); 8021 SDValue BasePtr = SN->getBasePtr(); 8022 SDValue Value = SN->getValue(); 8023 8024 if (Value.getValueType() == MVT::v4f64 || 8025 Value.getValueType() == MVT::v4f32) { 8026 EVT MemVT = SN->getMemoryVT(); 8027 unsigned Alignment = SN->getAlignment(); 8028 8029 // If this store is properly aligned, then it is legal. 
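    // (As with the unaligned-load case above, an under-aligned v4f64 or
    // v4f32 store is expanded into four scalar stores below.)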
8030 if (Alignment >= MemVT.getStoreSize()) 8031 return Op; 8032 8033 EVT ScalarVT = Value.getValueType().getScalarType(), 8034 ScalarMemVT = MemVT.getScalarType(); 8035 unsigned Stride = ScalarMemVT.getStoreSize(); 8036 8037 SDValue Stores[4]; 8038 for (unsigned Idx = 0; Idx < 4; ++Idx) { 8039 SDValue Ex = DAG.getNode( 8040 ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value, 8041 DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout()))); 8042 SDValue Store; 8043 if (ScalarVT != ScalarMemVT) 8044 Store = 8045 DAG.getTruncStore(StoreChain, dl, Ex, BasePtr, 8046 SN->getPointerInfo().getWithOffset(Idx * Stride), 8047 ScalarMemVT, MinAlign(Alignment, Idx * Stride), 8048 SN->getMemOperand()->getFlags(), SN->getAAInfo()); 8049 else 8050 Store = DAG.getStore(StoreChain, dl, Ex, BasePtr, 8051 SN->getPointerInfo().getWithOffset(Idx * Stride), 8052 MinAlign(Alignment, Idx * Stride), 8053 SN->getMemOperand()->getFlags(), SN->getAAInfo()); 8054 8055 if (Idx == 0 && SN->isIndexed()) { 8056 assert(SN->getAddressingMode() == ISD::PRE_INC && 8057 "Unknown addressing mode on vector store"); 8058 Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(), 8059 SN->getAddressingMode()); 8060 } 8061 8062 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 8063 DAG.getConstant(Stride, dl, 8064 BasePtr.getValueType())); 8065 Stores[Idx] = Store; 8066 } 8067 8068 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 8069 8070 if (SN->isIndexed()) { 8071 SDValue RetOps[] = { TF, Stores[0].getValue(1) }; 8072 return DAG.getMergeValues(RetOps, dl); 8073 } 8074 8075 return TF; 8076 } 8077 8078 assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported"); 8079 assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower"); 8080 8081 // The values are now known to be -1 (false) or 1 (true). To convert this 8082 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 8083 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 8084 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 8085 8086 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 8087 // understand how to form the extending load. 8088 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 8089 8090 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 8091 8092 // Now convert to an integer and store. 8093 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 8094 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 8095 Value); 8096 8097 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 8098 int FrameIdx = MFI.CreateStackObject(16, 16, false); 8099 MachinePointerInfo PtrInfo = 8100 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 8101 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 8102 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8103 8104 SDValue Ops[] = {StoreChain, 8105 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32), 8106 Value, FIdx}; 8107 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other); 8108 8109 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 8110 dl, VTs, Ops, MVT::v4i32, PtrInfo); 8111 8112 // Move data into the byte array. 
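  // The qvstfiw above wrote four i32 words into the stack slot; reload them
  // and truncate each to the i8 memory form of the v4i1 store.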
8113 SDValue Loads[4], LoadChains[4]; 8114 for (unsigned i = 0; i < 4; ++i) { 8115 unsigned Offset = 4*i; 8116 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 8117 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 8118 8119 Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx, 8120 PtrInfo.getWithOffset(Offset)); 8121 LoadChains[i] = Loads[i].getValue(1); 8122 } 8123 8124 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 8125 8126 SDValue Stores[4]; 8127 for (unsigned i = 0; i < 4; ++i) { 8128 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 8129 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 8130 8131 Stores[i] = DAG.getTruncStore( 8132 StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i), 8133 MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(), 8134 SN->getAAInfo()); 8135 } 8136 8137 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 8138 8139 return StoreChain; 8140 } 8141 8142 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 8143 SDLoc dl(Op); 8144 if (Op.getValueType() == MVT::v4i32) { 8145 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 8146 8147 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl); 8148 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt. 8149 8150 SDValue RHSSwap = // = vrlw RHS, 16 8151 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 8152 8153 // Shrinkify inputs to v8i16. 8154 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS); 8155 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS); 8156 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap); 8157 8158 // Low parts multiplied together, generating 32-bit results (we ignore the 8159 // top parts). 8160 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 8161 LHS, RHS, DAG, dl, MVT::v4i32); 8162 8163 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 8164 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 8165 // Shift the high parts up 16 bits. 8166 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 8167 Neg16, DAG, dl); 8168 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 8169 } else if (Op.getValueType() == MVT::v8i16) { 8170 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 8171 8172 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl); 8173 8174 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 8175 LHS, RHS, Zero, DAG, dl); 8176 } else if (Op.getValueType() == MVT::v16i8) { 8177 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 8178 bool isLittleEndian = Subtarget.isLittleEndian(); 8179 8180 // Multiply the even 8-bit parts, producing 16-bit sums. 8181 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 8182 LHS, RHS, DAG, dl, MVT::v8i16); 8183 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts); 8184 8185 // Multiply the odd 8-bit parts, producing 16-bit sums. 8186 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 8187 LHS, RHS, DAG, dl, MVT::v8i16); 8188 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts); 8189 8190 // Merge the results together. Because vmuleub and vmuloub are 8191 // instructions with a big-endian bias, we must reverse the 8192 // element numbering and reverse the meaning of "odd" and "even" 8193 // when generating little endian code. 
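    // For example, big-endian targets use the mask <1,17,3,19,...> to take
    // the low byte of each 16-bit product, while little-endian targets use
    // <0,16,2,18,...> with the two shuffle operands swapped.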
8194 int Ops[16]; 8195 for (unsigned i = 0; i != 8; ++i) { 8196 if (isLittleEndian) { 8197 Ops[i*2 ] = 2*i; 8198 Ops[i*2+1] = 2*i+16; 8199 } else { 8200 Ops[i*2 ] = 2*i+1; 8201 Ops[i*2+1] = 2*i+1+16; 8202 } 8203 } 8204 if (isLittleEndian) 8205 return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops); 8206 else 8207 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 8208 } else { 8209 llvm_unreachable("Unknown mul to lower!"); 8210 } 8211 } 8212 8213 /// LowerOperation - Provide custom lowering hooks for some operations. 8214 /// 8215 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 8216 switch (Op.getOpcode()) { 8217 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 8218 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 8219 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 8220 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 8221 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 8222 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 8223 case ISD::SETCC: return LowerSETCC(Op, DAG); 8224 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 8225 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 8226 case ISD::VASTART: 8227 return LowerVASTART(Op, DAG); 8228 8229 case ISD::VAARG: 8230 return LowerVAARG(Op, DAG); 8231 8232 case ISD::VACOPY: 8233 return LowerVACOPY(Op, DAG); 8234 8235 case ISD::STACKRESTORE: 8236 return LowerSTACKRESTORE(Op, DAG); 8237 8238 case ISD::DYNAMIC_STACKALLOC: 8239 return LowerDYNAMIC_STACKALLOC(Op, DAG); 8240 8241 case ISD::GET_DYNAMIC_AREA_OFFSET: 8242 return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG); 8243 8244 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 8245 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 8246 8247 case ISD::LOAD: return LowerLOAD(Op, DAG); 8248 case ISD::STORE: return LowerSTORE(Op, DAG); 8249 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); 8250 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 8251 case ISD::FP_TO_UINT: 8252 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, 8253 SDLoc(Op)); 8254 case ISD::UINT_TO_FP: 8255 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 8256 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 8257 8258 // Lower 64-bit shifts. 8259 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 8260 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 8261 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 8262 8263 // Vector-related lowering. 8264 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 8265 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 8266 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 8267 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 8268 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG); 8269 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 8270 case ISD::MUL: return LowerMUL(Op, DAG); 8271 8272 // For counter-based loop handling. 8273 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 8274 8275 // Frame & Return address. 
8276 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 8277 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 8278 } 8279 } 8280 8281 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 8282 SmallVectorImpl<SDValue>&Results, 8283 SelectionDAG &DAG) const { 8284 SDLoc dl(N); 8285 switch (N->getOpcode()) { 8286 default: 8287 llvm_unreachable("Do not know how to custom type legalize this operation!"); 8288 case ISD::READCYCLECOUNTER: { 8289 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 8290 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0)); 8291 8292 Results.push_back(RTB); 8293 Results.push_back(RTB.getValue(1)); 8294 Results.push_back(RTB.getValue(2)); 8295 break; 8296 } 8297 case ISD::INTRINSIC_W_CHAIN: { 8298 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 8299 Intrinsic::ppc_is_decremented_ctr_nonzero) 8300 break; 8301 8302 assert(N->getValueType(0) == MVT::i1 && 8303 "Unexpected result type for CTR decrement intrinsic"); 8304 EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), 8305 N->getValueType(0)); 8306 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 8307 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 8308 N->getOperand(1)); 8309 8310 Results.push_back(NewInt); 8311 Results.push_back(NewInt.getValue(1)); 8312 break; 8313 } 8314 case ISD::VAARG: { 8315 if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64()) 8316 return; 8317 8318 EVT VT = N->getValueType(0); 8319 8320 if (VT == MVT::i64) { 8321 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG); 8322 8323 Results.push_back(NewNode); 8324 Results.push_back(NewNode.getValue(1)); 8325 } 8326 return; 8327 } 8328 case ISD::FP_ROUND_INREG: { 8329 assert(N->getValueType(0) == MVT::ppcf128); 8330 assert(N->getOperand(0).getValueType() == MVT::ppcf128); 8331 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 8332 MVT::f64, N->getOperand(0), 8333 DAG.getIntPtrConstant(0, dl)); 8334 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 8335 MVT::f64, N->getOperand(0), 8336 DAG.getIntPtrConstant(1, dl)); 8337 8338 // Add the two halves of the long double in round-to-zero mode. 8339 SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 8340 8341 // We know the low half is about to be thrown away, so just use something 8342 // convenient. 8343 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128, 8344 FPreg, FPreg)); 8345 return; 8346 } 8347 case ISD::FP_TO_SINT: 8348 case ISD::FP_TO_UINT: 8349 // LowerFP_TO_INT() can only handle f32 and f64. 
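    // A ppcf128 source is left alone here and falls back to the default
    // expansion instead.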
    if (N->getOperand(0).getValueType() == MVT::ppcf128)
      return;
    Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
    return;
  }
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *Func = Intrinsic::getDeclaration(M, Id);
  return Builder.CreateCall(Func, {});
}

// The mappings for emitLeading/TrailingFence are taken from
// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                 AtomicOrdering Ord, bool IsStore,
                                                 bool IsLoad) const {
  if (Ord == AtomicOrdering::SequentiallyConsistent)
    return callIntrinsic(Builder, Intrinsic::ppc_sync);
  if (isReleaseOrStronger(Ord))
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  return nullptr;
}

Instruction* PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                  AtomicOrdering Ord, bool IsStore,
                                                  bool IsLoad) const {
  if (IsLoad && isAcquireOrStronger(Ord))
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  // FIXME: this is too conservative, a dependent branch + isync is enough.
  // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
  // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
  // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
  return nullptr;
}

MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
                                    unsigned AtomicSize,
                                    unsigned BinOpcode) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
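  // For an atomic add, for instance, BinOpcode is ADD4 (or ADD8 for 64-bit),
  // and the loop emitted below performs a load-reserve, applies the binary
  // operation, and retries the store-conditional until it succeeds.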
8396 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8397 8398 auto LoadMnemonic = PPC::LDARX; 8399 auto StoreMnemonic = PPC::STDCX; 8400 switch (AtomicSize) { 8401 default: 8402 llvm_unreachable("Unexpected size of atomic entity"); 8403 case 1: 8404 LoadMnemonic = PPC::LBARX; 8405 StoreMnemonic = PPC::STBCX; 8406 assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4"); 8407 break; 8408 case 2: 8409 LoadMnemonic = PPC::LHARX; 8410 StoreMnemonic = PPC::STHCX; 8411 assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4"); 8412 break; 8413 case 4: 8414 LoadMnemonic = PPC::LWARX; 8415 StoreMnemonic = PPC::STWCX; 8416 break; 8417 case 8: 8418 LoadMnemonic = PPC::LDARX; 8419 StoreMnemonic = PPC::STDCX; 8420 break; 8421 } 8422 8423 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8424 MachineFunction *F = BB->getParent(); 8425 MachineFunction::iterator It = ++BB->getIterator(); 8426 8427 unsigned dest = MI.getOperand(0).getReg(); 8428 unsigned ptrA = MI.getOperand(1).getReg(); 8429 unsigned ptrB = MI.getOperand(2).getReg(); 8430 unsigned incr = MI.getOperand(3).getReg(); 8431 DebugLoc dl = MI.getDebugLoc(); 8432 8433 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 8434 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 8435 F->insert(It, loopMBB); 8436 F->insert(It, exitMBB); 8437 exitMBB->splice(exitMBB->begin(), BB, 8438 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8439 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 8440 8441 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8442 unsigned TmpReg = (!BinOpcode) ? incr : 8443 RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass 8444 : &PPC::GPRCRegClass); 8445 8446 // thisMBB: 8447 // ... 8448 // fallthrough --> loopMBB 8449 BB->addSuccessor(loopMBB); 8450 8451 // loopMBB: 8452 // l[wd]arx dest, ptr 8453 // add r0, dest, incr 8454 // st[wd]cx. r0, ptr 8455 // bne- loopMBB 8456 // fallthrough --> exitMBB 8457 BB = loopMBB; 8458 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 8459 .addReg(ptrA).addReg(ptrB); 8460 if (BinOpcode) 8461 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 8462 BuildMI(BB, dl, TII->get(StoreMnemonic)) 8463 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 8464 BuildMI(BB, dl, TII->get(PPC::BCC)) 8465 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 8466 BB->addSuccessor(loopMBB); 8467 BB->addSuccessor(exitMBB); 8468 8469 // exitMBB: 8470 // ... 8471 BB = exitMBB; 8472 return BB; 8473 } 8474 8475 MachineBasicBlock * 8476 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr &MI, 8477 MachineBasicBlock *BB, 8478 bool is8bit, // operation 8479 unsigned BinOpcode) const { 8480 // If we support part-word atomic mnemonics, just use them 8481 if (Subtarget.hasPartwordAtomics()) 8482 return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode); 8483 8484 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 8485 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8486 // In 64 bit mode we have to use 64 bits for addresses, even though the 8487 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 8488 // registers without caring whether they're 32 or 64, but here we're 8489 // doing actual arithmetic on the addresses. 8490 bool is64bit = Subtarget.isPPC64(); 8491 unsigned ZeroReg = is64bit ? 
PPC::ZERO8 : PPC::ZERO; 8492 8493 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8494 MachineFunction *F = BB->getParent(); 8495 MachineFunction::iterator It = ++BB->getIterator(); 8496 8497 unsigned dest = MI.getOperand(0).getReg(); 8498 unsigned ptrA = MI.getOperand(1).getReg(); 8499 unsigned ptrB = MI.getOperand(2).getReg(); 8500 unsigned incr = MI.getOperand(3).getReg(); 8501 DebugLoc dl = MI.getDebugLoc(); 8502 8503 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 8504 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 8505 F->insert(It, loopMBB); 8506 F->insert(It, exitMBB); 8507 exitMBB->splice(exitMBB->begin(), BB, 8508 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8509 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 8510 8511 MachineRegisterInfo &RegInfo = F->getRegInfo(); 8512 const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass 8513 : &PPC::GPRCRegClass; 8514 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 8515 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 8516 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 8517 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC); 8518 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 8519 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 8520 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 8521 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 8522 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC); 8523 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 8524 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 8525 unsigned Ptr1Reg; 8526 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC); 8527 8528 // thisMBB: 8529 // ... 8530 // fallthrough --> loopMBB 8531 BB->addSuccessor(loopMBB); 8532 8533 // The 4-byte load must be aligned, while a char or short may be 8534 // anywhere in the word. Hence all this nasty bookkeeping code. 8535 // add ptr1, ptrA, ptrB [copy if ptrA==0] 8536 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 8537 // xori shift, shift1, 24 [16] 8538 // rlwinm ptr, ptr1, 0, 0, 29 8539 // slw incr2, incr, shift 8540 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 8541 // slw mask, mask2, shift 8542 // loopMBB: 8543 // lwarx tmpDest, ptr 8544 // add tmp, tmpDest, incr2 8545 // andc tmp2, tmpDest, mask 8546 // and tmp3, tmp, mask 8547 // or tmp4, tmp3, tmp2 8548 // stwcx. tmp4, ptr 8549 // bne- loopMBB 8550 // fallthrough --> exitMBB 8551 // srw dest, tmpDest, shift 8552 if (ptrA != ZeroReg) { 8553 Ptr1Reg = RegInfo.createVirtualRegister(RC); 8554 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 8555 .addReg(ptrA).addReg(ptrB); 8556 } else { 8557 Ptr1Reg = ptrB; 8558 } 8559 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 8560 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 8561 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 8562 .addReg(Shift1Reg).addImm(is8bit ? 
24 : 16); 8563 if (is64bit) 8564 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 8565 .addReg(Ptr1Reg).addImm(0).addImm(61); 8566 else 8567 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 8568 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 8569 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg) 8570 .addReg(incr).addReg(ShiftReg); 8571 if (is8bit) 8572 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 8573 else { 8574 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 8575 BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535); 8576 } 8577 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 8578 .addReg(Mask2Reg).addReg(ShiftReg); 8579 8580 BB = loopMBB; 8581 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 8582 .addReg(ZeroReg).addReg(PtrReg); 8583 if (BinOpcode) 8584 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 8585 .addReg(Incr2Reg).addReg(TmpDestReg); 8586 BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg) 8587 .addReg(TmpDestReg).addReg(MaskReg); 8588 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg) 8589 .addReg(TmpReg).addReg(MaskReg); 8590 BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg) 8591 .addReg(Tmp3Reg).addReg(Tmp2Reg); 8592 BuildMI(BB, dl, TII->get(PPC::STWCX)) 8593 .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg); 8594 BuildMI(BB, dl, TII->get(PPC::BCC)) 8595 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 8596 BB->addSuccessor(loopMBB); 8597 BB->addSuccessor(exitMBB); 8598 8599 // exitMBB: 8600 // ... 8601 BB = exitMBB; 8602 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg) 8603 .addReg(ShiftReg); 8604 return BB; 8605 } 8606 8607 llvm::MachineBasicBlock * 8608 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI, 8609 MachineBasicBlock *MBB) const { 8610 DebugLoc DL = MI.getDebugLoc(); 8611 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 8612 8613 MachineFunction *MF = MBB->getParent(); 8614 MachineRegisterInfo &MRI = MF->getRegInfo(); 8615 8616 const BasicBlock *BB = MBB->getBasicBlock(); 8617 MachineFunction::iterator I = ++MBB->getIterator(); 8618 8619 // Memory Reference 8620 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin(); 8621 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end(); 8622 8623 unsigned DstReg = MI.getOperand(0).getReg(); 8624 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 8625 assert(RC->hasType(MVT::i32) && "Invalid destination!"); 8626 unsigned mainDstReg = MRI.createVirtualRegister(RC); 8627 unsigned restoreDstReg = MRI.createVirtualRegister(RC); 8628 8629 MVT PVT = getPointerTy(MF->getDataLayout()); 8630 assert((PVT == MVT::i64 || PVT == MVT::i32) && 8631 "Invalid Pointer Size!"); 8632 // For v = setjmp(buf), we generate 8633 // 8634 // thisMBB: 8635 // SjLjSetup mainMBB 8636 // bl mainMBB 8637 // v_restore = 1 8638 // b sinkMBB 8639 // 8640 // mainMBB: 8641 // buf[LabelOffset] = LR 8642 // v_main = 0 8643 // 8644 // sinkMBB: 8645 // v = phi(main, restore) 8646 // 8647 8648 MachineBasicBlock *thisMBB = MBB; 8649 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 8650 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 8651 MF->insert(I, mainMBB); 8652 MF->insert(I, sinkMBB); 8653 8654 MachineInstrBuilder MIB; 8655 8656 // Transfer the remainder of BB and its successor edges to sinkMBB. 
8657 sinkMBB->splice(sinkMBB->begin(), MBB, 8658 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 8659 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 8660 8661 // Note that the structure of the jmp_buf used here is not compatible 8662 // with that used by libc, and is not designed to be. Specifically, it 8663 // stores only those 'reserved' registers that LLVM does not otherwise 8664 // understand how to spill. Also, by convention, by the time this 8665 // intrinsic is called, Clang has already stored the frame address in the 8666 // first slot of the buffer and stack address in the third. Following the 8667 // X86 target code, we'll store the jump address in the second slot. We also 8668 // need to save the TOC pointer (R2) to handle jumps between shared 8669 // libraries, and that will be stored in the fourth slot. The thread 8670 // identifier (R13) is not affected. 8671 8672 // thisMBB: 8673 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 8674 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 8675 const int64_t BPOffset = 4 * PVT.getStoreSize(); 8676 8677 // Prepare IP either in reg. 8678 const TargetRegisterClass *PtrRC = getRegClassFor(PVT); 8679 unsigned LabelReg = MRI.createVirtualRegister(PtrRC); 8680 unsigned BufReg = MI.getOperand(1).getReg(); 8681 8682 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) { 8683 setUsesTOCBasePtr(*MBB->getParent()); 8684 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD)) 8685 .addReg(PPC::X2) 8686 .addImm(TOCOffset) 8687 .addReg(BufReg); 8688 MIB.setMemRefs(MMOBegin, MMOEnd); 8689 } 8690 8691 // Naked functions never have a base pointer, and so we use r1. For all 8692 // other functions, this decision must be delayed until during PEI. 8693 unsigned BaseReg; 8694 if (MF->getFunction()->hasFnAttribute(Attribute::Naked)) 8695 BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1; 8696 else 8697 BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP; 8698 8699 MIB = BuildMI(*thisMBB, MI, DL, 8700 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW)) 8701 .addReg(BaseReg) 8702 .addImm(BPOffset) 8703 .addReg(BufReg); 8704 MIB.setMemRefs(MMOBegin, MMOEnd); 8705 8706 // Setup 8707 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB); 8708 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 8709 MIB.addRegMask(TRI->getNoPreservedMask()); 8710 8711 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1); 8712 8713 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup)) 8714 .addMBB(mainMBB); 8715 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB); 8716 8717 thisMBB->addSuccessor(mainMBB, BranchProbability::getZero()); 8718 thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne()); 8719 8720 // mainMBB: 8721 // mainDstReg = 0 8722 MIB = 8723 BuildMI(mainMBB, DL, 8724 TII->get(Subtarget.isPPC64() ? 

  // Store IP
  if (Subtarget.isPPC64()) {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
              .addReg(LabelReg)
              .addImm(LabelOffset)
              .addReg(BufReg);
  } else {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
              .addReg(LabelReg)
              .addImm(LabelOffset)
              .addReg(BufReg);
  }

  MIB.setMemRefs(MMOBegin, MMOEnd);

  BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
  mainMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(PPC::PHI), DstReg)
      .addReg(mainDstReg).addMBB(mainMBB)
      .addReg(restoreDstReg).addMBB(thisMBB);

  MI.eraseFromParent();
  return sinkMBB;
}

MachineBasicBlock *
PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
                                     MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  // Memory Reference
  MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  const TargetRegisterClass *RC =
      (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
  unsigned Tmp = MRI.createVirtualRegister(RC);
  // Since FP is only updated here but NOT referenced, it's treated as GPR.
  unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
  unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
  unsigned BP =
      (PVT == MVT::i64)
          ? PPC::X30
          : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
                                                              : PPC::R30);

  MachineInstrBuilder MIB;

  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t SPOffset = 2 * PVT.getStoreSize();
  const int64_t TOCOffset = 3 * PVT.getStoreSize();
  const int64_t BPOffset = 4 * PVT.getStoreSize();

  unsigned BufReg = MI.getOperand(0).getReg();

  // Reload FP (the jumped-to function may not have had a
  // frame pointer, and if so, then its r31 will be restored
  // as necessary).
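  // Note: the slot layout read back here mirrors the one written by
  // emitEHSjLjSetJmp above: slot 0 holds the frame address, slot 1
  // (LabelOffset) the jump address, slot 2 (SPOffset) the stack pointer,
  // slot 3 (TOCOffset) the TOC pointer, and slot 4 (BPOffset) the base
  // pointer.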
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
              .addImm(0)
              .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
              .addImm(0)
              .addReg(BufReg);
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);

  // Reload IP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
              .addImm(LabelOffset)
              .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
              .addImm(LabelOffset)
              .addReg(BufReg);
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);

  // Reload SP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
              .addImm(SPOffset)
              .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
              .addImm(SPOffset)
              .addReg(BufReg);
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);

  // Reload BP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
              .addImm(BPOffset)
              .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
              .addImm(BPOffset)
              .addReg(BufReg);
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);

  // Reload TOC
  if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
    setUsesTOCBasePtr(*MBB->getParent());
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
              .addImm(TOCOffset)
              .addReg(BufReg);

    MIB.setMemRefs(MMOBegin, MMOEnd);
  }

  // Jump
  BuildMI(*MBB, MI, DL,
          TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
  BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));

  MI.eraseFromParent();
  return MBB;
}

MachineBasicBlock *
PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {
  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT) {
    if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() &&
        MI.getOpcode() == TargetOpcode::PATCHPOINT) {
      // Call lowering should have added an r2 operand to indicate a
      // dependence on the TOC base pointer value. It can't, however, because
      // there is no way to mark the dependence as implicit there, and so the
      // stackmap code will confuse it with a regular operand. Instead, add
      // the dependence here.
      setUsesTOCBasePtr(*BB->getParent());
      MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
    }

    return emitPatchPoint(MI, BB);
  }

  if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
      MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
    return emitEHSjLjSetJmp(MI, BB);
  } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
             MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
    return emitEHSjLjLongJmp(MI, BB);
  }

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  // To "insert" these instructions we actually have to insert their
  // control-flow patterns.
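  // For the SELECT pseudos handled below, this means building a small
  // diamond: a conditional branch out of the current block, a fall-through
  // block producing the "false" value, and a join block with a PHI merging
  // the two values.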
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  MachineFunction *F = BB->getParent();

  if (Subtarget.hasISEL() &&
      (MI.getOpcode() == PPC::SELECT_CC_I4 ||
       MI.getOpcode() == PPC::SELECT_CC_I8 ||
       MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8)) {
    SmallVector<MachineOperand, 2> Cond;
    if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
        MI.getOpcode() == PPC::SELECT_CC_I8)
      Cond.push_back(MI.getOperand(4));
    else
      Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
    Cond.push_back(MI.getOperand(1));

    DebugLoc dl = MI.getDebugLoc();
    TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
                      MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
  } else if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
             MI.getOpcode() == PPC::SELECT_CC_I8 ||
             MI.getOpcode() == PPC::SELECT_CC_F4 ||
             MI.getOpcode() == PPC::SELECT_CC_F8 ||
             MI.getOpcode() == PPC::SELECT_CC_QFRC ||
             MI.getOpcode() == PPC::SELECT_CC_QSRC ||
             MI.getOpcode() == PPC::SELECT_CC_QBRC ||
             MI.getOpcode() == PPC::SELECT_CC_VRRC ||
             MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
             MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
             MI.getOpcode() == PPC::SELECT_CC_VSRC ||
             MI.getOpcode() == PPC::SELECT_I4 ||
             MI.getOpcode() == PPC::SELECT_I8 ||
             MI.getOpcode() == PPC::SELECT_F4 ||
             MI.getOpcode() == PPC::SELECT_F8 ||
             MI.getOpcode() == PPC::SELECT_QFRC ||
             MI.getOpcode() == PPC::SELECT_QSRC ||
             MI.getOpcode() == PPC::SELECT_QBRC ||
             MI.getOpcode() == PPC::SELECT_VRRC ||
             MI.getOpcode() == PPC::SELECT_VSFRC ||
             MI.getOpcode() == PPC::SELECT_VSSRC ||
             MI.getOpcode() == PPC::SELECT_VSRC) {
    // The incoming instruction knows the destination vreg to set, the
    // condition code register to branch on, the true/false values to
    // select between, and a branch opcode to use.

    // thisMBB:
    // ...
    //  TrueVal = ...
    //  cmpTY ccX, r1, r2
    //  bCC copy1MBB
    //  fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
    DebugLoc dl = MI.getDebugLoc();
    F->insert(It, copy0MBB);
    F->insert(It, sinkMBB);

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    sinkMBB->splice(sinkMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

    // Next, add the true and fallthrough blocks as its successors.
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
        MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
        MI.getOpcode() == PPC::SELECT_QFRC ||
        MI.getOpcode() == PPC::SELECT_QSRC ||
        MI.getOpcode() == PPC::SELECT_QBRC ||
        MI.getOpcode() == PPC::SELECT_VRRC ||
        MI.getOpcode() == PPC::SELECT_VSFRC ||
        MI.getOpcode() == PPC::SELECT_VSSRC ||
        MI.getOpcode() == PPC::SELECT_VSRC) {
      BuildMI(BB, dl, TII->get(PPC::BC))
          .addReg(MI.getOperand(1).getReg())
          .addMBB(sinkMBB);
    } else {
      unsigned SelectPred = MI.getOperand(4).getImm();
      BuildMI(BB, dl, TII->get(PPC::BCC))
          .addImm(SelectPred)
          .addReg(MI.getOperand(1).getReg())
          .addMBB(sinkMBB);
    }

    // copy0MBB:
    //  %FalseValue = ...
    //  # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    // sinkMBB:
    //  %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
        .addReg(MI.getOperand(3).getReg())
        .addMBB(copy0MBB)
        .addReg(MI.getOperand(2).getReg())
        .addMBB(thisMBB);
  } else if (MI.getOpcode() == PPC::ReadTB) {
    // To read the 64-bit time-base register on a 32-bit target, we read the
    // two halves. Should the counter have wrapped while it was being read, we
    // need to try again.
    // ...
    // readLoop:
    //   mfspr Rx,TBU # load from TBU
    //   mfspr Ry,TB  # load from TB
    //   mfspr Rz,TBU # load from TBU
    //   cmpw crX,Rx,Rz # check if 'old'='new'
    //   bne readLoop   # branch if they're not equal
    // ...

    MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
    DebugLoc dl = MI.getDebugLoc();
    F->insert(It, readMBB);
    F->insert(It, sinkMBB);

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    sinkMBB->splice(sinkMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

    BB->addSuccessor(readMBB);
    BB = readMBB;

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
    unsigned LoReg = MI.getOperand(0).getReg();
    unsigned HiReg = MI.getOperand(1).getReg();

    BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
    BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
    BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);

    unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);

    BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
        .addReg(HiReg).addReg(ReadAgainReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB);

    BB->addSuccessor(readMBB);
    BB->addSuccessor(sinkMBB);
  } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);

  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0);

  else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
           MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
           (Subtarget.hasPartwordAtomics() &&
            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
           (Subtarget.hasPartwordAtomics() &&
            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
    bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;

    auto LoadMnemonic = PPC::LDARX;
    auto StoreMnemonic = PPC::STDCX;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Compare and swap of unknown size");
    case PPC::ATOMIC_CMP_SWAP_I8:
      LoadMnemonic = PPC::LBARX;
      StoreMnemonic = PPC::STBCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "Partword atomics not supported.");
      break;
    case PPC::ATOMIC_CMP_SWAP_I16:
      LoadMnemonic = PPC::LHARX;
      StoreMnemonic = PPC::STHCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "Partword atomics not supported.");
      break;
    case PPC::ATOMIC_CMP_SWAP_I32:
      LoadMnemonic = PPC::LWARX;
      StoreMnemonic = PPC::STWCX;
      break;
    case PPC::ATOMIC_CMP_SWAP_I64:
      LoadMnemonic = PPC::LDARX;
      StoreMnemonic = PPC::STDCX;
      break;
    }
    unsigned dest = MI.getOperand(0).getReg();
    unsigned ptrA = MI.getOperand(1).getReg();
    unsigned ptrB = MI.getOperand(2).getReg();
    unsigned oldval = MI.getOperand(3).getReg();
    unsigned newval = MI.getOperand(4).getReg();
    DebugLoc dl = MI.getDebugLoc();

    MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, loop1MBB);
    F->insert(It, loop2MBB);
    F->insert(It, midMBB);
    F->insert(It, exitMBB);
    exitMBB->splice(exitMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    exitMBB->transferSuccessorsAndUpdatePHIs(BB);

    // thisMBB:
    //   ...
    //   fallthrough --> loopMBB
    BB->addSuccessor(loop1MBB);

    // loop1MBB:
    //   l[bhwd]arx dest, ptr
    //   cmp[wd] dest, oldval
    //   bne- midMBB
    // loop2MBB:
    //   st[bhwd]cx. newval, ptr
    //   bne- loopMBB
    //   b exitBB
    // midMBB:
    //   st[bhwd]cx. dest, ptr
    // exitBB:
    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
        .addReg(ptrA).addReg(ptrB);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
        .addReg(oldval).addReg(dest);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(midMBB);

    BB = loop2MBB;
    BuildMI(BB, dl, TII->get(StoreMnemonic))
        .addReg(newval).addReg(ptrA).addReg(ptrB);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
    BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
    BB->addSuccessor(loop1MBB);
    BB->addSuccessor(exitMBB);

    BB = midMBB;
    BuildMI(BB, dl, TII->get(StoreMnemonic))
        .addReg(dest).addReg(ptrA).addReg(ptrB);
    BB->addSuccessor(exitMBB);

    // exitMBB:
    //   ...
    BB = exitMBB;
  } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
    // We must use 64-bit registers for addresses when targeting 64-bit,
    // since we're actually doing arithmetic on them. Other registers
    // can be 32-bit.
    bool is64bit = Subtarget.isPPC64();
    bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;

    unsigned dest = MI.getOperand(0).getReg();
    unsigned ptrA = MI.getOperand(1).getReg();
    unsigned ptrB = MI.getOperand(2).getReg();
    unsigned oldval = MI.getOperand(3).getReg();
    unsigned newval = MI.getOperand(4).getReg();
    DebugLoc dl = MI.getDebugLoc();

    MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, loop1MBB);
    F->insert(It, loop2MBB);
    F->insert(It, midMBB);
    F->insert(It, exitMBB);
    exitMBB->splice(exitMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    exitMBB->transferSuccessorsAndUpdatePHIs(BB);

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass
                                            : &PPC::GPRCRegClass;
    unsigned PtrReg = RegInfo.createVirtualRegister(RC);
    unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
    unsigned ShiftReg = RegInfo.createVirtualRegister(RC);
    unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC);
    unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC);
    unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC);
    unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC);
    unsigned MaskReg = RegInfo.createVirtualRegister(RC);
    unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
    unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
    unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
    unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
    unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
    unsigned Ptr1Reg;
    unsigned TmpReg = RegInfo.createVirtualRegister(RC);
    unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
    // thisMBB:
    //   ...
    //   fallthrough --> loopMBB
    BB->addSuccessor(loop1MBB);

    // The 4-byte load must be aligned, while a char or short may be
    // anywhere in the word. Hence all this nasty bookkeeping code.
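    // For example (big-endian, is8bit): if (ptr1 & 3) == 0, the rlwinm below
    // computes shift1 = 0 and the xori computes shift = 24, selecting the
    // most-significant byte of the aligned word.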
    //   add ptr1, ptrA, ptrB [copy if ptrA==0]
    //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
    //   xori shift, shift1, 24 [16]
    //   rlwinm ptr, ptr1, 0, 0, 29
    //   slw newval2, newval, shift
    //   slw oldval2, oldval, shift
    //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
    //   slw mask, mask2, shift
    //   and newval3, newval2, mask
    //   and oldval3, oldval2, mask
    // loop1MBB:
    //   lwarx tmpDest, ptr
    //   and tmp, tmpDest, mask
    //   cmpw tmp, oldval3
    //   bne- midMBB
    // loop2MBB:
    //   andc tmp2, tmpDest, mask
    //   or tmp4, tmp2, newval3
    //   stwcx. tmp4, ptr
    //   bne- loop1MBB
    //   b exitBB
    // midMBB:
    //   stwcx. tmpDest, ptr
    // exitBB:
    //   srw dest, tmpDest, shift
    if (ptrA != ZeroReg) {
      Ptr1Reg = RegInfo.createVirtualRegister(RC);
      BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
          .addReg(ptrA).addReg(ptrB);
    } else {
      Ptr1Reg = ptrB;
    }
    BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
        .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
        .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
    if (is64bit)
      BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
          .addReg(Ptr1Reg).addImm(0).addImm(61);
    else
      BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
          .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
    BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
        .addReg(newval).addReg(ShiftReg);
    BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
        .addReg(oldval).addReg(ShiftReg);
    if (is8bit)
      BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
    else {
      BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
      BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
          .addReg(Mask3Reg).addImm(65535);
    }
    BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
        .addReg(Mask2Reg).addReg(ShiftReg);
    BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
        .addReg(NewVal2Reg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
        .addReg(OldVal2Reg).addReg(MaskReg);

    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
        .addReg(ZeroReg).addReg(PtrReg);
    BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
        .addReg(TmpDestReg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
        .addReg(TmpReg).addReg(OldVal3Reg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(midMBB);

    BB = loop2MBB;
    BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
        .addReg(TmpDestReg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
        .addReg(Tmp2Reg).addReg(NewVal3Reg);
    BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg)
        .addReg(ZeroReg).addReg(PtrReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
    BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
    BB->addSuccessor(loop1MBB);
    BB->addSuccessor(exitMBB);

    BB = midMBB;
    BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg)
        .addReg(ZeroReg).addReg(PtrReg);
    BB->addSuccessor(exitMBB);

    // exitMBB:
    //   ...
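    //
    // The srw below extracts (tmpDest & mask) >> shift, i.e. the value that
    // occupied the byte/halfword before the exchange, which is what the
    // cmpxchg pseudo must produce.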
    BB = exitMBB;
    BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpReg)
        .addReg(ShiftReg);
  } else if (MI.getOpcode() == PPC::FADDrtz) {
    // This pseudo performs an FADD with rounding mode temporarily forced
    // to round-to-zero. We emit this via custom inserter since the FPSCR
    // is not modeled at the SelectionDAG level.
    unsigned Dest = MI.getOperand(0).getReg();
    unsigned Src1 = MI.getOperand(1).getReg();
    unsigned Src2 = MI.getOperand(2).getReg();
    DebugLoc dl = MI.getDebugLoc();

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);

    // Save FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);

    // Set rounding mode to round-to-zero.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);

    // Perform addition.
    BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);

    // Restore FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
  } else if (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT ||
             MI.getOpcode() == PPC::ANDIo_1_GT_BIT ||
             MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
             MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) {
    unsigned Opcode = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
                       MI.getOpcode() == PPC::ANDIo_1_GT_BIT8)
                          ? PPC::ANDIo8
                          : PPC::ANDIo;
    bool isEQ = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT ||
                 MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8);

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    unsigned Dest = RegInfo.createVirtualRegister(
        Opcode == PPC::ANDIo ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);

    DebugLoc dl = MI.getDebugLoc();
    BuildMI(*BB, MI, dl, TII->get(Opcode), Dest)
        .addReg(MI.getOperand(1).getReg())
        .addImm(1);
    BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY),
            MI.getOperand(0).getReg())
        .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT);
  } else if (MI.getOpcode() == PPC::TCHECK_RET) {
    DebugLoc Dl = MI.getDebugLoc();
    MachineRegisterInfo &RegInfo = F->getRegInfo();
    unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
    BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
    return BB;
  } else {
    llvm_unreachable("Unexpected instr type to insert");
  }

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

static std::string getRecipOp(const char *Base, EVT VT) {
  std::string RecipOp(Base);
  if (VT.getScalarType() == MVT::f64)
    RecipOp += "d";
  else
    RecipOp += "f";

  if (VT.isVector())
    RecipOp = "vec-" + RecipOp;

  return RecipOp;
}

SDValue PPCTargetLowering::getRsqrtEstimate(SDValue Operand,
                                            DAGCombinerInfo &DCI,
                                            unsigned &RefinementSteps,
                                            bool &UseOneConstNR) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
      (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
      (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
      (VT == MVT::v4f64 && Subtarget.hasQPX())) {
    TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals;
    std::string RecipOp = getRecipOp("sqrt", VT);
    if (!Recips.isEnabled(RecipOp))
      return SDValue();

    RefinementSteps = Recips.getRefinementSteps(RecipOp);
    UseOneConstNR = true;
    return DCI.DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand,
                                            DAGCombinerInfo &DCI,
                                            unsigned &RefinementSteps) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
      (VT == MVT::f64 && Subtarget.hasFRE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
      (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
      (VT == MVT::v4f64 && Subtarget.hasQPX())) {
    TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals;
    std::string RecipOp = getRecipOp("div", VT);
    if (!Recips.isEnabled(RecipOp))
      return SDValue();

    RefinementSteps = Recips.getRefinementSteps(RecipOp);
    return DCI.DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
  // Note: This functionality is used only when unsafe-fp-math is enabled, and
  // on cores with reciprocal estimates (which are used when unsafe-fp-math is
  // enabled for division), this functionality is redundant with the default
  // combiner logic (once the division -> reciprocal/multiply transformation
  // has taken place). As a result, this matters more for older cores than for
  // newer ones.

  // Combine multiple FDIVs with the same divisor into multiple FMULs by the
  // reciprocal if there are two or more FDIVs (for embedded cores with only
  // one FP pipeline) or three or more FDIVs (for generic OOO cores).
  switch (Subtarget.getDarwinDirective()) {
  default:
    return 3;
  case PPC::DIR_440:
  case PPC::DIR_A2:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
    return 2;
  }
}

// isConsecutiveLSLoc needs to work even if all adds have not yet been
// collapsed, and so we need to look through chains of them.
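// For example, given (add (add %x, 8), 16), the helper below reports
// Base = %x and accumulates Offset = 24 by recursing through the inner add.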
static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
                                      int64_t &Offset, SelectionDAG &DAG) {
  if (DAG.isBaseWithConstantOffset(Loc)) {
    Base = Loc.getOperand(0);
    Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();

    // The base might itself be a base plus an offset, and if so, accumulate
    // that as well.
    getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
  }
}

static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
                               unsigned Bytes, int Dist,
                               SelectionDAG &DAG) {
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDValue BaseLoc = Base->getBasePtr();
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS = MFI.getObjectSize(FI);
    int BFS = MFI.getObjectSize(BFI);
    if (FS != BFS || FS != (int)Bytes) return false;
    return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
  }

  SDValue Base1 = Loc, Base2 = BaseLoc;
  int64_t Offset1 = 0, Offset2 = 0;
  getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
  getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
  if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
    return true;

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const GlobalValue *GV1 = nullptr;
  const GlobalValue *GV2 = nullptr;
  Offset1 = 0;
  Offset2 = 0;
  bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
  bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist*Bytes);
  return false;
}

// Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
// not enforce equality of the chain operands.
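// For the intrinsic forms handled below, the pointer is operand 2 of the
// loads (INTRINSIC_W_CHAIN) and operand 3 of the stores (INTRINSIC_VOID);
// the memory VT is determined by the intrinsic ID.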
static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
                            unsigned Bytes, int Dist,
                            SelectionDAG &DAG) {
  if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
    EVT VT = LS->getMemoryVT();
    SDValue Loc = LS->getBasePtr();
    return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
  }

  if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
    EVT VT;
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    default: return false;
    case Intrinsic::ppc_qpx_qvlfd:
    case Intrinsic::ppc_qpx_qvlfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfs:
    case Intrinsic::ppc_qpx_qvlfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcd:
    case Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcs:
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    case Intrinsic::ppc_qpx_qvlfiwa:
    case Intrinsic::ppc_qpx_qvlfiwz:
    case Intrinsic::ppc_altivec_lvx:
    case Intrinsic::ppc_altivec_lvxl:
    case Intrinsic::ppc_vsx_lxvw4x:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    }

    return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
  }

  if (N->getOpcode() == ISD::INTRINSIC_VOID) {
    EVT VT;
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    default: return false;
    case Intrinsic::ppc_qpx_qvstfd:
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    case Intrinsic::ppc_qpx_qvstfiw:
    case Intrinsic::ppc_qpx_qvstfiwa:
    case Intrinsic::ppc_altivec_stvx:
    case Intrinsic::ppc_altivec_stvxl:
    case Intrinsic::ppc_vsx_stxvw4x:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    }

    return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
  }

  return false;
}

// Return true if there is a nearby consecutive load to the one provided
// (regardless of alignment). We search up and down the chain, looking through
// token factors and other loads (but nothing else). As a result, a true result
// indicates that it is safe to create a new consecutive load adjacent to the
// load provided.
static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
  SDValue Chain = LD->getChain();
  EVT VT = LD->getMemoryVT();

  SmallSet<SDNode *, 16> LoadRoots;
  SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
  SmallSet<SDNode *, 16> Visited;

  // First, search up the chain, branching to follow all token-factor operands.
  // If we find a consecutive load, then we're done; otherwise, record all
  // nodes just above the top-level loads and token factors.
  while (!Queue.empty()) {
    SDNode *ChainNext = Queue.pop_back_val();
    if (!Visited.insert(ChainNext).second)
      continue;

    if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
      if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
        return true;

      if (!Visited.count(ChainLD->getChain().getNode()))
        Queue.push_back(ChainLD->getChain().getNode());
    } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
      for (const SDUse &O : ChainNext->ops())
        if (!Visited.count(O.getNode()))
          Queue.push_back(O.getNode());
    } else
      LoadRoots.insert(ChainNext);
  }

  // Second, search down the chain, starting from the top-level nodes recorded
  // in the first phase. These top-level nodes are the nodes just above all
  // loads and token factors. Starting with their uses, recursively look
  // through all loads (just the chain uses) and token factors to find a
  // consecutive load.
  Visited.clear();
  Queue.clear();

  for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
       IE = LoadRoots.end(); I != IE; ++I) {
    Queue.push_back(*I);

    while (!Queue.empty()) {
      SDNode *LoadRoot = Queue.pop_back_val();
      if (!Visited.insert(LoadRoot).second)
        continue;

      if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
        if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
          return true;

      for (SDNode::use_iterator UI = LoadRoot->use_begin(),
           UE = LoadRoot->use_end(); UI != UE; ++UI)
        if (((isa<MemSDNode>(*UI) &&
              cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
          Queue.push_back(*UI);
    }
  }

  return false;
}

SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
  // If we're tracking CR bits, we need to be careful that we don't have:
  //   trunc(binary-ops(zext(x), zext(y)))
  // or
  //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
  // such that we're unnecessarily moving things into GPRs when it would be
  // better to keep them in CR bits.

  // Note that trunc here can be an actual i1 trunc, or can be the effective
  // truncation that comes from a setcc or select_cc.
  if (N->getOpcode() == ISD::TRUNCATE &&
      N->getValueType(0) != MVT::i1)
    return SDValue();

  if (N->getOperand(0).getValueType() != MVT::i32 &&
      N->getOperand(0).getValueType() != MVT::i64)
    return SDValue();

  if (N->getOpcode() == ISD::SETCC ||
      N->getOpcode() == ISD::SELECT_CC) {
    // If we're looking at a comparison, then we need to make sure that the
    // high bits (all except for the first) don't affect the result.
    ISD::CondCode CC =
        cast<CondCodeSDNode>(N->getOperand(
            N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
    unsigned OpBits = N->getOperand(0).getValueSizeInBits();

    if (ISD::isSignedIntSetCC(CC)) {
      if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
          DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
        return SDValue();
    } else if (ISD::isUnsignedIntSetCC(CC)) {
      if (!DAG.MaskedValueIsZero(N->getOperand(0),
                                 APInt::getHighBitsSet(OpBits, OpBits-1)) ||
          !DAG.MaskedValueIsZero(N->getOperand(1),
                                 APInt::getHighBitsSet(OpBits, OpBits-1)))
        return SDValue();
    } else {
      // This is neither a signed nor an unsigned comparison, just make sure
      // that the high bits are equal.
      APInt Op1Zero, Op1One;
      APInt Op2Zero, Op2One;
      DAG.computeKnownBits(N->getOperand(0), Op1Zero, Op1One);
      DAG.computeKnownBits(N->getOperand(1), Op2Zero, Op2One);

      // We don't really care about what is known about the first bit (if
      // anything), so clear it in all masks prior to comparing them.
      Op1Zero.clearBit(0); Op1One.clearBit(0);
      Op2Zero.clearBit(0); Op2One.clearBit(0);

      if (Op1Zero != Op2Zero || Op1One != Op2One)
        return SDValue();
    }
  }

  // We now know that the higher-order bits are irrelevant, we just need to
  // make sure that all of the intermediate operations are bit operations, and
  // all inputs are extensions.
  if (N->getOperand(0).getOpcode() != ISD::AND &&
      N->getOperand(0).getOpcode() != ISD::OR &&
      N->getOperand(0).getOpcode() != ISD::XOR &&
      N->getOperand(0).getOpcode() != ISD::SELECT &&
      N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
      N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
      N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
      N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
      N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
    return SDValue();

  if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
      N->getOperand(1).getOpcode() != ISD::AND &&
      N->getOperand(1).getOpcode() != ISD::OR &&
      N->getOperand(1).getOpcode() != ISD::XOR &&
      N->getOperand(1).getOpcode() != ISD::SELECT &&
      N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
      N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
      N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
      N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
      N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
    return SDValue();

  SmallVector<SDValue, 4> Inputs;
  SmallVector<SDValue, 8> BinOps, PromOps;
  SmallPtrSet<SDNode *, 16> Visited;

  for (unsigned i = 0; i < 2; ++i) {
    if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
          N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
          N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
         N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
        isa<ConstantSDNode>(N->getOperand(i)))
      Inputs.push_back(N->getOperand(i));
    else
      BinOps.push_back(N->getOperand(i));

    if (N->getOpcode() == ISD::TRUNCATE)
      break;
  }

  // Visit all inputs, collect all binary operations (and, or, xor and
  // select) that are all fed by extensions.
  while (!BinOps.empty()) {
    SDValue BinOp = BinOps.back();
    BinOps.pop_back();

    if (!Visited.insert(BinOp.getNode()).second)
      continue;

    PromOps.push_back(BinOp);

    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
      // The condition of the select is not promoted.
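      // (It is a separate boolean input that only steers the select; it is
      // not part of the i1 value network being rewritten here.)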
      if (BinOp.getOpcode() == ISD::SELECT && i == 0)
        continue;
      if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
        continue;

      if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
            BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
            BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
           BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
          isa<ConstantSDNode>(BinOp.getOperand(i))) {
        Inputs.push_back(BinOp.getOperand(i));
      } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
                 BinOp.getOperand(i).getOpcode() == ISD::OR ||
                 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
                 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
                 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
                 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
                 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
        BinOps.push_back(BinOp.getOperand(i));
      } else {
        // We have an input that is not an extension or another binary
        // operation; we'll abort this transformation.
        return SDValue();
      }
    }
  }

  // Make sure that this is a self-contained cluster of operations (which
  // is not quite the same thing as saying that everything has only one
  // use).
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
         UE = Inputs[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // Make sure that we're not going to promote the non-output-value
      // operand(s) or SELECT or SELECT_CC.
      // FIXME: Although we could sometimes handle this, and it does occur in
      // practice that one of the condition inputs to the select is also one of
      // the outputs, we currently can't deal with this.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == Inputs[i])
          return SDValue();
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == Inputs[i] ||
            User->getOperand(1) == Inputs[i])
          return SDValue();
      }
    }
  }

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
    for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
         UE = PromOps[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // Make sure that we're not going to promote the non-output-value
      // operand(s) or SELECT or SELECT_CC.
      // FIXME: Although we could sometimes handle this, and it does occur in
      // practice that one of the condition inputs to the select is also one of
      // the outputs, we currently can't deal with this.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == PromOps[i])
          return SDValue();
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == PromOps[i] ||
            User->getOperand(1) == PromOps[i])
          return SDValue();
      }
    }
  }

  // Replace all inputs with the extension operand.
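  // For example, a zext from i1 that feeds this cluster is simply replaced by
  // its i1 source; the operations below are about to be re-created with i1
  // result types, so the extension is no longer needed.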
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    // Constants may have users outside the cluster of to-be-promoted nodes,
    // and so we need to replace those as we do the promotions.
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
  }

  std::list<HandleSDNode> PromOpHandles;
  for (auto &PromOp : PromOps)
    PromOpHandles.emplace_back(PromOp);

  // Replace all operations (these are all the same, but have a different
  // (i1) return type). DAG.getNode will validate that the types of
  // a binary operator match, so go through the list in reverse so that
  // we've likely promoted both operands first. Any intermediate truncations or
  // extensions disappear.
  while (!PromOpHandles.empty()) {
    SDValue PromOp = PromOpHandles.back().getValue();
    PromOpHandles.pop_back();

    if (PromOp.getOpcode() == ISD::TRUNCATE ||
        PromOp.getOpcode() == ISD::SIGN_EXTEND ||
        PromOp.getOpcode() == ISD::ZERO_EXTEND ||
        PromOp.getOpcode() == ISD::ANY_EXTEND) {
      if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
          PromOp.getOperand(0).getValueType() != MVT::i1) {
        // The operand is not yet ready (see comment below).
        PromOpHandles.emplace_front(PromOp);
        continue;
      }

      SDValue RepValue = PromOp.getOperand(0);
      if (isa<ConstantSDNode>(RepValue))
        RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);

      DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
      continue;
    }

    unsigned C;
    switch (PromOp.getOpcode()) {
    default: C = 0; break;
    case ISD::SELECT: C = 1; break;
    case ISD::SELECT_CC: C = 2; break;
    }

    if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
         PromOp.getOperand(C).getValueType() != MVT::i1) ||
        (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
         PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
      // The to-be-promoted operands of this node have not yet been
      // promoted (this should be rare because we're going through the
      // list backward, but if one of the operands has several users in
      // this cluster of to-be-promoted nodes, it is possible).
      PromOpHandles.emplace_front(PromOp);
      continue;
    }

    SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
                                PromOp.getNode()->op_end());

    // If there are any constant inputs, make sure they're replaced now.
    for (unsigned i = 0; i < 2; ++i)
      if (isa<ConstantSDNode>(Ops[C+i]))
        Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);

    DAG.ReplaceAllUsesOfValueWith(PromOp,
        DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
  }

  // Now we're left with the initial truncation itself.
  if (N->getOpcode() == ISD::TRUNCATE)
    return N->getOperand(0);

  // Otherwise, this is a comparison. The operands to be compared have just
  // changed type (to i1), but everything else is the same.
  return SDValue(N, 0);
}

SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  // If we're tracking CR bits, we need to be careful that we don't have:
  //   zext(binary-ops(trunc(x), trunc(y)))
  // or
  //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
  // such that we're unnecessarily moving things into CR bits that can more
  // efficiently stay in GPRs. Note that if we're not certain that the high
  // bits are set as required by the final extension, we still may need to do
  // some masking to get the proper behavior.

  // This same functionality is important on PPC64 when dealing with
  // 32-to-64-bit extensions; these occur often when 32-bit values are used as
  // the return values of functions. Because it is so similar, it is handled
  // here as well.

  if (N->getValueType(0) != MVT::i32 &&
      N->getValueType(0) != MVT::i64)
    return SDValue();

  if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
        (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
    return SDValue();

  if (N->getOperand(0).getOpcode() != ISD::AND &&
      N->getOperand(0).getOpcode() != ISD::OR &&
      N->getOperand(0).getOpcode() != ISD::XOR &&
      N->getOperand(0).getOpcode() != ISD::SELECT &&
      N->getOperand(0).getOpcode() != ISD::SELECT_CC)
    return SDValue();

  SmallVector<SDValue, 4> Inputs;
  SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
  SmallPtrSet<SDNode *, 16> Visited;

  // Visit all inputs, collect all binary operations (and, or, xor and
  // select) that are all fed by truncations.
  while (!BinOps.empty()) {
    SDValue BinOp = BinOps.back();
    BinOps.pop_back();

    if (!Visited.insert(BinOp.getNode()).second)
      continue;

    PromOps.push_back(BinOp);

    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
      // The condition of the select is not promoted.
      if (BinOp.getOpcode() == ISD::SELECT && i == 0)
        continue;
      if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
        continue;

      if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
          isa<ConstantSDNode>(BinOp.getOperand(i))) {
        Inputs.push_back(BinOp.getOperand(i));
      } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
                 BinOp.getOperand(i).getOpcode() == ISD::OR ||
                 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
        BinOps.push_back(BinOp.getOperand(i));
      } else {
        // We have an input that is not a truncation or another binary
        // operation; we'll abort this transformation.
        return SDValue();
      }
    }
  }

  // The operands of a select that must be truncated when the select is
  // promoted because the operand is actually part of the to-be-promoted set.
  DenseMap<SDNode *, EVT> SelectTruncOp[2];

  // Make sure that this is a self-contained cluster of operations (which
  // is not quite the same thing as saying that everything has only one
  // use).
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
         UE = Inputs[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // If we're going to promote the non-output-value operand(s) or SELECT or
      // SELECT_CC, record them for truncation.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == Inputs[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == Inputs[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
        if (User->getOperand(1) == Inputs[i])
          SelectTruncOp[1].insert(std::make_pair(User,
                                  User->getOperand(1).getValueType()));
      }
    }
  }

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
    for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
         UE = PromOps[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // If we're going to promote the non-output-value operand(s) or SELECT or
      // SELECT_CC, record them for truncation.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == PromOps[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == PromOps[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
        if (User->getOperand(1) == PromOps[i])
          SelectTruncOp[1].insert(std::make_pair(User,
                                  User->getOperand(1).getValueType()));
      }
    }
  }

  unsigned PromBits = N->getOperand(0).getValueSizeInBits();
  bool ReallyNeedsExt = false;
  if (N->getOpcode() != ISD::ANY_EXTEND) {
    // If all of the inputs are not already sign/zero extended, then
    // we'll still need to do that at the end.
    for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
      if (isa<ConstantSDNode>(Inputs[i]))
        continue;

      unsigned OpBits =
          Inputs[i].getOperand(0).getValueSizeInBits();
      assert(PromBits < OpBits && "Truncation not to a smaller bit count?");

      if ((N->getOpcode() == ISD::ZERO_EXTEND &&
           !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
                                  APInt::getHighBitsSet(OpBits,
                                                        OpBits-PromBits))) ||
          (N->getOpcode() == ISD::SIGN_EXTEND &&
           DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
             (OpBits-(PromBits-1)))) {
        ReallyNeedsExt = true;
        break;
      }
    }
  }

  // Replace all inputs, either with the truncation operand, or a
  // truncation or extension to the final output type.
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    // Constant inputs need to be replaced with the to-be-promoted nodes that
    // use them because they might have users outside of the cluster of
    // promoted nodes.
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    SDValue InSrc = Inputs[i].getOperand(0);
    if (Inputs[i].getValueType() == N->getValueType(0))
      DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
    else if (N->getOpcode() == ISD::SIGN_EXTEND)
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
          DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
    else if (N->getOpcode() == ISD::ZERO_EXTEND)
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
          DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
    else
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
          DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
  }

  std::list<HandleSDNode> PromOpHandles;
  for (auto &PromOp : PromOps)
    PromOpHandles.emplace_back(PromOp);

  // Replace all operations (these are all the same, but have a different
  // (promoted) return type). DAG.getNode will validate that the types of
  // a binary operator match, so go through the list in reverse so that
  // we've likely promoted both operands first.
  while (!PromOpHandles.empty()) {
    SDValue PromOp = PromOpHandles.back().getValue();
    PromOpHandles.pop_back();

    unsigned C;
    switch (PromOp.getOpcode()) {
    default: C = 0; break;
    case ISD::SELECT: C = 1; break;
    case ISD::SELECT_CC: C = 2; break;
    }

    if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
         PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
        (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
         PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
      // The to-be-promoted operands of this node have not yet been
      // promoted (this should be rare because we're going through the
      // list backward, but if one of the operands has several users in
      // this cluster of to-be-promoted nodes, it is possible).
      PromOpHandles.emplace_front(PromOp);
      continue;
    }

    // For SELECT and SELECT_CC nodes, we do a similar check for any
    // to-be-promoted comparison inputs.
    if (PromOp.getOpcode() == ISD::SELECT ||
        PromOp.getOpcode() == ISD::SELECT_CC) {
      if ((SelectTruncOp[0].count(PromOp.getNode()) &&
           PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
          (SelectTruncOp[1].count(PromOp.getNode()) &&
           PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
        PromOpHandles.emplace_front(PromOp);
        continue;
      }
    }

    SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
                                PromOp.getNode()->op_end());

    // If this node has constant inputs, then they'll need to be promoted here.
    for (unsigned i = 0; i < 2; ++i) {
      if (!isa<ConstantSDNode>(Ops[C+i]))
        continue;
      if (Ops[C+i].getValueType() == N->getValueType(0))
        continue;

      if (N->getOpcode() == ISD::SIGN_EXTEND)
        Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
      else if (N->getOpcode() == ISD::ZERO_EXTEND)
        Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
      else
        Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
    }

    // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
    // truncate them again to the original value type.
10241     if (PromOp.getOpcode() == ISD::SELECT ||
10242         PromOp.getOpcode() == ISD::SELECT_CC) {
10243       auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
10244       if (SI0 != SelectTruncOp[0].end())
10245         Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
10246       auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
10247       if (SI1 != SelectTruncOp[1].end())
10248         Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
10249     }
10250 
10251     DAG.ReplaceAllUsesOfValueWith(PromOp,
10252       DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
10253   }
10254 
10255   // Now we're left with the initial extension itself.
10256   if (!ReallyNeedsExt)
10257     return N->getOperand(0);
10258 
10259   // To zero extend, just mask off everything except for the first bit (in the
10260   // i1 case).
10261   if (N->getOpcode() == ISD::ZERO_EXTEND)
10262     return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
10263                        DAG.getConstant(APInt::getLowBitsSet(
10264                                          N->getValueSizeInBits(0), PromBits),
10265                                        dl, N->getValueType(0)));
10266 
10267   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
10268          "Invalid extension type");
10269   EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
10270   SDValue ShiftCst =
10271     DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
10272   return DAG.getNode(
10273       ISD::SRA, dl, N->getValueType(0),
10274       DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
10275       ShiftCst);
10276 }
10277 
10278 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
10279                                                  DAGCombinerInfo &DCI) const {
10280   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
10281          "Should be called with a BUILD_VECTOR node");
10282 
10283   SelectionDAG &DAG = DCI.DAG;
10284   SDLoc dl(N);
10285   if (N->getValueType(0) != MVT::v2f64 || !Subtarget.hasVSX())
10286     return SDValue();
10287 
10288   // Looking for:
10289   // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1))
10290   if (N->getOperand(0).getOpcode() != ISD::SINT_TO_FP &&
10291       N->getOperand(0).getOpcode() != ISD::UINT_TO_FP)
10292     return SDValue();
10293   if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
10294       N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
10295     return SDValue();
10296   if (N->getOperand(0).getOpcode() != N->getOperand(1).getOpcode())
10297     return SDValue();
10298 
10299   SDValue Ext1 = N->getOperand(0).getOperand(0);
10300   SDValue Ext2 = N->getOperand(1).getOperand(0);
10301   if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
10302       Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
10303     return SDValue();
10304 
10305   ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
10306   ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
10307   if (!Ext1Op || !Ext2Op)
10308     return SDValue();
10309   if (Ext1.getValueType() != MVT::i32 ||
10310       Ext2.getValueType() != MVT::i32)
    return SDValue();
10311   if (Ext1.getOperand(0) != Ext2.getOperand(0))
10312     return SDValue();
10313 
10314   int FirstElem = Ext1Op->getZExtValue();
10315   int SecondElem = Ext2Op->getZExtValue();
10316   int SubvecIdx;
10317   if (FirstElem == 0 && SecondElem == 1)
10318     SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
10319   else if (FirstElem == 2 && SecondElem == 3)
10320     SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
10321   else
10322     return SDValue();
10323 
10324   SDValue SrcVec = Ext1.getOperand(0);
10325   auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
10326                     PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
10327   return DAG.getNode(NodeType, dl, MVT::v2f64,
10328                      SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
10329 }
10330 
10331 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
10332                                               DAGCombinerInfo &DCI) const {
10333   assert((N->getOpcode() == ISD::SINT_TO_FP ||
10334           N->getOpcode() == ISD::UINT_TO_FP) &&
10335          "Need an int -> FP conversion node here");
10336 
10337   if (!Subtarget.has64BitSupport())
10338     return SDValue();
10339 
10340   SelectionDAG &DAG = DCI.DAG;
10341   SDLoc dl(N);
10342   SDValue Op(N, 0);
10343 
10344   // Don't handle ppc_fp128 here or i1 conversions.
10345   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
10346     return SDValue();
10347   if (Op.getOperand(0).getValueType() == MVT::i1)
10348     return SDValue();
10349 
10350   // For i32 intermediate values, unfortunately, the conversion functions
10351   // leave the upper 32 bits of the value undefined. Within the set of
10352   // scalar instructions, we have no method for zero- or sign-extending the
10353   // value. Thus, we cannot handle i32 intermediate values here.
10354   if (Op.getOperand(0).getValueType() == MVT::i32)
10355     return SDValue();
10356 
10357   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
10358          "UINT_TO_FP is supported only with FPCVT");
10359 
10360   // If we have FCFIDS, then use it when converting to single-precision.
10361   // Otherwise, convert to double-precision and then round.
10362   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
10363                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
10364                                                             : PPCISD::FCFIDS)
10365                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
10366                                                             : PPCISD::FCFID);
10367   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
10368                   ? MVT::f32
10369                   : MVT::f64;
10370 
10371   // If we're converting from a float to an int and back to a float again,
10372   // then we don't need the store/load pair at all.
10373   if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
10374        Subtarget.hasFPCVT()) ||
10375       (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
10376     SDValue Src = Op.getOperand(0).getOperand(0);
10377     if (Src.getValueType() == MVT::f32) {
10378       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
10379       DCI.AddToWorklist(Src.getNode());
10380     } else if (Src.getValueType() != MVT::f64) {
10381       // Make sure that we don't pick up a ppc_fp128 source value.
10382       return SDValue();
10383     }
10384 
10385     unsigned FCTOp =
10386       Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
10387                                                         PPCISD::FCTIDUZ;
10388 
10389     SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
10390     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
10391 
10392     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
10393       FP = DAG.getNode(ISD::FP_ROUND, dl,
10394                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
10395       DCI.AddToWorklist(FP.getNode());
10396     }
10397 
10398     return FP;
10399   }
10400 
10401   return SDValue();
10402 }
10403 
10404 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
10405 // builtins) into loads with swaps.
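// For example (illustrative only): on a little-endian subtarget a plain
// v4i32 load is rewritten as
//   (v4i32 (bitcast (v2f64 (XXSWAPD (LXVD2X ptr)))))
// because lxvd2x loads the two doublewords in big-endian element order and
// the xxswapd restores the expected little-endian order.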
10406 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
10407                                               DAGCombinerInfo &DCI) const {
10408   SelectionDAG &DAG = DCI.DAG;
10409   SDLoc dl(N);
10410   SDValue Chain;
10411   SDValue Base;
10412   MachineMemOperand *MMO;
10413 
10414   switch (N->getOpcode()) {
10415   default:
10416     llvm_unreachable("Unexpected opcode for little endian VSX load");
10417   case ISD::LOAD: {
10418     LoadSDNode *LD = cast<LoadSDNode>(N);
10419     Chain = LD->getChain();
10420     Base = LD->getBasePtr();
10421     MMO = LD->getMemOperand();
10422     // If the MMO suggests this isn't a load of a full vector, leave
10423     // things alone. For a built-in, we have to make the change for
10424     // correctness, so if there is a size problem that will be a bug.
10425     if (MMO->getSize() < 16)
10426       return SDValue();
10427     break;
10428   }
10429   case ISD::INTRINSIC_W_CHAIN: {
10430     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
10431     Chain = Intrin->getChain();
10432     // Similarly to the store case below, Intrin->getBasePtr() doesn't get
10433     // us what we want. Get operand 2 instead.
10434     Base = Intrin->getOperand(2);
10435     MMO = Intrin->getMemOperand();
10436     break;
10437   }
10438   }
10439 
10440   MVT VecTy = N->getValueType(0).getSimpleVT();
10441   SDValue LoadOps[] = { Chain, Base };
10442   SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
10443                                          DAG.getVTList(MVT::v2f64, MVT::Other),
10444                                          LoadOps, MVT::v2f64, MMO);
10445 
10446   DCI.AddToWorklist(Load.getNode());
10447   Chain = Load.getValue(1);
10448   SDValue Swap = DAG.getNode(
10449       PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
10450   DCI.AddToWorklist(Swap.getNode());
10451 
10452   // Add a bitcast if the resulting load type doesn't match v2f64.
10453   if (VecTy != MVT::v2f64) {
10454     SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
10455     DCI.AddToWorklist(N.getNode());
10456     // Package {bitcast value, swap's chain} to match Load's shape.
10457     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
10458                        N, Swap.getValue(1));
10459   }
10460 
10461   return Swap;
10462 }
10463 
10464 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
10465 // builtins) into stores with swaps.
10466 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
10467                                                DAGCombinerInfo &DCI) const {
10468   SelectionDAG &DAG = DCI.DAG;
10469   SDLoc dl(N);
10470   SDValue Chain;
10471   SDValue Base;
10472   unsigned SrcOpnd;
10473   MachineMemOperand *MMO;
10474 
10475   switch (N->getOpcode()) {
10476   default:
10477     llvm_unreachable("Unexpected opcode for little endian VSX store");
10478   case ISD::STORE: {
10479     StoreSDNode *ST = cast<StoreSDNode>(N);
10480     Chain = ST->getChain();
10481     Base = ST->getBasePtr();
10482     MMO = ST->getMemOperand();
10483     SrcOpnd = 1;
10484     // If the MMO suggests this isn't a store of a full vector, leave
10485     // things alone. For a built-in, we have to make the change for
10486     // correctness, so if there is a size problem that will be a bug.
10487     if (MMO->getSize() < 16)
10488       return SDValue();
10489     break;
10490   }
10491   case ISD::INTRINSIC_VOID: {
10492     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
10493     Chain = Intrin->getChain();
10494     // Intrin->getBasePtr() oddly does not get what we want.
10495     Base = Intrin->getOperand(3);
10496     MMO = Intrin->getMemOperand();
10497     SrcOpnd = 2;
10498     break;
10499   }
10500   }
10501 
10502   SDValue Src = N->getOperand(SrcOpnd);
10503   MVT VecTy = Src.getValueType().getSimpleVT();
10504 
10505   // All stores are done as v2f64, with a bitcast inserted first if needed.
10506 if (VecTy != MVT::v2f64) { 10507 Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src); 10508 DCI.AddToWorklist(Src.getNode()); 10509 } 10510 10511 SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl, 10512 DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src); 10513 DCI.AddToWorklist(Swap.getNode()); 10514 Chain = Swap.getValue(1); 10515 SDValue StoreOps[] = { Chain, Swap, Base }; 10516 SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl, 10517 DAG.getVTList(MVT::Other), 10518 StoreOps, VecTy, MMO); 10519 DCI.AddToWorklist(Store.getNode()); 10520 return Store; 10521 } 10522 10523 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, 10524 DAGCombinerInfo &DCI) const { 10525 SelectionDAG &DAG = DCI.DAG; 10526 SDLoc dl(N); 10527 switch (N->getOpcode()) { 10528 default: break; 10529 case PPCISD::SHL: 10530 if (isNullConstant(N->getOperand(0))) // 0 << V -> 0. 10531 return N->getOperand(0); 10532 break; 10533 case PPCISD::SRL: 10534 if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0. 10535 return N->getOperand(0); 10536 break; 10537 case PPCISD::SRA: 10538 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 10539 if (C->isNullValue() || // 0 >>s V -> 0. 10540 C->isAllOnesValue()) // -1 >>s V -> -1. 10541 return N->getOperand(0); 10542 } 10543 break; 10544 case ISD::SIGN_EXTEND: 10545 case ISD::ZERO_EXTEND: 10546 case ISD::ANY_EXTEND: 10547 return DAGCombineExtBoolTrunc(N, DCI); 10548 case ISD::TRUNCATE: 10549 case ISD::SETCC: 10550 case ISD::SELECT_CC: 10551 return DAGCombineTruncBoolExt(N, DCI); 10552 case ISD::SINT_TO_FP: 10553 case ISD::UINT_TO_FP: 10554 return combineFPToIntToFP(N, DCI); 10555 case ISD::STORE: { 10556 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)). 10557 if (Subtarget.hasSTFIWX() && !cast<StoreSDNode>(N)->isTruncatingStore() && 10558 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT && 10559 N->getOperand(1).getValueType() == MVT::i32 && 10560 N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) { 10561 SDValue Val = N->getOperand(1).getOperand(0); 10562 if (Val.getValueType() == MVT::f32) { 10563 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 10564 DCI.AddToWorklist(Val.getNode()); 10565 } 10566 Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val); 10567 DCI.AddToWorklist(Val.getNode()); 10568 10569 SDValue Ops[] = { 10570 N->getOperand(0), Val, N->getOperand(2), 10571 DAG.getValueType(N->getOperand(1).getValueType()) 10572 }; 10573 10574 Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 10575 DAG.getVTList(MVT::Other), Ops, 10576 cast<StoreSDNode>(N)->getMemoryVT(), 10577 cast<StoreSDNode>(N)->getMemOperand()); 10578 DCI.AddToWorklist(Val.getNode()); 10579 return Val; 10580 } 10581 10582 // Turn STORE (BSWAP) -> sthbrx/stwbrx. 10583 if (cast<StoreSDNode>(N)->isUnindexed() && 10584 N->getOperand(1).getOpcode() == ISD::BSWAP && 10585 N->getOperand(1).getNode()->hasOneUse() && 10586 (N->getOperand(1).getValueType() == MVT::i32 || 10587 N->getOperand(1).getValueType() == MVT::i16 || 10588 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && 10589 N->getOperand(1).getValueType() == MVT::i64))) { 10590 SDValue BSwapOp = N->getOperand(1).getOperand(0); 10591 // Do an any-extend to 32-bits if this is a half-word input. 
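      // (sthbrx needs a 32-bit register operand, so an i16 value is widened
      // first; the store stays halfword-sized in memory because the memory VT
      // recorded in the STBRX node below comes from the original store.)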
10592       if (BSwapOp.getValueType() == MVT::i16)
10593         BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
10594 
10595       SDValue Ops[] = {
10596         N->getOperand(0), BSwapOp, N->getOperand(2),
10597         DAG.getValueType(N->getOperand(1).getValueType())
10598       };
10599       return
10600         DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
10601                                 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
10602                                 cast<StoreSDNode>(N)->getMemOperand());
10603     }
10604 
10605     // For little endian, VSX stores require generating xxswapd/stxvd2x.
10606     EVT VT = N->getOperand(1).getValueType();
10607     if (VT.isSimple()) {
10608       MVT StoreVT = VT.getSimpleVT();
10609       if (Subtarget.hasVSX() && Subtarget.isLittleEndian() &&
10610           (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
10611            StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
10612         return expandVSXStoreForLE(N, DCI);
10613     }
10614     break;
10615   }
10616   case ISD::LOAD: {
10617     LoadSDNode *LD = cast<LoadSDNode>(N);
10618     EVT VT = LD->getValueType(0);
10619 
10620     // For little endian, VSX loads require generating lxvd2x/xxswapd.
10621     if (VT.isSimple()) {
10622       MVT LoadVT = VT.getSimpleVT();
10623       if (Subtarget.hasVSX() && Subtarget.isLittleEndian() &&
10624           (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
10625            LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
10626         return expandVSXLoadForLE(N, DCI);
10627     }
10628 
10629     // We sometimes end up with a 64-bit integer load, from which we extract
10630     // two single-precision floating-point numbers. This happens with
10631     // std::complex<float>, and other similar structures, because of the way we
10632     // canonicalize structure copies. However, if we lack direct moves,
10633     // then the final bitcasts from the extracted integer values to the
10634     // floating-point numbers turn into store/load pairs. Even with direct moves,
10635     // just loading the two floating-point numbers is likely better.
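    // As a concrete illustration (hypothetical source, not from the original
    // comment): after a copy such as
    //   std::complex<float> b = a;
    // is canonicalized into a single i64 load, the two f32 halves are
    // recovered by the srl/truncate/bitcast pattern matched by the lambda
    // below, and the whole cluster can be replaced by two adjacent f32 loads.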
10636 auto ReplaceTwoFloatLoad = [&]() { 10637 if (VT != MVT::i64) 10638 return false; 10639 10640 if (LD->getExtensionType() != ISD::NON_EXTLOAD || 10641 LD->isVolatile()) 10642 return false; 10643 10644 // We're looking for a sequence like this: 10645 // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64 10646 // t16: i64 = srl t13, Constant:i32<32> 10647 // t17: i32 = truncate t16 10648 // t18: f32 = bitcast t17 10649 // t19: i32 = truncate t13 10650 // t20: f32 = bitcast t19 10651 10652 if (!LD->hasNUsesOfValue(2, 0)) 10653 return false; 10654 10655 auto UI = LD->use_begin(); 10656 while (UI.getUse().getResNo() != 0) ++UI; 10657 SDNode *Trunc = *UI++; 10658 while (UI.getUse().getResNo() != 0) ++UI; 10659 SDNode *RightShift = *UI; 10660 if (Trunc->getOpcode() != ISD::TRUNCATE) 10661 std::swap(Trunc, RightShift); 10662 10663 if (Trunc->getOpcode() != ISD::TRUNCATE || 10664 Trunc->getValueType(0) != MVT::i32 || 10665 !Trunc->hasOneUse()) 10666 return false; 10667 if (RightShift->getOpcode() != ISD::SRL || 10668 !isa<ConstantSDNode>(RightShift->getOperand(1)) || 10669 RightShift->getConstantOperandVal(1) != 32 || 10670 !RightShift->hasOneUse()) 10671 return false; 10672 10673 SDNode *Trunc2 = *RightShift->use_begin(); 10674 if (Trunc2->getOpcode() != ISD::TRUNCATE || 10675 Trunc2->getValueType(0) != MVT::i32 || 10676 !Trunc2->hasOneUse()) 10677 return false; 10678 10679 SDNode *Bitcast = *Trunc->use_begin(); 10680 SDNode *Bitcast2 = *Trunc2->use_begin(); 10681 10682 if (Bitcast->getOpcode() != ISD::BITCAST || 10683 Bitcast->getValueType(0) != MVT::f32) 10684 return false; 10685 if (Bitcast2->getOpcode() != ISD::BITCAST || 10686 Bitcast2->getValueType(0) != MVT::f32) 10687 return false; 10688 10689 if (Subtarget.isLittleEndian()) 10690 std::swap(Bitcast, Bitcast2); 10691 10692 // Bitcast has the second float (in memory-layout order) and Bitcast2 10693 // has the first one. 10694 10695 SDValue BasePtr = LD->getBasePtr(); 10696 if (LD->isIndexed()) { 10697 assert(LD->getAddressingMode() == ISD::PRE_INC && 10698 "Non-pre-inc AM on PPC?"); 10699 BasePtr = 10700 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 10701 LD->getOffset()); 10702 } 10703 10704 auto MMOFlags = 10705 LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile; 10706 SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr, 10707 LD->getPointerInfo(), LD->getAlignment(), 10708 MMOFlags, LD->getAAInfo()); 10709 SDValue AddPtr = 10710 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), 10711 BasePtr, DAG.getIntPtrConstant(4, dl)); 10712 SDValue FloatLoad2 = DAG.getLoad( 10713 MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr, 10714 LD->getPointerInfo().getWithOffset(4), 10715 MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo()); 10716 10717 if (LD->isIndexed()) { 10718 // Note that DAGCombine should re-form any pre-increment load(s) from 10719 // what is produced here if that makes sense. 10720 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr); 10721 } 10722 10723 DCI.CombineTo(Bitcast2, FloatLoad); 10724 DCI.CombineTo(Bitcast, FloatLoad2); 10725 10726 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 
2 : 1), 10727 SDValue(FloatLoad2.getNode(), 1)); 10728 return true; 10729 }; 10730 10731 if (ReplaceTwoFloatLoad()) 10732 return SDValue(N, 0); 10733 10734 EVT MemVT = LD->getMemoryVT(); 10735 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext()); 10736 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty); 10737 Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext()); 10738 unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy); 10739 if (LD->isUnindexed() && VT.isVector() && 10740 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) && 10741 // P8 and later hardware should just use LOAD. 10742 !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 || 10743 VT == MVT::v4i32 || VT == MVT::v4f32)) || 10744 (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) && 10745 LD->getAlignment() >= ScalarABIAlignment)) && 10746 LD->getAlignment() < ABIAlignment) { 10747 // This is a type-legal unaligned Altivec or QPX load. 10748 SDValue Chain = LD->getChain(); 10749 SDValue Ptr = LD->getBasePtr(); 10750 bool isLittleEndian = Subtarget.isLittleEndian(); 10751 10752 // This implements the loading of unaligned vectors as described in 10753 // the venerable Apple Velocity Engine overview. Specifically: 10754 // https://developer.apple.com/hardwaredrivers/ve/alignment.html 10755 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html 10756 // 10757 // The general idea is to expand a sequence of one or more unaligned 10758 // loads into an alignment-based permutation-control instruction (lvsl 10759 // or lvsr), a series of regular vector loads (which always truncate 10760 // their input address to an aligned address), and a series of 10761 // permutations. The results of these permutations are the requested 10762 // loaded values. The trick is that the last "extra" load is not taken 10763 // from the address you might suspect (sizeof(vector) bytes after the 10764 // last requested load), but rather sizeof(vector) - 1 bytes after the 10765 // last requested vector. The point of this is to avoid a page fault if 10766 // the base address happened to be aligned. This works because if the 10767 // base address is aligned, then adding less than a full vector length 10768 // will cause the last vector in the sequence to be (re)loaded. 10769 // Otherwise, the next vector will be fetched as you might suspect was 10770 // necessary. 10771 10772 // We might be able to reuse the permutation generation from 10773 // a different base address offset from this one by an aligned amount. 10774 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this 10775 // optimization later. 10776 Intrinsic::ID Intr, IntrLD, IntrPerm; 10777 MVT PermCntlTy, PermTy, LDTy; 10778 if (Subtarget.hasAltivec()) { 10779 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr : 10780 Intrinsic::ppc_altivec_lvsl; 10781 IntrLD = Intrinsic::ppc_altivec_lvx; 10782 IntrPerm = Intrinsic::ppc_altivec_vperm; 10783 PermCntlTy = MVT::v16i8; 10784 PermTy = MVT::v4i32; 10785 LDTy = MVT::v4i32; 10786 } else { 10787 Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld : 10788 Intrinsic::ppc_qpx_qvlpcls; 10789 IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd : 10790 Intrinsic::ppc_qpx_qvlfs; 10791 IntrPerm = Intrinsic::ppc_qpx_qvfperm; 10792 PermCntlTy = MVT::v4f64; 10793 PermTy = MVT::v4f64; 10794 LDTy = MemVT.getSimpleVT(); 10795 } 10796 10797 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy); 10798 10799 // Create the new MMO for the new base load. 
It is like the original MMO, 10800 // but represents an area in memory almost twice the vector size centered 10801 // on the original address. If the address is unaligned, we might start 10802 // reading up to (sizeof(vector)-1) bytes below the address of the 10803 // original unaligned load. 10804 MachineFunction &MF = DAG.getMachineFunction(); 10805 MachineMemOperand *BaseMMO = 10806 MF.getMachineMemOperand(LD->getMemOperand(), 10807 -(long)MemVT.getStoreSize()+1, 10808 2*MemVT.getStoreSize()-1); 10809 10810 // Create the new base load. 10811 SDValue LDXIntID = 10812 DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout())); 10813 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr }; 10814 SDValue BaseLoad = 10815 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 10816 DAG.getVTList(PermTy, MVT::Other), 10817 BaseLoadOps, LDTy, BaseMMO); 10818 10819 // Note that the value of IncOffset (which is provided to the next 10820 // load's pointer info offset value, and thus used to calculate the 10821 // alignment), and the value of IncValue (which is actually used to 10822 // increment the pointer value) are different! This is because we 10823 // require the next load to appear to be aligned, even though it 10824 // is actually offset from the base pointer by a lesser amount. 10825 int IncOffset = VT.getSizeInBits() / 8; 10826 int IncValue = IncOffset; 10827 10828 // Walk (both up and down) the chain looking for another load at the real 10829 // (aligned) offset (the alignment of the other load does not matter in 10830 // this case). If found, then do not use the offset reduction trick, as 10831 // that will prevent the loads from being later combined (as they would 10832 // otherwise be duplicates). 10833 if (!findConsecutiveLoad(LD, DAG)) 10834 --IncValue; 10835 10836 SDValue Increment = 10837 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout())); 10838 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 10839 10840 MachineMemOperand *ExtraMMO = 10841 MF.getMachineMemOperand(LD->getMemOperand(), 10842 1, 2*MemVT.getStoreSize()-1); 10843 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr }; 10844 SDValue ExtraLoad = 10845 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, 10846 DAG.getVTList(PermTy, MVT::Other), 10847 ExtraLoadOps, LDTy, ExtraMMO); 10848 10849 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 10850 BaseLoad.getValue(1), ExtraLoad.getValue(1)); 10851 10852 // Because vperm has a big-endian bias, we must reverse the order 10853 // of the input vectors and complement the permute control vector 10854 // when generating little endian code. We have already handled the 10855 // latter by using lvsr instead of lvsl, so just reverse BaseLoad 10856 // and ExtraLoad here. 10857 SDValue Perm; 10858 if (isLittleEndian) 10859 Perm = BuildIntrinsicOp(IntrPerm, 10860 ExtraLoad, BaseLoad, PermCntl, DAG, dl); 10861 else 10862 Perm = BuildIntrinsicOp(IntrPerm, 10863 BaseLoad, ExtraLoad, PermCntl, DAG, dl); 10864 10865 if (VT != PermTy) 10866 Perm = Subtarget.hasAltivec() ? 10867 DAG.getNode(ISD::BITCAST, dl, VT, Perm) : 10868 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX 10869 DAG.getTargetConstant(1, dl, MVT::i64)); 10870 // second argument is 1 because this rounding 10871 // is always exact. 10872 10873 // The output of the permutation is our loaded result, the TokenFactor is 10874 // our new chain. 
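      // Recapping the emitted sequence for the Altivec case (illustrative):
      //   PermCntl  = lvsl/lvsr(Ptr)      // permute control from alignment
      //   BaseLoad  = lvx(Ptr)            // lvx truncates to 16-byte alignment
      //   ExtraLoad = lvx(Ptr + IncValue)
      //   Perm      = vperm(BaseLoad, ExtraLoad, PermCntl)  // swapped on LE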
10875 DCI.CombineTo(N, Perm, TF); 10876 return SDValue(N, 0); 10877 } 10878 } 10879 break; 10880 case ISD::INTRINSIC_WO_CHAIN: { 10881 bool isLittleEndian = Subtarget.isLittleEndian(); 10882 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 10883 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr 10884 : Intrinsic::ppc_altivec_lvsl); 10885 if ((IID == Intr || 10886 IID == Intrinsic::ppc_qpx_qvlpcld || 10887 IID == Intrinsic::ppc_qpx_qvlpcls) && 10888 N->getOperand(1)->getOpcode() == ISD::ADD) { 10889 SDValue Add = N->getOperand(1); 10890 10891 int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ? 10892 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */; 10893 10894 if (DAG.MaskedValueIsZero( 10895 Add->getOperand(1), 10896 APInt::getAllOnesValue(Bits /* alignment */) 10897 .zext( 10898 Add.getValueType().getScalarType().getSizeInBits()))) { 10899 SDNode *BasePtr = Add->getOperand(0).getNode(); 10900 for (SDNode::use_iterator UI = BasePtr->use_begin(), 10901 UE = BasePtr->use_end(); 10902 UI != UE; ++UI) { 10903 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 10904 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) { 10905 // We've found another LVSL/LVSR, and this address is an aligned 10906 // multiple of that one. The results will be the same, so use the 10907 // one we've just found instead. 10908 10909 return SDValue(*UI, 0); 10910 } 10911 } 10912 } 10913 10914 if (isa<ConstantSDNode>(Add->getOperand(1))) { 10915 SDNode *BasePtr = Add->getOperand(0).getNode(); 10916 for (SDNode::use_iterator UI = BasePtr->use_begin(), 10917 UE = BasePtr->use_end(); UI != UE; ++UI) { 10918 if (UI->getOpcode() == ISD::ADD && 10919 isa<ConstantSDNode>(UI->getOperand(1)) && 10920 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() - 10921 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) % 10922 (1ULL << Bits) == 0) { 10923 SDNode *OtherAdd = *UI; 10924 for (SDNode::use_iterator VI = OtherAdd->use_begin(), 10925 VE = OtherAdd->use_end(); VI != VE; ++VI) { 10926 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN && 10927 cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) { 10928 return SDValue(*VI, 0); 10929 } 10930 } 10931 } 10932 } 10933 } 10934 } 10935 } 10936 10937 break; 10938 case ISD::INTRINSIC_W_CHAIN: { 10939 // For little endian, VSX loads require generating lxvd2x/xxswapd. 10940 if (Subtarget.hasVSX() && Subtarget.isLittleEndian()) { 10941 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10942 default: 10943 break; 10944 case Intrinsic::ppc_vsx_lxvw4x: 10945 case Intrinsic::ppc_vsx_lxvd2x: 10946 return expandVSXLoadForLE(N, DCI); 10947 } 10948 } 10949 break; 10950 } 10951 case ISD::INTRINSIC_VOID: { 10952 // For little endian, VSX stores require generating xxswapd/stxvd2x. 10953 if (Subtarget.hasVSX() && Subtarget.isLittleEndian()) { 10954 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10955 default: 10956 break; 10957 case Intrinsic::ppc_vsx_stxvw4x: 10958 case Intrinsic::ppc_vsx_stxvd2x: 10959 return expandVSXStoreForLE(N, DCI); 10960 } 10961 } 10962 break; 10963 } 10964 case ISD::BSWAP: 10965 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 
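    // For example: (i32 (bswap (i32 load ptr))) becomes (i32 lwbrx ptr), and
    // with LDBRX on a 64-bit subtarget, (i64 (bswap (i64 load ptr))) becomes
    // (i64 ldbrx ptr).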
10966     if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
10967         N->getOperand(0).hasOneUse() &&
10968         (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
10969          (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
10970           N->getValueType(0) == MVT::i64))) {
10971       SDValue Load = N->getOperand(0);
10972       LoadSDNode *LD = cast<LoadSDNode>(Load);
10973       // Create the byte-swapping load.
10974       SDValue Ops[] = {
10975         LD->getChain(),    // Chain
10976         LD->getBasePtr(),  // Ptr
10977         DAG.getValueType(N->getValueType(0)) // VT
10978       };
10979       SDValue BSLoad =
10980         DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
10981                                 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
10982                                               MVT::i64 : MVT::i32, MVT::Other),
10983                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
10984 
10985       // If this is an i16 load, insert the truncate.
10986       SDValue ResVal = BSLoad;
10987       if (N->getValueType(0) == MVT::i16)
10988         ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
10989 
10990       // First, combine the bswap away. This makes the value produced by the
10991       // load dead.
10992       DCI.CombineTo(N, ResVal);
10993 
10994       // Next, combine the load away, we give it a bogus result value but a real
10995       // chain result. The result value is dead because the bswap is dead.
10996       DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
10997 
10998       // Return N so it doesn't get rechecked!
10999       return SDValue(N, 0);
11000     }
11001 
11002     break;
11003   case PPCISD::VCMP: {
11004     // If a VCMPo node already exists with exactly the same operands as this
11005     // node, use its result instead of this node (VCMPo computes both a CR6 and
11006     // a normal output).
11007     //
11008     if (!N->getOperand(0).hasOneUse() &&
11009         !N->getOperand(1).hasOneUse() &&
11010         !N->getOperand(2).hasOneUse()) {
11011 
11012       // Scan all of the users of the LHS, looking for VCMPo's that match.
11013       SDNode *VCMPoNode = nullptr;
11014 
11015       SDNode *LHSN = N->getOperand(0).getNode();
11016       for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
11017            UI != E; ++UI)
11018         if (UI->getOpcode() == PPCISD::VCMPo &&
11019             UI->getOperand(1) == N->getOperand(1) &&
11020             UI->getOperand(2) == N->getOperand(2) &&
11021             UI->getOperand(0) == N->getOperand(0)) {
11022           VCMPoNode = *UI;
11023           break;
11024         }
11025 
11026       // If there is no VCMPo node, or if its flag result is unused, don't
11027       // transform this.
11028       if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
11029         break;
11030 
11031       // Look at the (necessarily single) use of the flag value. If it has a
11032       // chain, this transformation is more complex. Note that multiple things
11033       // could use the value result, which we should ignore.
11034       SDNode *FlagUser = nullptr;
11035       for (SDNode::use_iterator UI = VCMPoNode->use_begin();
11036            FlagUser == nullptr; ++UI) {
11037         assert(UI != VCMPoNode->use_end() && "Didn't find user!");
11038         SDNode *User = *UI;
11039         for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
11040           if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
11041             FlagUser = User;
11042             break;
11043           }
11044         }
11045       }
11046 
11047       // If the user is a MFOCRF instruction, we know this is safe.
11048       // Otherwise we give up for right now.
11049 if (FlagUser->getOpcode() == PPCISD::MFOCRF) 11050 return SDValue(VCMPoNode, 0); 11051 } 11052 break; 11053 } 11054 case ISD::BRCOND: { 11055 SDValue Cond = N->getOperand(1); 11056 SDValue Target = N->getOperand(2); 11057 11058 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN && 11059 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() == 11060 Intrinsic::ppc_is_decremented_ctr_nonzero) { 11061 11062 // We now need to make the intrinsic dead (it cannot be instruction 11063 // selected). 11064 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0)); 11065 assert(Cond.getNode()->hasOneUse() && 11066 "Counter decrement has more than one use"); 11067 11068 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other, 11069 N->getOperand(0), Target); 11070 } 11071 } 11072 break; 11073 case ISD::BR_CC: { 11074 // If this is a branch on an altivec predicate comparison, lower this so 11075 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This 11076 // lowering is done pre-legalize, because the legalizer lowers the predicate 11077 // compare down to code that is difficult to reassemble. 11078 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 11079 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 11080 11081 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero 11082 // value. If so, pass-through the AND to get to the intrinsic. 11083 if (LHS.getOpcode() == ISD::AND && 11084 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN && 11085 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() == 11086 Intrinsic::ppc_is_decremented_ctr_nonzero && 11087 isa<ConstantSDNode>(LHS.getOperand(1)) && 11088 !isNullConstant(LHS.getOperand(1))) 11089 LHS = LHS.getOperand(0); 11090 11091 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN && 11092 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 11093 Intrinsic::ppc_is_decremented_ctr_nonzero && 11094 isa<ConstantSDNode>(RHS)) { 11095 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && 11096 "Counter decrement comparison is not EQ or NE"); 11097 11098 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 11099 bool isBDNZ = (CC == ISD::SETEQ && Val) || 11100 (CC == ISD::SETNE && !Val); 11101 11102 // We now need to make the intrinsic dead (it cannot be instruction 11103 // selected). 11104 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0)); 11105 assert(LHS.getNode()->hasOneUse() && 11106 "Counter decrement has more than one use"); 11107 11108 return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other, 11109 N->getOperand(0), N->getOperand(4)); 11110 } 11111 11112 int CompareOpc; 11113 bool isDot; 11114 11115 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 11116 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 11117 getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) { 11118 assert(isDot && "Can't compare against a vector result!"); 11119 11120 // If this is a comparison against something other than 0/1, then we know 11121 // that the condition is never/always true. 11122 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 11123 if (Val != 0 && Val != 1) { 11124 if (CC == ISD::SETEQ) // Cond never true, remove branch. 11125 return N->getOperand(0); 11126 // Always !=, turn it into an unconditional branch. 
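        // (The dot-form predicate result is always 0 or 1, so, for example, a
        // SETEQ against 2 can never hold, while SETNE against 2 always does.)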
11127 return DAG.getNode(ISD::BR, dl, MVT::Other, 11128 N->getOperand(0), N->getOperand(4)); 11129 } 11130 11131 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 11132 11133 // Create the PPCISD altivec 'dot' comparison node. 11134 SDValue Ops[] = { 11135 LHS.getOperand(2), // LHS of compare 11136 LHS.getOperand(3), // RHS of compare 11137 DAG.getConstant(CompareOpc, dl, MVT::i32) 11138 }; 11139 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue }; 11140 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 11141 11142 // Unpack the result based on how the target uses it. 11143 PPC::Predicate CompOpc; 11144 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 11145 default: // Can't happen, don't crash on invalid number though. 11146 case 0: // Branch on the value of the EQ bit of CR6. 11147 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 11148 break; 11149 case 1: // Branch on the inverted value of the EQ bit of CR6. 11150 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 11151 break; 11152 case 2: // Branch on the value of the LT bit of CR6. 11153 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 11154 break; 11155 case 3: // Branch on the inverted value of the LT bit of CR6. 11156 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT; 11157 break; 11158 } 11159 11160 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 11161 DAG.getConstant(CompOpc, dl, MVT::i32), 11162 DAG.getRegister(PPC::CR6, MVT::i32), 11163 N->getOperand(4), CompNode.getValue(1)); 11164 } 11165 break; 11166 } 11167 case ISD::BUILD_VECTOR: 11168 return DAGCombineBuildVector(N, DCI); 11169 } 11170 11171 return SDValue(); 11172 } 11173 11174 SDValue 11175 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 11176 SelectionDAG &DAG, 11177 std::vector<SDNode *> *Created) const { 11178 // fold (sdiv X, pow2) 11179 EVT VT = N->getValueType(0); 11180 if (VT == MVT::i64 && !Subtarget.isPPC64()) 11181 return SDValue(); 11182 if ((VT != MVT::i32 && VT != MVT::i64) || 11183 !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2())) 11184 return SDValue(); 11185 11186 SDLoc DL(N); 11187 SDValue N0 = N->getOperand(0); 11188 11189 bool IsNegPow2 = (-Divisor).isPowerOf2(); 11190 unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros(); 11191 SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT); 11192 11193 SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt); 11194 if (Created) 11195 Created->push_back(Op.getNode()); 11196 11197 if (IsNegPow2) { 11198 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op); 11199 if (Created) 11200 Created->push_back(Op.getNode()); 11201 } 11202 11203 return Op; 11204 } 11205 11206 //===----------------------------------------------------------------------===// 11207 // Inline Assembly Support 11208 //===----------------------------------------------------------------------===// 11209 11210 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 11211 APInt &KnownZero, 11212 APInt &KnownOne, 11213 const SelectionDAG &DAG, 11214 unsigned Depth) const { 11215 KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0); 11216 switch (Op.getOpcode()) { 11217 default: break; 11218 case PPCISD::LBRX: { 11219 // lhbrx is known to have the top bits cleared out. 
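    // For example, an i16 byte-swapping load produces a 32-bit result whose
    // upper halfword is zero, so bits 16-31 can be reported as known zero.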
11220 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16) 11221 KnownZero = 0xFFFF0000; 11222 break; 11223 } 11224 case ISD::INTRINSIC_WO_CHAIN: { 11225 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) { 11226 default: break; 11227 case Intrinsic::ppc_altivec_vcmpbfp_p: 11228 case Intrinsic::ppc_altivec_vcmpeqfp_p: 11229 case Intrinsic::ppc_altivec_vcmpequb_p: 11230 case Intrinsic::ppc_altivec_vcmpequh_p: 11231 case Intrinsic::ppc_altivec_vcmpequw_p: 11232 case Intrinsic::ppc_altivec_vcmpequd_p: 11233 case Intrinsic::ppc_altivec_vcmpgefp_p: 11234 case Intrinsic::ppc_altivec_vcmpgtfp_p: 11235 case Intrinsic::ppc_altivec_vcmpgtsb_p: 11236 case Intrinsic::ppc_altivec_vcmpgtsh_p: 11237 case Intrinsic::ppc_altivec_vcmpgtsw_p: 11238 case Intrinsic::ppc_altivec_vcmpgtsd_p: 11239 case Intrinsic::ppc_altivec_vcmpgtub_p: 11240 case Intrinsic::ppc_altivec_vcmpgtuh_p: 11241 case Intrinsic::ppc_altivec_vcmpgtuw_p: 11242 case Intrinsic::ppc_altivec_vcmpgtud_p: 11243 KnownZero = ~1U; // All bits but the low one are known to be zero. 11244 break; 11245 } 11246 } 11247 } 11248 } 11249 11250 unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { 11251 switch (Subtarget.getDarwinDirective()) { 11252 default: break; 11253 case PPC::DIR_970: 11254 case PPC::DIR_PWR4: 11255 case PPC::DIR_PWR5: 11256 case PPC::DIR_PWR5X: 11257 case PPC::DIR_PWR6: 11258 case PPC::DIR_PWR6X: 11259 case PPC::DIR_PWR7: 11260 case PPC::DIR_PWR8: 11261 case PPC::DIR_PWR9: { 11262 if (!ML) 11263 break; 11264 11265 const PPCInstrInfo *TII = Subtarget.getInstrInfo(); 11266 11267 // For small loops (between 5 and 8 instructions), align to a 32-byte 11268 // boundary so that the entire loop fits in one instruction-cache line. 11269 uint64_t LoopSize = 0; 11270 for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I) 11271 for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) { 11272 LoopSize += TII->getInstSizeInBytes(*J); 11273 if (LoopSize > 32) 11274 break; 11275 } 11276 11277 if (LoopSize > 16 && LoopSize <= 32) 11278 return 5; 11279 11280 break; 11281 } 11282 } 11283 11284 return TargetLowering::getPrefLoopAlignment(ML); 11285 } 11286 11287 /// getConstraintType - Given a constraint, return the type of 11288 /// constraint it is for this target. 11289 PPCTargetLowering::ConstraintType 11290 PPCTargetLowering::getConstraintType(StringRef Constraint) const { 11291 if (Constraint.size() == 1) { 11292 switch (Constraint[0]) { 11293 default: break; 11294 case 'b': 11295 case 'r': 11296 case 'f': 11297 case 'd': 11298 case 'v': 11299 case 'y': 11300 return C_RegisterClass; 11301 case 'Z': 11302 // FIXME: While Z does indicate a memory constraint, it specifically 11303 // indicates an r+r address (used in conjunction with the 'y' modifier 11304 // in the replacement string). Currently, we're forcing the base 11305 // register to be r0 in the asm printer (which is interpreted as zero) 11306 // and forming the complete address in the second register. This is 11307 // suboptimal. 11308 return C_Memory; 11309 } 11310 } else if (Constraint == "wc") { // individual CR bits. 11311 return C_RegisterClass; 11312 } else if (Constraint == "wa" || Constraint == "wd" || 11313 Constraint == "wf" || Constraint == "ws") { 11314 return C_RegisterClass; // VSX registers. 11315 } 11316 return TargetLowering::getConstraintType(Constraint); 11317 } 11318 11319 /// Examine constraint type and operand type and determine a weight value. 
11320 /// This object must already have been set up with the operand type
11321 /// and the current alternative constraint selected.
11322 TargetLowering::ConstraintWeight
11323 PPCTargetLowering::getSingleConstraintMatchWeight(
11324     AsmOperandInfo &info, const char *constraint) const {
11325   ConstraintWeight weight = CW_Invalid;
11326   Value *CallOperandVal = info.CallOperandVal;
11327   // If we don't have a value, we can't do a match,
11328   // but allow it at the lowest weight.
11329   if (!CallOperandVal)
11330     return CW_Default;
11331   Type *type = CallOperandVal->getType();
11332 
11333   // Look at the constraint type.
11334   if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
11335     return CW_Register; // an individual CR bit.
11336   else if ((StringRef(constraint) == "wa" ||
11337             StringRef(constraint) == "wd" ||
11338             StringRef(constraint) == "wf") &&
11339            type->isVectorTy())
11340     return CW_Register;
11341   else if (StringRef(constraint) == "ws" && type->isDoubleTy())
11342     return CW_Register;
11343 
11344   switch (*constraint) {
11345   default:
11346     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
11347     break;
11348   case 'b':
11349     if (type->isIntegerTy())
11350       weight = CW_Register;
11351     break;
11352   case 'f':
11353     if (type->isFloatTy())
11354       weight = CW_Register;
11355     break;
11356   case 'd':
11357     if (type->isDoubleTy())
11358       weight = CW_Register;
11359     break;
11360   case 'v':
11361     if (type->isVectorTy())
11362       weight = CW_Register;
11363     break;
11364   case 'y':
11365     weight = CW_Register;
11366     break;
11367   case 'Z':
11368     weight = CW_Memory;
11369     break;
11370   }
11371   return weight;
11372 }
11373 
11374 std::pair<unsigned, const TargetRegisterClass *>
11375 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
11376                                                 StringRef Constraint,
11377                                                 MVT VT) const {
11378   if (Constraint.size() == 1) {
11379     // GCC RS6000 Constraint Letters
11380     switch (Constraint[0]) {
11381     case 'b': // R1-R31
11382       if (VT == MVT::i64 && Subtarget.isPPC64())
11383         return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
11384       return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
11385     case 'r': // R0-R31
11386       if (VT == MVT::i64 && Subtarget.isPPC64())
11387         return std::make_pair(0U, &PPC::G8RCRegClass);
11388       return std::make_pair(0U, &PPC::GPRCRegClass);
11389     // 'd' and 'f' constraints are both defined to be "the floating point
11390     // registers", where one is for 32-bit and the other for 64-bit. We don't
11391     // really care overly much here so just give them all the same reg classes.
11392     case 'd':
11393     case 'f':
11394       if (VT == MVT::f32 || VT == MVT::i32)
11395         return std::make_pair(0U, &PPC::F4RCRegClass);
11396       if (VT == MVT::f64 || VT == MVT::i64)
11397         return std::make_pair(0U, &PPC::F8RCRegClass);
11398       if (VT == MVT::v4f64 && Subtarget.hasQPX())
11399         return std::make_pair(0U, &PPC::QFRCRegClass);
11400       if (VT == MVT::v4f32 && Subtarget.hasQPX())
11401         return std::make_pair(0U, &PPC::QSRCRegClass);
11402       break;
11403     case 'v':
11404       if (VT == MVT::v4f64 && Subtarget.hasQPX())
11405         return std::make_pair(0U, &PPC::QFRCRegClass);
11406       if (VT == MVT::v4f32 && Subtarget.hasQPX())
11407         return std::make_pair(0U, &PPC::QSRCRegClass);
11408       if (Subtarget.hasAltivec())
11409         return std::make_pair(0U, &PPC::VRRCRegClass);
      break; // Without Altivec there is no 'v' register class; do not fall
             // through into the 'y' case and hand back a CR register.
11410     case 'y':   // crrc
11411       return std::make_pair(0U, &PPC::CRRCRegClass);
11412     }
11413   } else if (Constraint == "wc" && Subtarget.useCRBits()) {
11414     // An individual CR bit.
11415 return std::make_pair(0U, &PPC::CRBITRCRegClass); 11416 } else if ((Constraint == "wa" || Constraint == "wd" || 11417 Constraint == "wf") && Subtarget.hasVSX()) { 11418 return std::make_pair(0U, &PPC::VSRCRegClass); 11419 } else if (Constraint == "ws" && Subtarget.hasVSX()) { 11420 if (VT == MVT::f32 && Subtarget.hasP8Vector()) 11421 return std::make_pair(0U, &PPC::VSSRCRegClass); 11422 else 11423 return std::make_pair(0U, &PPC::VSFRCRegClass); 11424 } 11425 11426 std::pair<unsigned, const TargetRegisterClass *> R = 11427 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 11428 11429 // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers 11430 // (which we call X[0-9]+). If a 64-bit value has been requested, and a 11431 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent 11432 // register. 11433 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use 11434 // the AsmName field from *RegisterInfo.td, then this would not be necessary. 11435 if (R.first && VT == MVT::i64 && Subtarget.isPPC64() && 11436 PPC::GPRCRegClass.contains(R.first)) 11437 return std::make_pair(TRI->getMatchingSuperReg(R.first, 11438 PPC::sub_32, &PPC::G8RCRegClass), 11439 &PPC::G8RCRegClass); 11440 11441 // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same. 11442 if (!R.second && StringRef("{cc}").equals_lower(Constraint)) { 11443 R.first = PPC::CR0; 11444 R.second = &PPC::CRRCRegClass; 11445 } 11446 11447 return R; 11448 } 11449 11450 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 11451 /// vector. If it is invalid, don't add anything to Ops. 11452 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 11453 std::string &Constraint, 11454 std::vector<SDValue>&Ops, 11455 SelectionDAG &DAG) const { 11456 SDValue Result; 11457 11458 // Only support length 1 constraints. 11459 if (Constraint.length() > 1) return; 11460 11461 char Letter = Constraint[0]; 11462 switch (Letter) { 11463 default: break; 11464 case 'I': 11465 case 'J': 11466 case 'K': 11467 case 'L': 11468 case 'M': 11469 case 'N': 11470 case 'O': 11471 case 'P': { 11472 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op); 11473 if (!CST) return; // Must be an immediate to match. 11474 SDLoc dl(Op); 11475 int64_t Value = CST->getSExtValue(); 11476 EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative 11477 // numbers are printed as such. 11478 switch (Letter) { 11479 default: llvm_unreachable("Unknown constraint letter!"); 11480 case 'I': // "I" is a signed 16-bit constant. 11481 if (isInt<16>(Value)) 11482 Result = DAG.getTargetConstant(Value, dl, TCVT); 11483 break; 11484 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 11485 if (isShiftedUInt<16, 16>(Value)) 11486 Result = DAG.getTargetConstant(Value, dl, TCVT); 11487 break; 11488 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. 11489 if (isShiftedInt<16, 16>(Value)) 11490 Result = DAG.getTargetConstant(Value, dl, TCVT); 11491 break; 11492 case 'K': // "K" is a constant with only the low-order 16 bits nonzero. 11493 if (isUInt<16>(Value)) 11494 Result = DAG.getTargetConstant(Value, dl, TCVT); 11495 break; 11496 case 'M': // "M" is a constant that is greater than 31. 11497 if (Value > 31) 11498 Result = DAG.getTargetConstant(Value, dl, TCVT); 11499 break; 11500 case 'N': // "N" is a positive constant that is an exact power of two. 
11501     if (Value > 0 && isPowerOf2_64(Value))
11502       Result = DAG.getTargetConstant(Value, dl, TCVT);
11503     break;
11504   case 'O': // "O" is the constant zero.
11505     if (Value == 0)
11506       Result = DAG.getTargetConstant(Value, dl, TCVT);
11507     break;
11508   case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
11509     if (isInt<16>(-Value))
11510       Result = DAG.getTargetConstant(Value, dl, TCVT);
11511     break;
11512     }
11513     break;
11514   }
11515   }
11516 
11517   if (Result.getNode()) {
11518     Ops.push_back(Result);
11519     return;
11520   }
11521 
11522   // Handle standard constraint letters.
11523   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
11524 }
11525 
11526 // isLegalAddressingMode - Return true if the addressing mode represented
11527 // by AM is legal for this target, for a load/store of the specified type.
11528 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
11529                                               const AddrMode &AM, Type *Ty,
11530                                               unsigned AS) const {
11531   // PPC does not allow r+i addressing modes for vectors!
11532   if (Ty->isVectorTy() && AM.BaseOffs != 0)
11533     return false;
11534 
11535   // PPC allows a sign-extended 16-bit immediate field.
11536   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
11537     return false;
11538 
11539   // No global is ever allowed as a base.
11540   if (AM.BaseGV)
11541     return false;
11542 
11543   // PPC only supports r+r addressing:
11544   switch (AM.Scale) {
11545   case 0: // "r+i" or just "i", depending on HasBaseReg.
11546     break;
11547   case 1:
11548     if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
11549       return false;
11550     // Otherwise we have r+r or r+i.
11551     break;
11552   case 2:
11553     if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
11554       return false;
11555     // Allow 2*r as r+r.
11556     break;
11557   default:
11558     // No other scales are supported.
11559     return false;
11560   }
11561 
11562   return true;
11563 }
11564 
11565 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
11566                                            SelectionDAG &DAG) const {
11567   MachineFunction &MF = DAG.getMachineFunction();
11568   MachineFrameInfo &MFI = MF.getFrameInfo();
11569   MFI.setReturnAddressIsTaken(true);
11570 
11571   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
11572     return SDValue();
11573 
11574   SDLoc dl(Op);
11575   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
11576 
11577   // Make sure the function does not optimize away the store of the RA to
11578   // the stack.
11579   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
11580   FuncInfo->setLRStoreRequired();
11581   bool isPPC64 = Subtarget.isPPC64();
11582   auto PtrVT = getPointerTy(MF.getDataLayout());
11583 
11584   if (Depth > 0) {
11585     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
11586     SDValue Offset =
11587         DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
11588                         isPPC64 ? MVT::i64 : MVT::i32);
11589     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
11590                        DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
11591                        MachinePointerInfo());
11592   }
11593 
11594   // Just load the return address off the stack.
11595 SDValue RetAddrFI = getReturnAddrFrameIndex(DAG); 11596 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI, 11597 MachinePointerInfo()); 11598 } 11599 11600 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, 11601 SelectionDAG &DAG) const { 11602 SDLoc dl(Op); 11603 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 11604 11605 MachineFunction &MF = DAG.getMachineFunction(); 11606 MachineFrameInfo &MFI = MF.getFrameInfo(); 11607 MFI.setFrameAddressIsTaken(true); 11608 11609 EVT PtrVT = getPointerTy(MF.getDataLayout()); 11610 bool isPPC64 = PtrVT == MVT::i64; 11611 11612 // Naked functions never have a frame pointer, and so we use r1. For all 11613 // other functions, this decision must be delayed until during PEI. 11614 unsigned FrameReg; 11615 if (MF.getFunction()->hasFnAttribute(Attribute::Naked)) 11616 FrameReg = isPPC64 ? PPC::X1 : PPC::R1; 11617 else 11618 FrameReg = isPPC64 ? PPC::FP8 : PPC::FP; 11619 11620 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, 11621 PtrVT); 11622 while (Depth--) 11623 FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(), 11624 FrameAddr, MachinePointerInfo()); 11625 return FrameAddr; 11626 } 11627 11628 // FIXME? Maybe this could be a TableGen attribute on some registers and 11629 // this table could be generated automatically from RegInfo. 11630 unsigned PPCTargetLowering::getRegisterByName(const char* RegName, EVT VT, 11631 SelectionDAG &DAG) const { 11632 bool isPPC64 = Subtarget.isPPC64(); 11633 bool isDarwinABI = Subtarget.isDarwinABI(); 11634 11635 if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) || 11636 (!isPPC64 && VT != MVT::i32)) 11637 report_fatal_error("Invalid register global variable type"); 11638 11639 bool is64Bit = isPPC64 && VT == MVT::i64; 11640 unsigned Reg = StringSwitch<unsigned>(RegName) 11641 .Case("r1", is64Bit ? PPC::X1 : PPC::R1) 11642 .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2) 11643 .Case("r13", (!isPPC64 && isDarwinABI) ? 0 : 11644 (is64Bit ? PPC::X13 : PPC::R13)) 11645 .Default(0); 11646 11647 if (Reg) 11648 return Reg; 11649 report_fatal_error("Invalid register name global variable"); 11650 } 11651 11652 bool 11653 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 11654 // The PowerPC target isn't yet aware of offsets. 
11655 return false; 11656 } 11657 11658 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 11659 const CallInst &I, 11660 unsigned Intrinsic) const { 11661 11662 switch (Intrinsic) { 11663 case Intrinsic::ppc_qpx_qvlfd: 11664 case Intrinsic::ppc_qpx_qvlfs: 11665 case Intrinsic::ppc_qpx_qvlfcd: 11666 case Intrinsic::ppc_qpx_qvlfcs: 11667 case Intrinsic::ppc_qpx_qvlfiwa: 11668 case Intrinsic::ppc_qpx_qvlfiwz: 11669 case Intrinsic::ppc_altivec_lvx: 11670 case Intrinsic::ppc_altivec_lvxl: 11671 case Intrinsic::ppc_altivec_lvebx: 11672 case Intrinsic::ppc_altivec_lvehx: 11673 case Intrinsic::ppc_altivec_lvewx: 11674 case Intrinsic::ppc_vsx_lxvd2x: 11675 case Intrinsic::ppc_vsx_lxvw4x: { 11676 EVT VT; 11677 switch (Intrinsic) { 11678 case Intrinsic::ppc_altivec_lvebx: 11679 VT = MVT::i8; 11680 break; 11681 case Intrinsic::ppc_altivec_lvehx: 11682 VT = MVT::i16; 11683 break; 11684 case Intrinsic::ppc_altivec_lvewx: 11685 VT = MVT::i32; 11686 break; 11687 case Intrinsic::ppc_vsx_lxvd2x: 11688 VT = MVT::v2f64; 11689 break; 11690 case Intrinsic::ppc_qpx_qvlfd: 11691 VT = MVT::v4f64; 11692 break; 11693 case Intrinsic::ppc_qpx_qvlfs: 11694 VT = MVT::v4f32; 11695 break; 11696 case Intrinsic::ppc_qpx_qvlfcd: 11697 VT = MVT::v2f64; 11698 break; 11699 case Intrinsic::ppc_qpx_qvlfcs: 11700 VT = MVT::v2f32; 11701 break; 11702 default: 11703 VT = MVT::v4i32; 11704 break; 11705 } 11706 11707 Info.opc = ISD::INTRINSIC_W_CHAIN; 11708 Info.memVT = VT; 11709 Info.ptrVal = I.getArgOperand(0); 11710 Info.offset = -VT.getStoreSize()+1; 11711 Info.size = 2*VT.getStoreSize()-1; 11712 Info.align = 1; 11713 Info.vol = false; 11714 Info.readMem = true; 11715 Info.writeMem = false; 11716 return true; 11717 } 11718 case Intrinsic::ppc_qpx_qvlfda: 11719 case Intrinsic::ppc_qpx_qvlfsa: 11720 case Intrinsic::ppc_qpx_qvlfcda: 11721 case Intrinsic::ppc_qpx_qvlfcsa: 11722 case Intrinsic::ppc_qpx_qvlfiwaa: 11723 case Intrinsic::ppc_qpx_qvlfiwza: { 11724 EVT VT; 11725 switch (Intrinsic) { 11726 case Intrinsic::ppc_qpx_qvlfda: 11727 VT = MVT::v4f64; 11728 break; 11729 case Intrinsic::ppc_qpx_qvlfsa: 11730 VT = MVT::v4f32; 11731 break; 11732 case Intrinsic::ppc_qpx_qvlfcda: 11733 VT = MVT::v2f64; 11734 break; 11735 case Intrinsic::ppc_qpx_qvlfcsa: 11736 VT = MVT::v2f32; 11737 break; 11738 default: 11739 VT = MVT::v4i32; 11740 break; 11741 } 11742 11743 Info.opc = ISD::INTRINSIC_W_CHAIN; 11744 Info.memVT = VT; 11745 Info.ptrVal = I.getArgOperand(0); 11746 Info.offset = 0; 11747 Info.size = VT.getStoreSize(); 11748 Info.align = 1; 11749 Info.vol = false; 11750 Info.readMem = true; 11751 Info.writeMem = false; 11752 return true; 11753 } 11754 case Intrinsic::ppc_qpx_qvstfd: 11755 case Intrinsic::ppc_qpx_qvstfs: 11756 case Intrinsic::ppc_qpx_qvstfcd: 11757 case Intrinsic::ppc_qpx_qvstfcs: 11758 case Intrinsic::ppc_qpx_qvstfiw: 11759 case Intrinsic::ppc_altivec_stvx: 11760 case Intrinsic::ppc_altivec_stvxl: 11761 case Intrinsic::ppc_altivec_stvebx: 11762 case Intrinsic::ppc_altivec_stvehx: 11763 case Intrinsic::ppc_altivec_stvewx: 11764 case Intrinsic::ppc_vsx_stxvd2x: 11765 case Intrinsic::ppc_vsx_stxvw4x: { 11766 EVT VT; 11767 switch (Intrinsic) { 11768 case Intrinsic::ppc_altivec_stvebx: 11769 VT = MVT::i8; 11770 break; 11771 case Intrinsic::ppc_altivec_stvehx: 11772 VT = MVT::i16; 11773 break; 11774 case Intrinsic::ppc_altivec_stvewx: 11775 VT = MVT::i32; 11776 break; 11777 case Intrinsic::ppc_vsx_stxvd2x: 11778 VT = MVT::v2f64; 11779 break; 11780 case Intrinsic::ppc_qpx_qvstfd: 11781 VT = MVT::v4f64; 11782 break; 11783 
    case Intrinsic::ppc_qpx_qvstfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    // As with the loads above, these stores may implicitly truncate the
    // effective address, so report the same conservative memory window.
    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = 1;
    Info.vol = false;
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfda:
  case Intrinsic::ppc_qpx_qvstfsa:
  case Intrinsic::ppc_qpx_qvstfcda:
  case Intrinsic::ppc_qpx_qvstfcsa:
  case Intrinsic::ppc_qpx_qvstfiwa: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = 1;
    Info.vol = false;
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  default:
    break;
  }

  return false;
}

/// getOptimalMemOpType - Returns the target-specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero, the destination alignment can satisfy any
/// constraint and need not be checked. Similarly, if SrcAlign is zero there
/// is no need to check it against an alignment requirement, probably because
/// the source does not need to be loaded. If 'IsMemset' is true, a memset is
/// being expanded. If 'ZeroMemset' is true, it is a memset of zero.
/// 'MemcpyStrSrc' indicates whether the memcpy source is constant and so
/// does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
                                           unsigned DstAlign, unsigned SrcAlign,
                                           bool IsMemset, bool ZeroMemset,
                                           bool MemcpyStrSrc,
                                           MachineFunction &MF) const {
  if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
    const Function *F = MF.getFunction();
    // When expanding a memset, require at least two QPX instructions to cover
    // the cost of loading the value to be stored from the constant pool.
    if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
        (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
        !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
      return MVT::v4f64;
    }

    // We should use Altivec/VSX loads and stores when available. For unaligned
    // addresses, unaligned VSX loads are only fast starting with the P8.
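    // For example, a 16-byte copy between two 16-byte-aligned buffers can use
    // a single v4i32 load/store pair on any Altivec subtarget; if either side
    // may be unaligned, v4i32 is chosen only when P8 vector support (or, for
    // memsets, VSX) makes the unaligned vector accesses cheap.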
    if (Subtarget.hasAltivec() && Size >= 16 &&
        (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
         ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
      return MVT::v4i32;
  }

  if (Subtarget.isPPC64()) {
    return MVT::i64;
  }

  return MVT::i32;
}

/// \brief Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  return !(BitSize == 0 || BitSize > 64);
}

bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Generally speaking, zexts are not free, but they are free when they can be
  // folded with other operations.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  // FIXME: Add other cases...
  //  - 32-bit shifts with a zext to i64
  //  - zext after ctlz, bswap, etc.
  //  - zext after and by a constant mask

  return TargetLowering::isZExtFree(Val, VT2);
}

bool PPCTargetLowering::isFPExtFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return true;
}

bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                       unsigned,
                                                       unsigned,
                                                       bool *Fast) const {
  if (DisablePPCUnaligned)
    return false;

  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual expansion,
  // and generally only traps for software emulation when crossing page
  // boundaries.
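  // The checks below are stricter for vector types: only the 16-byte VSX
  // types qualify, and only when VSX is available. ppcf128 (carried as a
  // pair of f64 values) is always rejected.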

  if (!VT.isSimple())
    return false;

  if (VT.getSimpleVT().isVector()) {
    if (Subtarget.hasVSX()) {
      if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
          VT != MVT::v4f32 && VT != MVT::v4i32)
        return false;
    } else {
      return false;
    }
  }

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  default:
    break;
  }

  return false;
}

const MCPhysReg *
PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
  // LR is a callee-save register, but we must treat it as clobbered by any call
  // site. Hence we include LR in the scratch registers, which are in turn added
  // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
  // to CTR, which is used by any indirect call.
  static const MCPhysReg ScratchRegs[] = {
    PPC::X12, PPC::LR8, PPC::CTR8, 0
  };

  return ScratchRegs;
}

unsigned PPCTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
}

unsigned PPCTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
}

bool PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
    EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves

  if (Subtarget.hasVSX() || Subtarget.hasQPX())
    return true;

  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref || Subtarget.enableMachineScheduler())
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}

// Create a fast isel object.
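// Note that PPC::createFastISel may return null when fast-isel is not
// implemented for the current subtarget and ABI; in that case SelectionDAG
// instruction selection is used as usual.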
FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) const {
  return PPC::createFastISel(FuncInfo, LibInfo);
}

void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  if (Subtarget.isDarwinABI()) return;
  if (!Subtarget.isPPC64()) return;

  // Update IsSplitCSR in PPCFunctionInfo.
  PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>();
  PFI->setIsSplitCSR(true);
}

void PPCTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (PPC::G8RCRegClass.contains(*I))
      RC = &PPC::G8RCRegClass;
    else if (PPC::F8RCRegClass.contains(*I))
      RC = &PPC::F8RCRegClass;
    else if (PPC::CRRCRegClass.contains(*I))
      RC = &PPC::CRRCRegClass;
    else if (PPC::VRRCRegClass.contains(*I))
      RC = &PPC::VRRCRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    unsigned NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions; it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction()->hasFnAttribute(
               Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool PPCTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}
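
// Note: taken together, the two overrides above cause stack-protector
// lowering on Linux to use the LOAD_STACK_GUARD node (expanded by the target
// after instruction selection) rather than a load from the default
// __stack_chk_guard global, so no guard declaration is inserted into the
// module in that case.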